Index: sys/mbuf.h
===================================================================
RCS file: /dump/FreeBSD-CVS/src/sys/sys/mbuf.h,v
retrieving revision 1.71
diff -u -r1.71 mbuf.h
--- sys/mbuf.h	2001/02/17 05:35:56	1.71
+++ sys/mbuf.h	2001/03/08 22:18:46
@@ -37,6 +37,9 @@
 #ifndef _SYS_MBUF_H_
 #define	_SYS_MBUF_H_
 
+#define	MBUF_VMGUARD
+#define	MCLUSTER_VMGUARD
+
 #include <machine/mutex.h>	/* XXX */
 
 /*
@@ -277,6 +280,26 @@
 		wakeup_one(&(m_wid));				\
 } while (0)
 
+#ifdef MCLUSTER_VMGUARD
+#define	MEXT_PREADDREF_HOOK(m) do {				\
+	mtx_lock(&mclfree.m_mtx);				\
+	if (m->m_ext.ref_cnt->refcnt == 1 &&			\
+	    m->m_ext.ext_type == EXT_CLUSTER)			\
+		mcl_guard(m);					\
+	mtx_unlock(&mclfree.m_mtx);				\
+} while (0)
+#define	MEXT_POSTREMREF_HOOK(m) do {				\
+	mtx_lock(&mclfree.m_mtx);				\
+	if (m->m_ext.ref_cnt->refcnt == 1 &&			\
+	    m->m_ext.ext_type == EXT_CLUSTER)			\
+		mcl_unguard(m);					\
+	mtx_unlock(&mclfree.m_mtx);				\
+} while (0)
+#else
+#define	MEXT_PREADDREF_HOOK(m)
+#define	MEXT_POSTREMREF_HOOK(m)
+#endif
+
 /*
  * mbuf external reference count management macros:
  *
@@ -293,9 +316,13 @@
 #define	MEXT_REM_REF(m) do {					\
 	KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt < 0")); \
 	atomic_subtract_int(&((m)->m_ext.ref_cnt->refcnt), 1);	\
+	MEXT_POSTREMREF_HOOK(m);				\
 } while(0)
 
-#define	MEXT_ADD_REF(m)	atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1)
+#define	MEXT_ADD_REF(m) do {					\
+	MEXT_PREADDREF_HOOK(m);					\
+	atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1);	\
+} while(0)
 
 #define	_MEXT_ALLOC_CNT(m_cnt, how) do {			\
 	union mext_refcnt *__mcnt;				\
@@ -678,6 +705,10 @@
 struct	mbuf *m_pulldown(struct mbuf *, int, int, int *);
 struct	mbuf *m_pullup(struct mbuf *, int);
 struct	mbuf *m_split(struct mbuf *,int,int);
+#ifdef MCLUSTER_VMGUARD
+void	mcl_guard(struct mbuf *m);
+void	mcl_unguard(struct mbuf *m);
+#endif
 #endif /* _KERNEL */
 
 #endif /* !_SYS_MBUF_H_ */
Index: kern/uipc_mbuf.c
===================================================================
RCS file: /dump/FreeBSD-CVS/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.69
diff -u -r1.69 uipc_mbuf.c
--- kern/uipc_mbuf.c	2001/02/21 09:24:13	1.69
+++ kern/uipc_mbuf.c	2001/03/08 22:42:10
@@ -47,6 +47,11 @@
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
+#if defined(MBUF_VMGUARD) || defined(MCLUSTER_VMGUARD)
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#endif
 
 static void mbinit(void *);
 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
@@ -253,7 +258,11 @@
 		return (0);
 	}
 
+#ifdef MBUF_VMGUARD
+	nbytes = nmb * PAGE_SIZE * 2;
+#else
 	nbytes = round_page(nmb * MSIZE);
+#endif
 	mtx_unlock(&mmbfree.m_mtx);
 	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
 
@@ -270,7 +279,9 @@
 	if (p == NULL)
 		return (0);
 
+#ifndef MBUF_VMGUARD
 	nmb = nbytes / MSIZE;
+#endif
 
 	/*
 	 * We don't let go of the mutex in order to avoid a race.
@@ -278,9 +289,22 @@
 	 * with grabbing the mbuf from the free list.
 	 */
 	for (i = 0; i < nmb; i++) {
+#ifdef MBUF_VMGUARD
+		struct mbuf *m;
+
+		m = (struct mbuf *)(p + PAGE_SIZE - MSIZE);
+		m->m_next = mmbfree.m_head;
+		mmbfree.m_head = m;
+		mtx_unlock(&mmbfree.m_mtx);
+		vm_map_protect(mb_map, (vm_offset_t)p + PAGE_SIZE,
+		    (vm_offset_t)p + 2 * PAGE_SIZE, VM_PROT_NONE, 0);
+		mtx_lock(&mmbfree.m_mtx);
+		p += 2 * PAGE_SIZE;
+#else
 		((struct mbuf *)p)->m_next = mmbfree.m_head;
 		mmbfree.m_head = (struct mbuf *)p;
 		p += MSIZE;
+#endif
 	}
 	mbstat.m_mbufs += nmb;
 	mbtypes[MT_FREE] += nmb;
@@ -373,11 +397,17 @@
 		return (0);
 	}
 
+#ifdef MCLUSTER_VMGUARD
+	npg = ncl * 2;
+#else
 	npg = ncl;
+#endif
 	mtx_unlock(&mclfree.m_mtx);
 	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
 	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
+#ifndef MCLUSTER_VMGUARD
 	ncl = ncl * PAGE_SIZE / MCLBYTES;
+#endif
 	mtx_lock(&mclfree.m_mtx);
 
 	/*
@@ -393,9 +423,22 @@
 	 * We don't let go of the mutex in order to avoid a race.
 	 */
 	for (i = 0; i < ncl; i++) {
+#ifdef MCLUSTER_VMGUARD
+		union mcluster *q;
+
+		q = (union mcluster *)(p + PAGE_SIZE - MCLBYTES);
+		q->mcl_next = mclfree.m_head;
+		mclfree.m_head = q;
+		mtx_unlock(&mclfree.m_mtx);
+		vm_map_protect(mb_map, (vm_offset_t)p + PAGE_SIZE,
+		    (vm_offset_t)p + 2 * PAGE_SIZE, VM_PROT_NONE, 0);
+		mtx_lock(&mclfree.m_mtx);
+		p += 2 * PAGE_SIZE;
+#else
 		((union mcluster *)p)->mcl_next = mclfree.m_head;
 		mclfree.m_head = (union mcluster *)p;
 		p += MCLBYTES;
+#endif
 		mbstat.m_clfree++;
 	}
 	mbstat.m_clusters += ncl;
@@ -434,6 +477,29 @@
 
 	return (p);
 }
+
+#ifdef MCLUSTER_VMGUARD
+void
+mcl_guard(m)
+	struct mbuf *m;
+{
+	vm_offset_t p = (vm_offset_t)m->m_ext.ext_buf + MCLBYTES - PAGE_SIZE;
+	mtx_unlock(&mclfree.m_mtx);
+	vm_map_protect(mb_map, p, p + PAGE_SIZE, VM_PROT_READ, 0);
+	mtx_lock(&mclfree.m_mtx);
+}
+
+void
+mcl_unguard(m)
+	struct mbuf *m;
+{
+	vm_offset_t p = (vm_offset_t)m->m_ext.ext_buf + MCLBYTES - PAGE_SIZE;
+	mtx_unlock(&mclfree.m_mtx);
+	vm_map_protect(mb_map, p, p + PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE,
+	    0);
+	mtx_lock(&mclfree.m_mtx);
+}
+#endif
 /*
  * m_reclaim: drain protocols in hopes to free up some resources...