You too can have a GIANT buffer cache.... etc. etc... 

After much bug fighting over the last 6 months, in a number of places
in the midlayer and now in uvm, I think it's about time to shop this
around again. 

This will only make a difference on amd64, and only if you have 4 GB or
more of RAM. What it does is allow the high (non-DMA reachable) memory
to be used for buffer cache pages. It will use your configured buffer
cache percentage of both dma'able and above-dma'able pages for the
cache, migrating the oldest cache pages into high memory. Pages are
flipped back into dma'able memory if they are needed for IO. 

Notwithstanding that it only "matters" on amd64, it does change how
the world works a bit, and therefore requires testing everywhere. It
has survived multiple make build/make release test cycles on my
machines (amd64, i386, zaurus, sparc, sparc64, hppa) with various
settings of bufcachepercent, and is running on my NFS server
(bufcachepercent=90) without any complaints; it has also been running
on my laptop for a long time now. 

If you try it and run into trouble (i.e. any new regressions), please
ensure you have your machine's console accessible (check that you have
ddb.console=1 in /etc/sysctl.conf), and please try to get


trace
ps
show bcstats
show uvm

from ddb if at all possible. 
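
For reference, the knobs involved would look something like this in
/etc/sysctl.conf (kern.bufcachepercent is the sysctl behind the cache
percentage; 90 is just what the NFS server here runs, use whatever you
are testing with):

ddb.console=1              # so you can actually get into ddb on the console
kern.bufcachepercent=90    # buffer cache percentage under test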

Please let me know how you do with it, and most importantly what
you try it on/with. 

-Bob

(diff also in ~beck/viagra.diff6 on cvs)
----
Index: sys/kern/kern_sysctl.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sysctl.c,v
retrieving revision 1.231
diff -u -p -r1.231 kern_sysctl.c
--- sys/kern/kern_sysctl.c      11 Feb 2013 11:11:42 -0000      1.231
+++ sys/kern/kern_sysctl.c      4 Mar 2013 23:00:45 -0000
@@ -110,6 +110,7 @@ extern struct disklist_head disklist;
 extern fixpt_t ccpu;
 extern  long numvnodes;
 extern u_int mcllivelocks;
+extern psize_t b_dmapages_total, b_highpages_total, b_dmamaxpages;
 
 extern void nmbclust_update(void);
 
@@ -565,8 +566,8 @@ kern_sysctl(int *name, u_int namelen, vo
                return (sysctl_int(oldp, oldlenp, newp, newlen,
                    &rthreads_enabled));
        case KERN_CACHEPCT: {
-               u_int64_t dmapages;
-               int opct, pgs;
+               psize_t pgs;
+               int opct;
                opct = bufcachepercent;
                error = sysctl_int(oldp, oldlenp, newp, newlen,
                    &bufcachepercent);
@@ -576,9 +577,11 @@ kern_sysctl(int *name, u_int namelen, vo
                        bufcachepercent = opct;
                        return (EINVAL);
                }
-               dmapages = uvm_pagecount(&dma_constraint);
                if (bufcachepercent != opct) {
-                       pgs = bufcachepercent * dmapages / 100;
+                       pgs = (b_highpages_total + b_dmapages_total)
+                           * bufcachepercent / 100;
+                       b_dmamaxpages = b_dmapages_total * bufcachepercent
+                           / 100;
                        bufadjust(pgs); /* adjust bufpages */
                        bufhighpages = bufpages; /* set high water mark */
                }
Index: sys/kern/spec_vnops.c
===================================================================
RCS file: /cvs/src/sys/kern/spec_vnops.c,v
retrieving revision 1.69
diff -u -p -r1.69 spec_vnops.c
--- sys/kern/spec_vnops.c       20 Jun 2012 17:30:22 -0000      1.69
+++ sys/kern/spec_vnops.c       10 Feb 2013 19:04:12 -0000
@@ -457,7 +457,9 @@ spec_strategy(void *v)
        struct vop_strategy_args *ap = v;
        struct buf *bp = ap->a_bp;
        int maj = major(bp->b_dev);
-       
+
+       if (!ISSET(bp->b_flags, B_DAQ) && ISSET(bp->b_flags, B_BC))
+               panic("bogus buf %p passed to spec_strategy", bp);
        if (LIST_FIRST(&bp->b_dep) != NULL)
                buf_start(bp);
 
Index: sys/kern/vfs_bio.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_bio.c,v
retrieving revision 1.146
diff -u -p -r1.146 vfs_bio.c
--- sys/kern/vfs_bio.c  17 Feb 2013 17:39:29 -0000      1.146
+++ sys/kern/vfs_bio.c  4 Mar 2013 23:00:45 -0000
@@ -69,6 +69,10 @@
 #define        BQ_CLEAN        1               /* LRU queue with clean buffers */
 
 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
+TAILQ_HEAD(bqda, buf) bufqueue_da;
+struct uvm_constraint_range high_constraint;
+psize_t b_dmapages_total, b_highpages_total, b_dmamaxpages;
+int needda;
 int nobuffers;
 int needbuffer;
 struct bio_ops bioops;
@@ -157,8 +161,12 @@ buf_put(struct buf *bp)
        LIST_REMOVE(bp, b_list);
        bcstats.numbufs--;
 
+       if (ISSET(bp->b_flags, B_DAQ)) {
+               TAILQ_REMOVE(&bufqueue_da, bp, b_qda);
+               CLR(bp->b_flags, B_DAQ);
+       }
        if (buf_dealloc_mem(bp) != 0)
-               return;
+                return;
        pool_put(&bufpool, bp);
 }
 
@@ -168,12 +176,21 @@ buf_put(struct buf *bp)
 void
 bufinit(void)
 {
-       u_int64_t dmapages;
        struct bqueues *dp;
 
-       dmapages = uvm_pagecount(&dma_constraint);
-       /* take away a guess at how much of this the kernel will consume */
-       dmapages -= (atop(physmem) - atop(uvmexp.free));
+       /* How much DMA accessible memory will we consider? */
+       b_dmapages_total = uvm_pagecount(&dma_constraint);
+       /* Take away a guess at how much of this the kernel will consume. */
+       b_dmapages_total -= (atop(physmem) - atop(uvmexp.free));
+
+       /* See if we have memory above the dma accessible region. */
+       high_constraint.ucr_low = dma_constraint.ucr_high;
+       high_constraint.ucr_high = no_constraint.ucr_high;
+       if (high_constraint.ucr_low != high_constraint.ucr_high) {
+               high_constraint.ucr_low++;
+               b_highpages_total = uvm_pagecount(&high_constraint);
+       } else
+               b_highpages_total = 0;
 
        /*
         * If MD code doesn't say otherwise, use up to 10% of DMA'able
@@ -189,18 +206,23 @@ bufinit(void)
        KASSERT(bufcachepercent <= 90);
        KASSERT(bufcachepercent >= 5);
        if (bufpages == 0)
-               bufpages = dmapages * bufcachepercent / 100;
+               bufpages = (b_dmapages_total + b_highpages_total)
+                   * bufcachepercent / 100;
        if (bufpages < BCACHE_MIN)
                bufpages = BCACHE_MIN;
-       KASSERT(bufpages < dmapages);
+       KASSERT(bufpages < b_dmapages_total);
 
+#if 1
+       printf("buffer cache from %lu dma pages and %lu high pages\n",
+           (unsigned long)b_dmapages_total, (unsigned long)b_highpages_total);
+#endif
        bufhighpages = bufpages;
-
+       b_dmamaxpages = b_dmapages_total * bufcachepercent / 100;
        /*
         * Set the base backoff level for the buffer cache.  We will
         * not allow uvm to steal back more than this number of pages.
         */
-       buflowpages = dmapages * 5 / 100;
+       buflowpages = b_dmapages_total * 5 / 100;
        if (buflowpages < BCACHE_MIN)
                buflowpages = BCACHE_MIN;
 
@@ -237,6 +259,7 @@ bufinit(void)
 
        for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
                TAILQ_INIT(dp);
+       TAILQ_INIT(&bufqueue_da);
 
        /*
         * hmm - bufkvm is an argument because it's static, while
@@ -321,8 +344,10 @@ bufbackoff(struct uvm_constraint_range *
        /*
         * Back off "size" buffer cache pages. Called by the page
         * daemon to consume buffer cache pages rather than scanning.
+        * Also called by the buffer cache to back off if memory
+        * allocation in a particular range fails.
         *
-        * It returns 0 to the pagedaemon to indicate that it has
+        * It returns 0 to the caller to indicate that it has
         * succeeded in freeing enough pages. It returns -1 to
         * indicate that it could not and the pagedaemon should take
         * other measures.
@@ -341,8 +366,21 @@ bufbackoff(struct uvm_constraint_range *
        if (bufpages - pdelta < buflowpages)
                pdelta = bufpages - buflowpages;
        oldbufpages = bufpages;
-       bufadjust(bufpages - pdelta);
-       if (oldbufpages - bufpages < size)
+       if (b_highpages_total
+           && (range->ucr_high <= dma_constraint.ucr_high)) {
+               /*
+                * Free memory by moving DMA accessible pages above
+                * DMA accessible memory.
+                */
+               if (bcstats.dmapages - pdelta > b_dmamaxpages)
+                       pdelta += (bcstats.dmapages - b_dmamaxpages);
+               bufhigh(pdelta);
+               return(0);
+       } else {
+               /* Free memory by shrinking the cache. */
+               bufadjust(bufpages - pdelta);
+       }
+       if (oldbufpages - bufpages < size)
                return (-1); /* we did not free what we were asked */
        else
                return(0);
@@ -526,12 +564,18 @@ bread_cluster(struct vnode *vp, daddr64_
        for (i = 1; i < howmany; i++) {
                bcstats.pendingreads++;
                bcstats.numreads++;
-               SET(xbpp[i]->b_flags, B_READ | B_ASYNC);
+               /*
+                * We set B_DMA here because bp above will be B_DMA,
+                * and we are playing buffer slice-n-dice games from
+                * the memory allocated in bp.
+                */
+               SET(xbpp[i]->b_flags, B_DMA | B_READ | B_ASYNC);
                xbpp[i]->b_blkno = sblkno + (i * inc);
                xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
                xbpp[i]->b_data = NULL;
                xbpp[i]->b_pobj = bp->b_pobj;
                xbpp[i]->b_poffs = bp->b_poffs + (i * size);
+               buf_daq_add(xbpp[i]);
        }
 
        KASSERT(bp->b_lblkno == blkno + 1);
@@ -793,6 +837,8 @@ brelse(struct buf *bp)
                                CLR(bp->b_flags, B_WANTED);
                                wakeup(bp);
                        }
+                       if (ISSET(bp->b_flags, B_DMA) && needda)
+                               wakeup(&needda);
                        if (bp->b_vp != NULL)
                                RB_REMOVE(buf_rb_bufs,
                                    &bp->b_vp->v_bufs_tree, bp);
@@ -836,6 +882,10 @@ brelse(struct buf *bp)
                wakeup(&nobuffers);
        }
 
+       if (ISSET(bp->b_flags, B_DMA) && needda) {
+               wakeup(&needda);
+       }
+
        /* Wake up any processes waiting for any buffer to become free. */
        if (needbuffer && bcstats.numbufpages < hipages &&
            bcstats.kvaslots_avail > RESERVE_SLOTS) {
@@ -1029,6 +1079,8 @@ buf_get(struct vnode *vp, daddr64_t blkn
                return (NULL);
        }
 
+       /* Mark buffer as the cache's */
+       SET(bp->b_flags, B_BC);
        bp->b_freelist.tqe_next = NOLIST;
        bp->b_synctime = time_uptime + 300;
        bp->b_dev = NODEV;
@@ -1068,6 +1120,7 @@ buf_get(struct vnode *vp, daddr64_t blkn
        if (size) {
                buf_alloc_pages(bp, round_page(size));
                buf_map(bp);
+               buf_daq_add(bp);
        }
 
        splx(s);
@@ -1238,6 +1291,108 @@ biodone(struct buf *bp)
        }
 }
 
+/*
+ * Add buf to the tail of the dma reachable queue and ensure that it
+ * is dma reachable.
+ */
+void
+buf_daq_add(struct buf *buf)
+{
+       struct buf *b;
+       int s;
+
+start:
+       KASSERT(ISSET(buf->b_flags, B_BC));
+       KASSERT(ISSET(buf->b_flags, B_BUSY));
+       KASSERT(buf->b_pobj != NULL);
+       s = splbio();
+       /*
+        * If we are adding to the queue, and we are not the cleaner or
+        * the syncer, ensure we free down below the max
+        */
+       while (b_highpages_total &&
+           curproc != syncerproc && curproc != cleanerproc &&
+           (!ISSET(buf->b_flags, B_DAQ)) && (!ISSET(buf->b_flags, B_DMA)) &&
+           (bcstats.dmapages > (b_dmamaxpages - atop(buf->b_bufsize)))) {
+               b = TAILQ_FIRST(&bufqueue_da);
+               /* find first non-busy buffer */
+               while (b && ISSET(b->b_flags, B_BUSY))
+                       b = TAILQ_NEXT(b, b_qda);
+               if (b == NULL) {
+                       /* no non-busy buffers. */
+                       needda++;
+                       tsleep(&needda, PRIBIO, "needda", 0);
+                       needda--;
+                       splx(s);
+                       goto start;
+               } else {
+                       buf_acquire_nomap(b);
+                       /* move buffer to above dma reachable memory */
+                       TAILQ_REMOVE(&bufqueue_da, b, b_qda);
+                       buf_realloc_pages(b, &high_constraint);
+                       if (ISSET(b->b_flags, B_DMA))
+                               panic("B_DMA after high flip %p", b);
+                       CLR(b->b_flags, B_DAQ);
+                       buf_release(b);
+                       splx(s);
+                       goto start;
+               }
+       }
+       /* Don't copy it if it's already in dma reachable memory. */
+       if (ISSET(buf->b_flags, B_DMA)) {
+               if (ISSET(buf->b_flags, B_DAQ))
+                       TAILQ_REMOVE(&bufqueue_da, buf, b_qda);
+               TAILQ_INSERT_TAIL(&bufqueue_da, buf, b_qda);
+               SET(buf->b_flags, B_DAQ);
+       } else {
+               if (ISSET(buf->b_flags, B_DAQ))
+                       panic("non-dma buf %p on dma queue\n", buf);
+               /* move buf to dma reachable memory */
+               buf_realloc_pages(buf, &dma_constraint);
+               if (!ISSET(buf->b_flags, B_DMA))
+                       panic("non-dma buffer after dma move %p\n", buf);
+               TAILQ_INSERT_TAIL(&bufqueue_da, buf, b_qda);
+               SET(buf->b_flags, B_DAQ);
+       }
+       splx(s);
+       return;
+
+}
+
+/*
+ * Flip some dma reachable cache pages high
+ */
+void
+bufhigh(int delta)
+{
+       psize_t newdmapages;
+       struct buf *b;
+       int s;
+       if (!b_highpages_total)
+               return;
+       s = splbio();
+       newdmapages = bcstats.dmapages - delta;
+       while ((bcstats.dmapages > newdmapages) &&
+           (b = TAILQ_FIRST(&bufqueue_da))) {
+               while (ISSET(b->b_flags, B_BUSY))
+                       b = TAILQ_NEXT(b, b_qda);
+               if (b != NULL) {
+                       buf_acquire_nomap(b);
+                       /* move buffer to above dma reachable memory */
+                       buf_realloc_pages(b, &high_constraint);
+                       if (ISSET(b->b_flags, B_DMA))
+                               panic("DMA flagged buffer after high flip %p",
+                                   b);
+                       TAILQ_REMOVE(&bufqueue_da, b, b_qda);
+                       CLR(b->b_flags, B_DAQ);
+                       buf_release(b);
+               }
+       }
+       wakeup(&needda);
+       splx(s);
+}
+
+
 #ifdef DDB
 void   bcstats_print(int (*)(const char *, ...) /* __attribute__((__format__(__kprintf__,1,2))) */);
 /*
@@ -1252,8 +1407,8 @@ bcstats_print(
            bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs);
        (*pr)("kvaslots %lld avail kva slots %lld\n",
            bcstats.kvaslots, bcstats.kvaslots_avail);
-       (*pr)("bufpages %lld, dirtypages %lld\n",
-           bcstats.numbufpages,  bcstats.numdirtypages);
+       (*pr)("total bufpages %lld, dmapages %lld, dirtypages %lld\n",
+           bcstats.numbufpages, bcstats.dmapages, bcstats.numdirtypages);
        (*pr)("pendingreads %lld, pendingwrites %lld\n",
            bcstats.pendingreads, bcstats.pendingwrites);
 }
Index: sys/kern/vfs_biomem.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_biomem.c,v
retrieving revision 1.23
diff -u -p -r1.23 vfs_biomem.c
--- sys/kern/vfs_biomem.c       18 Jan 2013 10:07:37 -0000      1.23
+++ sys/kern/vfs_biomem.c       10 Feb 2013 19:04:12 -0000
@@ -267,6 +267,7 @@ void
 buf_alloc_pages(struct buf *bp, vsize_t size)
 {
        voff_t offs;
+       int i;
 
        KASSERT(size == round_page(size));
        KASSERT(bp->b_pobj == NULL);
@@ -278,8 +279,18 @@ buf_alloc_pages(struct buf *bp, vsize_t 
 
        KASSERT(buf_page_offset > 0);
 
-       uvm_pagealloc_multi(buf_object, offs, size, UVM_PLA_WAITOK);
+       do {
+               i = uvm_pagealloc_multi(buf_object, offs, size,
+                   UVM_PLA_NOWAIT);
+               if (i == 0)
+                       break;
+       } while (bufbackoff(&dma_constraint, 100) == 0);
+       if (i != 0)
+               i = uvm_pagealloc_multi(buf_object, offs, size,
+                   UVM_PLA_WAITOK);
        bcstats.numbufpages += atop(size);
+       bcstats.dmapages += atop(size);
+       SET(bp->b_flags, B_DMA);
        bp->b_pobj = buf_object;
        bp->b_poffs = offs;
        bp->b_bufsize = size;
@@ -294,6 +305,7 @@ buf_free_pages(struct buf *bp)
 
        KASSERT(bp->b_data == NULL);
        KASSERT(uobj != NULL);
+       KASSERT(!ISSET(bp->b_flags, B_DAQ));
        splassert(IPL_BIO);
 
        off = bp->b_poffs;
@@ -307,10 +319,65 @@ buf_free_pages(struct buf *bp)
                pg->wire_count = 0;
                uvm_pagefree(pg);
                bcstats.numbufpages--;
+               if (ISSET(bp->b_flags, B_DMA))
+                       bcstats.dmapages--;
        }
+       CLR(bp->b_flags, B_DMA);
 }
 
-/*
- * XXX - it might make sense to make a buf_realloc_pages to avoid
- *       bouncing through the free list all the time.
- */
+/* Reallocate a buf into a particular pmem range specified by "where". */
+void
+buf_realloc_pages(struct buf *bp, struct uvm_constraint_range *where)
+{
+       vaddr_t va;
+       int dma;
+       int i;
+
+       splassert(IPL_BIO);
+       KASSERT(ISSET(bp->b_flags, B_BUSY));
+       dma = ISSET(bp->b_flags, B_DMA);
+
+       /* if the original buf is mapped, unmap it */
+       if (bp->b_data != NULL) {
+               va = (vaddr_t)bp->b_data;
+               pmap_kremove(va, bp->b_bufsize);
+               pmap_update(pmap_kernel());
+       }
+
+       i = 0;
+       do {
+               i = uvm_pagerealloc_multi(bp->b_pobj, bp->b_poffs,
+                   bp->b_bufsize, UVM_PLA_NOWAIT, where);
+               if (i == 0)
+                       break;
+       } while (bufbackoff(where, 100) == 0);
+       if (i != 0)
+               (void) uvm_pagerealloc_multi(bp->b_pobj, bp->b_poffs,
+                   bp->b_bufsize, UVM_PLA_WAITOK, where);
+
+       /*
+        * do this now, and put it back later when we know where we are
+        */
+       if (dma)
+               bcstats.dmapages -= atop(bp->b_bufsize);
+
+       dma = 1;
+       /* if the original buf was mapped, re-map it */
+       for (i = 0; i < atop(bp->b_bufsize); i++) {
+               struct vm_page *pg = uvm_pagelookup(bp->b_pobj,
+                   bp->b_poffs + ptoa(i));
+               KASSERT(pg != NULL);
+               if  (!PADDR_IS_DMA_REACHABLE(VM_PAGE_TO_PHYS(pg)))
+                       dma = 0;
+               if (bp->b_data != NULL) {
+                       pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
+                           VM_PROT_READ|VM_PROT_WRITE);
+                       pmap_update(pmap_kernel());
+               }
+       }
+       if (dma) {
+               SET(bp->b_flags, B_DMA);
+               bcstats.dmapages += atop(bp->b_bufsize);
+       } else
+               CLR(bp->b_flags, B_DMA);
+}
Index: sys/kern/vfs_vops.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_vops.c,v
retrieving revision 1.4
diff -u -p -r1.4 vfs_vops.c
--- sys/kern/vfs_vops.c 2 Jul 2011 15:52:25 -0000       1.4
+++ sys/kern/vfs_vops.c 10 Feb 2013 19:04:12 -0000
@@ -613,6 +613,11 @@ VOP_STRATEGY(struct buf *bp)
 
        if (bp->b_vp->v_op->vop_strategy == NULL)
                return (EOPNOTSUPP);
+       /*
+        * Flip buffer to dma reachable memory if necessary.
+        */
+       if (ISSET(bp->b_flags, B_BC))
+               buf_daq_add(bp);
 
        return ((bp->b_vp->v_op->vop_strategy)(&a));
 }
Index: sys/sys/buf.h
===================================================================
RCS file: /cvs/src/sys/sys/buf.h,v
retrieving revision 1.83
diff -u -p -r1.83 buf.h
--- sys/sys/buf.h       18 Jan 2013 08:52:04 -0000      1.83
+++ sys/sys/buf.h       10 Feb 2013 19:04:12 -0000
@@ -165,6 +165,7 @@ struct buf {
        LIST_ENTRY(buf) b_list;         /* All allocated buffers. */
        LIST_ENTRY(buf) b_vnbufs;       /* Buffer's associated vnode. */
        TAILQ_ENTRY(buf) b_freelist;    /* Free list position if not active. */
+       TAILQ_ENTRY(buf) b_qda;         /* dma reachable queue position */
        time_t  b_synctime;             /* Time this buffer should be flushed */
        struct  proc *b_proc;           /* Associated proc; NULL if kernel. */
        volatile long   b_flags;        /* B_* flags. */
@@ -234,12 +235,15 @@ struct buf {
 #define        B_SCANNED       0x00100000      /* Block already pushed during sync */
 #define        B_PDAEMON       0x00200000      /* I/O started by pagedaemon */
 #define        B_RELEASED      0x00400000      /* free this buffer after its kvm */
+#define B_BC           0x00800000      /* Managed by the Buffer Cache. */
+#define B_DMA          0x01000000      /* DMA reachable. */
+#define B_DAQ          0x02000000      /* Buf is on the DMA reachable Queue */
 
 #define        B_BITS  "\20\001AGE\002NEEDCOMMIT\003ASYNC\004BAD\005BUSY" \
     "\006CACHE\007CALL\010DELWRI\011DONE\012EINTR\013ERROR" \
     "\014INVAL\015NOCACHE\016PHYS\017RAW\020READ" \
     "\021WANTED\022WRITEINPROG\023XXX(FORMAT)\024DEFERRED" \
-    "\025SCANNED\026DAEMON\027RELEASED"
+    "\025SCANNED\026DAEMON\027RELEASED\030BC\031DMA\032DAQ"
 
 /*
  * This structure describes a clustered I/O.  It is stored in the b_saveaddr
@@ -305,6 +309,7 @@ void        bremfree(struct buf *);
 void   bufinit(void);
 void   buf_dirty(struct buf *);
 void    buf_undirty(struct buf *);
+void   buf_daq_add(struct buf *);
 int    bwrite(struct buf *);
 struct buf *getblk(struct vnode *, daddr64_t, int, int, int);
 struct buf *geteblk(int);
@@ -328,7 +333,8 @@ int buf_dealloc_mem(struct buf *);
 void   buf_fix_mapping(struct buf *, vsize_t);
 void   buf_alloc_pages(struct buf *, vsize_t);
 void   buf_free_pages(struct buf *);
-
+struct uvm_constraint_range;
+void   buf_realloc_pages(struct buf *, struct uvm_constraint_range *);
 
 void   minphys(struct buf *bp);
 int    physio(void (*strategy)(struct buf *), dev_t dev, int flags,
Index: sys/sys/mount.h
===================================================================
RCS file: /cvs/src/sys/sys/mount.h,v
retrieving revision 1.108
diff -u -p -r1.108 mount.h
--- sys/sys/mount.h     5 Sep 2012 17:01:06 -0000       1.108
+++ sys/sys/mount.h     10 Feb 2013 19:04:12 -0000
@@ -483,6 +483,7 @@ extern long buflowpages, bufhighpages, b
 #define BUFPAGES_INACT (((bcstats.numcleanpages - buflowpages) < 0) ? 0 \
     : bcstats.numcleanpages - buflowpages)
 extern int bufcachepercent;
+extern void bufhigh(int);
 extern void bufadjust(int);
 struct uvm_constraint_range;
 extern int bufbackoff(struct uvm_constraint_range*, long);
Index: sys/uvm/uvm_extern.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.104
diff -u -p -r1.104 uvm_extern.h
--- sys/uvm/uvm_extern.h        9 Mar 2012 13:01:29 -0000       1.104
+++ sys/uvm/uvm_extern.h        10 Feb 2013 19:04:12 -0000
@@ -681,11 +681,11 @@ struct vm_page            *uvm_pagealloc(struct uv
                                voff_t, struct vm_anon *, int);
 vaddr_t                        uvm_pagealloc_contig(vaddr_t, vaddr_t,
                                vaddr_t, vaddr_t);
-void                   uvm_pagealloc_multi(struct uvm_object *, voff_t,
+int                    uvm_pagealloc_multi(struct uvm_object *, voff_t,
                            vsize_t, int);
 void                   uvm_pagerealloc(struct vm_page *, 
                                             struct uvm_object *, voff_t);
-void                   uvm_pagerealloc_multi(struct uvm_object *, voff_t,
+int                    uvm_pagerealloc_multi(struct uvm_object *, voff_t,
                            vsize_t, int, struct uvm_constraint_range *);
 /* Actually, uvm_page_physload takes PF#s which need their own type */
 void                   uvm_page_physload(paddr_t, paddr_t, paddr_t,
@@ -702,6 +702,7 @@ void                        uvm_aio_aiodone(struct buf *);
 void                   uvm_pageout(void *);
 void                   uvm_aiodone_daemon(void *);
 void                   uvm_wait(const char *);
+int                    uvm_advise(void);
 
 /* uvm_pglist.c */
 int                    uvm_pglistalloc(psize_t, paddr_t,
Index: sys/uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.117
diff -u -p -r1.117 uvm_page.c
--- sys/uvm/uvm_page.c  3 Mar 2013 22:37:58 -0000       1.117
+++ sys/uvm/uvm_page.c  4 Mar 2013 23:00:46 -0000
@@ -878,19 +878,21 @@ uvm_pglistfree(struct pglist *list)
  * interface used by the buffer cache to allocate a buffer at a time.
  * The pages are allocated wired in DMA accessible memory
  */
-void
+int
 uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
     int flags)
 {
        struct pglist    plist;
        struct vm_page  *pg;
-       int              i;
+       int              i, r;
 
 
        TAILQ_INIT(&plist);
-       (void) uvm_pglistalloc(size, dma_constraint.ucr_low,
+       r = uvm_pglistalloc(size, dma_constraint.ucr_low,
            dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
-           UVM_PLA_WAITOK);
+           flags);
+       if (r != 0)
+               return(r);
        i = 0;
        while ((pg = TAILQ_FIRST(&plist)) != NULL) {
                pg->wire_count = 1;
@@ -899,6 +901,7 @@ uvm_pagealloc_multi(struct uvm_object *o
                TAILQ_REMOVE(&plist, pg, pageq);
                uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
        }
+       return(0);
 }
 
 /*
@@ -906,21 +909,23 @@ uvm_pagealloc_multi(struct uvm_object *o
  * The pages are reallocated wired outside the DMA accessible region.
  *
  */
-void
+int
 uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
     int flags, struct uvm_constraint_range *where)
 {
        struct pglist    plist;
        struct vm_page  *pg, *tpg;
-       int              i;
+       int              i,r;
        voff_t          offset;
 
 
        TAILQ_INIT(&plist);
        if (size == 0)
                panic("size 0 uvm_pagerealloc");
-       (void) uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
-           0, &plist, atop(round_page(size)), UVM_PLA_WAITOK);
+       r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
+           0, &plist, atop(round_page(size)), flags);
+       if (r != 0)
+               return(r);
        i = 0;
        while((pg = TAILQ_FIRST(&plist)) != NULL) {
                offset = off + ptoa(i++);
@@ -933,6 +938,7 @@ uvm_pagerealloc_multi(struct uvm_object 
                uvm_pagefree(tpg);
                uvm_pagealloc_pg(pg, obj, offset, NULL);
        }
+       return(0);
 }
 
 /*
Index: usr.bin/systat//iostat.c
===================================================================
RCS file: /cvs/src/usr.bin/systat/iostat.c,v
retrieving revision 1.40
diff -u -p -r1.40 iostat.c
--- usr.bin/systat//iostat.c    19 Sep 2011 14:48:04 -0000      1.40
+++ usr.bin/systat//iostat.c    10 Feb 2013 19:04:13 -0000
@@ -222,6 +222,10 @@ showbcache(void)
        print_fld_ssize(FLD_IO_SVAL, bccur.numbufpages);
        end_line();
 
+       print_fld_str(FLD_IO_SSTR, "dma pages");
+       print_fld_ssize(FLD_IO_SVAL, bccur.dmapages);
+       end_line();
+
        print_fld_str(FLD_IO_SSTR, "dirty pages");
        print_fld_ssize(FLD_IO_SVAL, bccur.numdirtypages);
        end_line();
