There were two problems with the vslock_device functions that are
used for magic page flipping for physio and bigmem.

 - Fix the error handling so that we actually free what we allocated
   when bailing out on error (a simplified sketch of this unwind
   pattern follows below).
 - uvm_km_pgremove_intrsafe() uses the kernel mappings to figure out
   which pages need to be freed, so don't remove the mappings before
   freeing the pages (this is theoretically incorrect and will be
   fixed soon).
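
To make the first bullet concrete, here is a small, self-contained
sketch of that unwind pattern, with plain malloc(3) standing in for
the uvm allocators and hypothetical names throughout; it illustrates
the idea only and is not the kernel code:

#include <stdlib.h>

/* Two resources that must be acquired together and released together. */
struct bufs {
	void	*a;
	void	*b;
};

int
grab_bufs(struct bufs *bp, size_t sz)
{
	int error = 0;

	if ((bp->a = malloc(sz)) == NULL)
		return (-1);		/* nothing acquired yet, plain return */

	if ((bp->b = malloc(sz)) == NULL) {
		error = -1;
		goto out_a;		/* undo only what we already hold */
	}

	return (0);			/* success: caller owns both buffers */

out_a:
	free(bp->a);
	bp->a = NULL;
	return (error);
}

void
put_bufs(struct bufs *bp)
{
	/* Release in the reverse order of acquisition. */
	free(bp->b);
	free(bp->a);
}

int
main(void)
{
	struct bufs b;

	if (grab_bufs(&b, 4096) != 0)
		return (1);
	put_bufs(&b);
	return (0);
}

The point is that every failure path after the first step runs the
cleanup for everything acquired before it, which is what the
out_unwire/out_unmap labels in the diff do for the wired user pages
and the kernel va range.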

This makes fsck happy on bigmem machines (it no longer leaks all the
DMA-reachable memory).

Index: uvm/uvm_glue.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.55
diff -u -r1.55 uvm_glue.c
--- uvm/uvm_glue.c      2 Jul 2010 22:38:32 -0000       1.55
+++ uvm/uvm_glue.c      1 Apr 2011 15:08:40 -0000
@@ -222,8 +222,10 @@
                paddr_t pa;
 
                if (!pmap_extract(p->p_vmspace->vm_map.pmap,
-                   start + ptoa(i), &pa))
-                       return (EFAULT);
+                   start + ptoa(i), &pa)) {
+                       error = EFAULT;
+                       goto out_unwire;
+               }
                if (!PADDR_IS_DMA_REACHABLE(pa))
                        break;
        }
@@ -233,13 +235,15 @@
        }
 
        if ((va = uvm_km_valloc(kernel_map, sz)) == 0) {
-               return (ENOMEM);
+               error = ENOMEM;
+               goto out_unwire;
        }
 
        TAILQ_INIT(&pgl);
        error = uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low,
            dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_WAITOK);
-       KASSERT(error == 0);
+       if (error)
+               goto out_unmap;
 
        sva = va;
        while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
@@ -252,7 +256,16 @@
        KASSERT(va == sva + sz);
        *retp = (void *)(sva + off);
 
-       error = copyin(addr, *retp, len);       
+       if ((error = copyin(addr, *retp, len)) == 0)
+               return 0;
+
+       uvm_km_pgremove_intrsafe(sva, sva + sz);
+       pmap_kremove(sva, sz);
+       pmap_update(pmap_kernel());
+out_unmap:
+       uvm_km_free(kernel_map, sva, sz);
+out_unwire:
+       uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
        return (error);
 }
 
@@ -277,9 +290,9 @@
                return;
 
        kva = trunc_page((vaddr_t)map);
+       uvm_km_pgremove_intrsafe(kva, kva + sz);
        pmap_kremove(kva, sz);
        pmap_update(pmap_kernel());
-       uvm_km_pgremove_intrsafe(kva, kva + sz);
        uvm_km_free(kernel_map, kva, sz);
 }
