I have modified the allocation and deallocation scheme in the ext2fs pager code to be a bit smarter. Now, when we need to allocate a single page, we preallocate several more at the same time. The code also reduces lock contention by waiting as long as possible before taking the lock.
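
In case the diff below is hard to read in mail, here is a rough, standalone sketch of the idea. It uses POSIX mmap/munmap, sysconf and a pthread mutex in place of the Hurd's vm_page_size and spin_lock, and the names (page_buf_get, page_buf_free, BATCH, MAX_FREE) are only illustrative -- they are not the ones used in pager.c:

#include <assert.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

#define BATCH    4      /* pages mapped per refill */
#define MAX_FREE 32     /* pages kept on the free list */

static pthread_mutex_t free_list_lock = PTHREAD_MUTEX_INITIALIZER;
static void *free_list;        /* singly linked through each page's first word */
static int num_free;

static void *
page_buf_get (void)
{
  size_t pgsz = (size_t) sysconf (_SC_PAGESIZE);
  void *buf;

  /* Unlocked peek: a stale value only costs a needless lock/unlock or
     an extra mmap, never a wrong answer.  */
  if (free_list)
    {
      pthread_mutex_lock (&free_list_lock);
      buf = free_list;
      if (buf)
        {
          assert (num_free > 0);
          free_list = *(void **) buf;   /* pop the head */
          num_free--;
          pthread_mutex_unlock (&free_list_lock);
          return buf;
        }
      pthread_mutex_unlock (&free_list_lock);
    }

  /* Miss: map BATCH pages, hand back the first and chain the rest.  */
  buf = mmap (0, pgsz * BATCH, PROT_READ | PROT_WRITE,
              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (buf == MAP_FAILED)
    return 0;

  char *p = (char *) buf + pgsz;
  for (int i = 1; i < BATCH - 1; i++, p += pgsz)
    *(void **) p = p + pgsz;            /* link spare page i to page i + 1 */

  pthread_mutex_lock (&free_list_lock);
  *(void **) p = free_list;             /* last spare points at the old head */
  free_list = (char *) buf + pgsz;      /* the spares become the new head */
  num_free += BATCH - 1;
  pthread_mutex_unlock (&free_list_lock);

  return buf;
}

static void
page_buf_free (void *buf)
{
  /* Unlocked test: keeping one page too many around is harmless.  */
  if (num_free < MAX_FREE)
    {
      pthread_mutex_lock (&free_list_lock);
      *(void **) buf = free_list;
      free_list = buf;
      num_free++;
      pthread_mutex_unlock (&free_list_lock);
    }
  else
    munmap (buf, (size_t) sysconf (_SC_PAGESIZE));
}

The point in both paths is the same as in the patch: read free_list and num_free without the lock only to decide whether there is anything to do, and re-check and modify them only with the lock held.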
I also included two small side fixes: two zero initializers were removed and a comment was fixed.

ext2fs/ChangeLog:

2001-11-17  Neal H Walfield  <[EMAIL PROTECTED]>

	* pager.c (MIN_FREE_PAGE_BUFS): New macro.  Minimum number of page
	buffers to allocate at a time.
	(get_page_buf): free_page_bufs_lock does not need to be held to see
	if there are any free pages.  Only if we think that there are free
	pages do we need the lock to: first, confirm the suspicion and then
	allocate a page.  In the case where there are no free pages (or we
	suspect that there are none), allocate MIN_FREE_PAGE_BUFS and
	connect all of the unused pages to the free_page_bufs list.
	(free_page_buf): Only lock free_page_bufs_lock if a page is to be
	connected to the free_page_bufs list.
	(free_page_bufs): No need to zero initialize a global static.
	(num_free_page_bufs): Likewise.
	(find_block): Correct comment.

Index: pager.c
===================================================================
RCS file: /cvsroot/hurd/hurd/ext2fs/pager.c,v
retrieving revision 1.68
diff -u -p -r1.68 pager.c
--- pager.c	2001/01/07 17:03:55	1.68
+++ pager.c	2001/11/18 15:41:02
@@ -70,10 +70,11 @@ do { spin_lock (&ext2s_pager_stats.lock)
 #endif /* STATS */
 
 #define MAX_FREE_PAGE_BUFS 32
+#define MIN_FREE_PAGE_BUFS 4
 
 static spin_lock_t free_page_bufs_lock = SPIN_LOCK_INITIALIZER;
-static void *free_page_bufs = 0;
-static int num_free_page_bufs = 0;
+static void *free_page_bufs;
+static int num_free_page_bufs;
 
 /* Returns a single page page-aligned buffer.  */
 static void *
@@ -81,20 +82,42 @@ get_page_buf ()
 {
   void *buf;
 
-  spin_lock (&free_page_bufs_lock);
   buf = free_page_bufs;
-  if (buf == 0)
+  if (buf)
     {
-      spin_unlock (&free_page_bufs_lock);
-      buf = mmap (0, vm_page_size, PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
-      if (buf == (void *) -1)
-	buf = 0;
+      spin_lock (&free_page_bufs_lock);
+      buf = free_page_bufs;
+      if (buf)
+	{
+	  assert (num_free_page_bufs > 0);
+	  free_page_bufs = *(void **)buf;
+	  num_free_page_bufs--;
+	  spin_unlock (&free_page_bufs_lock);
+	  return buf;
+	}
+      else
+	spin_unlock (&free_page_bufs_lock);
     }
+
+  buf = mmap (0, vm_page_size * MIN_FREE_PAGE_BUFS,
+	      PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
+  if (buf == MAP_FAILED)
+    return 0;
   else
     {
-      free_page_bufs = *(void **)buf;
-      num_free_page_bufs--;
+      int i;
+      void *p;
+
+      for (i = 1, p = buf + vm_page_size;
+	   i < MIN_FREE_PAGE_BUFS - 1;
+	   i++, p += vm_page_size)
+	*(void **)p = p + vm_page_size;
+
+      spin_lock (&free_page_bufs_lock);
+      *(void **)p = free_page_bufs;
+      free_page_bufs = buf + vm_page_size;
+      num_free_page_bufs += MIN_FREE_PAGE_BUFS - 1;
       spin_unlock (&free_page_bufs_lock);
     }
 
   return buf;
@@ -105,26 +128,25 @@ get_page_buf ()
 static void
 free_page_buf (void *buf)
 {
-  spin_lock (&free_page_bufs_lock);
   if (num_free_page_bufs < MAX_FREE_PAGE_BUFS)
+    /* This test is not locked; however, if we end up with an extra page
+       on the free list, it makes no real difference.  */
     {
+      spin_lock (&free_page_bufs_lock);
       *(void **)buf = free_page_bufs;
       free_page_bufs = buf;
       num_free_page_bufs++;
       spin_unlock (&free_page_bufs_lock);
     }
   else
-    {
-      spin_unlock (&free_page_bufs_lock);
-      munmap (buf, vm_page_size);
-    }
+    munmap (buf, vm_page_size);
 }
 
 /* Find the location on disk of page OFFSET in NODE.  Return the disk block
-   in BLOCK (if unallocated, then return 0).  If *LOCK is 0, then it a reader
+   in BLOCK (if unallocated, then return 0).  If *LOCK is 0, then a reader
    lock is aquired on NODE's ALLOC_LOCK before doing anything, and left
-   locked after return -- even if an error is returned.  0 on success or an
-   error code otherwise is returned. */
+   locked after the return -- even if an error is returned.  0 is returned
+   on success, otherwise an error code.  */
 static error_t
 find_block (struct node *node, vm_offset_t offset, block_t *block,
 	    struct rwlock **lock)

_______________________________________________
Bug-hurd mailing list
[EMAIL PROTECTED]
http://mail.gnu.org/mailman/listinfo/bug-hurd