The current "init_alloc_aligned" function will skip regions of memory
during an allocation if they do not contain "size" bytes of contiguous
physical memory, and it will never reclaim them later. (It cannot do
that with a simple "available_next".)

In addition, it is unable to reserve any low memory
(<MAX_DMA_ADDRESS) during the allocation phase because it does not take
a "goal" ("available_next" alone cannot implement a goal). In this
patch, "init_alloc_aligned" behaves exactly like the old one, and a new
"_init_alloc_aligned" is added for DMA allocation (if you do not want
vm_page structures backing it); this function also reclaims all
pages that were skipped during contiguous memory allocation.

[Note: The bitmap is stored starting at physical address 0x4000, which
I think is a safe place during initialization.]

("The patch is attached")
diff --git a/i386/i386/bitops.h b/i386/i386/bitops.h
new file mode 100644
index 0000000..877b3ad
--- /dev/null
+++ b/i386/i386/bitops.h
@@ -0,0 +1,50 @@
+#ifndef _MACH_I386_BITOPS_H_
+#define _MACH_I386_BITOPS_H_
+
+#include <mach/vm_param.h>
+
+void set_bit(unsigned long nr, volatile vm_offset_t addr)
+{
+	test_and_set_bit(nr,addr);
+}
+
+void clear_bit(unsigned long nr, volatile vm_offset_t addr)
+{
+	test_and_clear_bit(nr,addr);
+}
+
+int test_and_set_bit(unsigned long nr, volatile vm_offset_t addr)
+{
+	
+        int oldbit;
+
+	__asm__ __volatile__( 
+	      "btsl %2,%1\n\tsbbl %0,%0"
+	      :"=r" (oldbit),"=m" (*(void volatile *)addr)
+	      :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+
+int test_and_clear_bit(unsigned long nr, volatile vm_offset_t addr)
+{
+
+	int oldbit;
+	__asm__ __volatile__(
+		"btrl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (*(void volatile *)addr)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+
+int test_bit(int nr, unsigned long * addr)
+{
+	int     mask;
+	addr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	return ((mask & (*addr)) != 0);
+}
+
+#endif
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index ca00078..6afb213 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -32,10 +32,9 @@
  *	Basic initialization for I386 - ISA bus machines.
  */
 
+#include <i386/bitops.h>
 #include <string.h>
-
 #include <device/cons.h>
-
 #include <mach/vm_param.h>
 #include <mach/vm_prot.h>
 #include <mach/machine.h>
@@ -74,6 +73,7 @@
 #include <xen/xen.h>
 #endif	/* MACH_XEN */
 
+
 /* Location of the kernel's symbol table.
    Both of these are 0 if none is available.  */
 #if MACH_KDB
@@ -104,8 +104,11 @@ unsigned long la_shift = VM_MIN_KERNEL_ADDRESS;
 struct multiboot_info boot_info;
 #endif	/* MACH_XEN */
 
+/* To make sure that the boot memory allocator is initialized */
+int boot_allocator_initialized= 0;
 /* Command line supplied to kernel.  */
 char *kernel_cmdline = "";
+int kernel_cmdline_len = 0;
 
 /* This is used for memory initialization:
    it gets bumped up through physical memory
@@ -334,43 +337,54 @@ i386at_init(void)
 	 */
 	mem_size_init();
 
+	init_boot_allocator();
+
 #ifdef MACH_XEN
 	kernel_cmdline = (char*) boot_info.cmd_line;
 #else	/* MACH_XEN */
 	/* Copy content pointed by boot_info before losing access to it when it
 	 * is too far in physical memory.  */
+
 	if (boot_info.flags & MULTIBOOT_CMDLINE) {
 		vm_offset_t addr;
-		int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
-		assert(init_alloc_aligned(round_page(len), &addr));
+		assert(init_alloc_aligned(kernel_cmdline_len, &addr));
 		kernel_cmdline = (char*) phystokv(addr);
-		memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), len);
+		memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), kernel_cmdline_len);
+		bootmap_free(atop(boot_info.cmdline - phys_first_addr), atop(round_page(kernel_cmdline_len)));
 		boot_info.cmdline = addr;
 	}
 
+
+
 	if (boot_info.flags & MULTIBOOT_MODS) {
 		struct multiboot_module *m;
 		vm_offset_t addr;
-		int i;
-
-		assert(init_alloc_aligned(round_page(boot_info.mods_count * sizeof(*m)), &addr));
-		m = (void*) phystokv(addr);
-		memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
+		int i,size;
+		
+		size = boot_info.mods_count * sizeof(*m);
+		assert(init_alloc_aligned(size, &addr));
+		m = (struct multiboot_module *) phystokv(addr);
+		memcpy(m, (void*) phystokv(boot_info.mods_addr), size);
+		bootmap_free(atop(round_page(boot_info.mods_addr)), atop(round_page(size) ));
 		boot_info.mods_addr = addr;
 
+
 		for (i = 0; i < boot_info.mods_count; i++) {
-			vm_size_t size = m[i].mod_end - m[i].mod_start;
-			assert(init_alloc_aligned(round_page(size), &addr));
+			vm_size_t size = m[i].mod_end - m[i].mod_start + 1;
+			assert(init_alloc_aligned(size, &addr));
 			memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
+			bootmap_free(atop(m[i].mod_start),atop(round_page(size)));
 			m[i].mod_start = addr;
-			m[i].mod_end = addr + size;
-
+			m[i].mod_end = addr + size - 1;
 			size = strlen((char*) phystokv(m[i].string)) + 1;
-			assert(init_alloc_aligned(round_page(size), &addr));
+			assert(init_alloc_aligned(size, &addr));
 			memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
+			bootmap_free(atop(m[i].string-phys_first_addr),atop(round_page(size)));
 			m[i].string = addr;
 		}
 	}
+
+
 #endif	/* MACH_XEN */
 
 	/*
@@ -379,8 +393,11 @@ i386at_init(void)
 	 *	Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
 	 *	XXX make the BIOS page (page 0) read-only.
 	 */
+
 	pmap_bootstrap();
 
+
+
 	/*
 	 * Turn paging on.
 	 * We'll have to temporarily install a direct mapping
@@ -395,7 +412,7 @@ i386at_init(void)
 #if PAE
 	/* PAE page tables are 2MB only */
 	kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + 1] =
-		kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + 1];
+	kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + 1];
 	kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + 2] =
 		kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + 2];
 #endif	/* PAE */
@@ -486,6 +503,7 @@ i386at_init(void)
 	   as the interrupt stack for now.  Later this will have to change,
 	   because the init stack will get freed after bootup.  */
 	asm("movl %%esp,%0" : "=m" (int_stack_top));
+
 }
 
 /*
@@ -569,6 +587,7 @@ void c_boot_entry(vm_offset_t bi)
 	  }
 #endif	/* MACH_KDB */
 
+
 	machine_slot[0].is_cpu = TRUE;
 	machine_slot[0].running = TRUE;
 	machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386;
@@ -593,6 +612,7 @@ void c_boot_entry(vm_offset_t bi)
 	/*
 	 * Start the system.
 	 */
+
 	setup_main();
 
 }
@@ -650,127 +670,184 @@ unsigned int pmap_free_pages(void)
 	return atop(avail_remaining);
 }
 
-/* Always returns page-aligned regions.  */
-boolean_t
-init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+typedef struct init_bootmap {
+	vm_offset_t bitmap;
+	vm_offset_t next_avail;
+} init_bootmap_t;
+
+static init_bootmap_t bootmap;
+
+
+void
+bootmap_free(unsigned long sidx, unsigned long pages)
 {
-	vm_offset_t addr;
+	unsigned long eidx = sidx + pages;
+	for (; sidx < eidx ; sidx++)
+		clear_bit(sidx,bootmap.bitmap);
+}
+
+void
+bootmap_allocate(unsigned long sidx, unsigned long pages)
+{
+	unsigned long eidx = sidx + pages;
+	for (; sidx < eidx ; sidx++)
+		set_bit(sidx,bootmap.bitmap);
+}
+
+void
+init_boot_allocator(void){
+
+	unsigned long sidx,eidx;
+	extern char start[], end[];
+	unsigned long map_size;
+
+	map_size = ((atop(round_page(phys_last_addr - phys_first_addr))) + 7) >> 3;
+	map_size = (map_size + sizeof(long) - 1UL) & ~(sizeof(long) -1UL);
+	bootmap.bitmap = 0x4000; /* fixed low-memory location (16 KiB, page index 4) — assumed free during early init */
+	bootmap.next_avail = 16*1024*1024;
+	memset((void *)bootmap.bitmap, 0x00, map_size); /* map is initially free */
 
 #ifdef MACH_HYP
 	/* There is none */
-	if (!avail_next)
-		avail_next = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
-#else	/* MACH_HYP */
-	extern char start[], end[];
-	int i;
-	static int wrapped = 0;
-
-	/* Memory regions to skip.  */
-	vm_offset_t cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
-		? boot_info.cmdline : 0;
-	vm_offset_t cmdline_end_pa = cmdline_start_pa
-		? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
-		: 0;
-	vm_offset_t mods_start_pa = boot_info.flags & MULTIBOOT_MODS
-		? boot_info.mods_addr : 0;
-	vm_offset_t mods_end_pa = mods_start_pa
-		? mods_start_pa
-		  + boot_info.mods_count * sizeof(struct multiboot_module)
-		: 0;
-
-	retry:
+	if (bootmap.next_avail == 0)
+		bootmap.next_avail = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
 #endif	/* MACH_HYP */
 
-	/* Page-align the start address.  */
-	avail_next = round_page(avail_next);
 
-#ifndef MACH_HYP
-	/* Start with memory above 16MB, reserving the low memory for later. */
-	/* Don't care on Xen */
-	if (!wrapped && phys_last_addr > 16 * 1024*1024)
-	  {
-	    if (avail_next < 16 * 1024*1024)
-	      avail_next = 16 * 1024*1024;
-	    else if (avail_next == phys_last_addr)
-	      {
-		/* We have used all the memory above 16MB, so now start on
-		   the low memory.  This will wind up at the end of the list
-		   of free pages, so it should not have been allocated to any
-		   other use in early initialization before the Linux driver
-		   glue initialization needs to allocate low memory.  */
-		avail_next = 0x1000;
-		wrapped = 1;
-	      }
-	  }
-#endif	/* MACH_HYP */
 
-	/* Check if we have reached the end of memory.  */
-        if (avail_next == 
-		(
+
 #ifndef MACH_HYP
-		wrapped ? 16 * 1024*1024 : 
-#endif	/* MACH_HYP */
-		phys_last_addr))
-		return FALSE;
 
-	/* Tentatively assign the current location to the caller.  */
-	addr = avail_next;
 
-	/* Bump the pointer past the newly allocated region
-	   and see where that puts us.  */
-	avail_next += size;
 
-#ifndef MACH_HYP
-	/* Skip past the I/O and ROM area.  */
-	if ((avail_next > (boot_info.mem_lower * 0x400)) && (addr < 0x100000))
-	{
-		avail_next = 0x100000;
-		goto retry;
-	}
+	/* Reserve the first Page */
+	bootmap_allocate(0,1);
 
-	/* Skip our own kernel code, data, and bss.  */
-	if ((avail_next > (vm_offset_t)start) && (addr < (vm_offset_t)end))
-	{
-		avail_next = (vm_offset_t)end;
-		goto retry;
-	}
+	/* Reserve the area of the bootmap.bitmap*/
+	bootmap_allocate(atop(bootmap.bitmap-phys_first_addr),atop(round_page(map_size)));
 
-	/* Skip any areas occupied by valuable boot_info data.  */
-	if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
-	{
-		avail_next = cmdline_end_pa;
-		goto retry;
-	}
-	if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
-	{
-		avail_next = mods_end_pa;
-		goto retry;
-	}
-	if ((avail_next > kern_sym_start) && (addr < kern_sym_end))
+	/* Reserve the I/O and ROM area.  */
+	sidx = atop(boot_info.mem_lower * 0x400);
+	eidx = atop(0x100000);
+	bootmap_allocate(sidx, eidx-sidx+1);
+
+
+
+	/* Reserve our own kernel code, data, and bss.  */
+	sidx = atop(start);
+	eidx = atop(round_page(end));
+	bootmap_allocate(sidx, eidx-sidx+1);
+
+
+
+
+	/* Reserve any areas occupied by valuable boot_info data.  */
+	sidx = atop(kern_sym_start);
+	eidx = atop(round_page(kern_sym_end));
+	bootmap_allocate(sidx, eidx-sidx+1);
+
+
+
+	if (boot_info.flags & MULTIBOOT_CMDLINE)
 	{
-		avail_next = kern_sym_end;
-		goto retry;
+		kernel_cmdline_len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
+		sidx = atop(boot_info.cmdline - phys_first_addr);
+		bootmap_allocate(sidx, atop(round_page(kernel_cmdline_len)));
 	}
+
+
+
+	/* Reserve some area for loaded modules */
 	if (boot_info.flags & MULTIBOOT_MODS)
 	{
 		struct multiboot_module *m = (struct multiboot_module *)
 			phystokv(boot_info.mods_addr);
+
+		sidx = atop(boot_info.mods_addr - phys_first_addr);
+
+		bootmap_allocate(sidx, atop(round_page(boot_info.mods_count * sizeof(*m))));
+
+		int i;
 		for (i = 0; i < boot_info.mods_count; i++)
 		{
-			if ((avail_next > m[i].mod_start)
-			    && (addr < m[i].mod_end))
-			{
-				avail_next = m[i].mod_end;
-				goto retry;
-			}
-			/* XXX string */
+
+			sidx = atop(m[i].mod_start - phys_first_addr);
+			eidx = atop(round_page(m[i].mod_end) - phys_first_addr );
+			bootmap_allocate(sidx, eidx-sidx+1);
+			sidx = atop(m[i].string - phys_first_addr);
+			bootmap_allocate(sidx, atop(round_page(strlen((char*)phystokv(m[i].string))+1)));
 		}
 	}
+
+
+
 #endif	/* MACH_HYP */
+	
+	boot_allocator_initialized = 1;
+}
+
+boolean_t
+init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+{
+	size = round_page(size);
+	if(!_init_alloc_aligned(size, addrp,bootmap.next_avail)){
+		if(_init_alloc_aligned(size,addrp,0)){
+			bootmap.next_avail = *addrp + size;
+			return TRUE;
+		}else{
+			return FALSE;
+		}
+	}else{
+		bootmap.next_avail = *addrp + size;
+		return TRUE;
+	}
+}
+
+boolean_t
+_init_alloc_aligned(vm_size_t size, vm_offset_t *addrp, vm_offset_t goal)
+{
+	if (!boot_allocator_initialized){
+		init_boot_allocator();
+	}
+
+
+	vm_offset_t map = bootmap.bitmap;
+	unsigned long sidx=0, idx=0,last_page,first_page;	
+	vm_size_t size_found = 0;
+
+	size = round_page(size);
+	goal = round_page(goal);
+	last_page = atop(phys_last_addr);
+	first_page = atop(phys_first_addr);
+
+	if (goal >= phys_first_addr && goal+size <= phys_last_addr){
+		sidx = idx = atop(goal - phys_first_addr);
+	}else{
+		sidx = idx = 0;
+	}
+
+	while(1){
+		if(idx+first_page >= last_page)
+			return FALSE;
+				
+		if(test_bit(idx,map)){
+			sidx = ++idx;
+			size_found = 0;
+			continue;
+		}
+
+		size_found += PAGE_SIZE;
+		if (size_found >= size)
+			break;
+		
+		idx++;
+	}
+
 
 	avail_remaining -= size;
+	*addrp = ptoa(sidx) + phys_first_addr;
 
-	*addrp = addr;
+	bootmap_allocate(sidx,atop(size));
 	return TRUE;
 }
 

Reply via email to