--- agpgart_be.c.ORIG	Tue Dec 18 21:10:04 2001
+++ agpgart_be.c	Tue Dec 18 22:23:12 2001
@@ -844,11 +844,13 @@
 
 #define AGP_DCACHE_MEMORY 1
 #define AGP_PHYS_MEMORY   2
+#define AGP_SHARED_MEMORY 3
 
 static gatt_mask intel_i810_masks[] =
 {
 	{I810_PTE_VALID, 0},
 	{(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
+	{I810_PTE_VALID, 0},
 	{I810_PTE_VALID, 0}
 };
 
@@ -856,6 +858,10 @@
 	struct pci_dev *i810_dev;	/* device one */
 	volatile u8 *registers;
 	int num_dcache_entries;
+	int num_shared;              /* Number of pages for type shared */
+	int shared_bound_ref;        /* Number of times shared is bound */
+	int shared_alloc_ref;        /* Number of times shared is allocated */
+	unsigned long *shared_mem;   /* the memory array to use for shared */
 } intel_i810_private;
 
 static int intel_i810_fetch_size(void)
@@ -918,11 +924,41 @@
 				 agp_bridge.scratch_page);
 		}
 	}
+
+	intel_i810_private.num_shared = 0;
+	intel_i810_private.shared_bound_ref = 0;
+	intel_i810_private.shared_alloc_ref = 0;
+	intel_i810_private.shared_mem = NULL;
+
 	return 0;
 }
 
 static void intel_i810_cleanup(void)
 {
+	int i;
+	unsigned long *mem = intel_i810_private.shared_mem;
+
+	/* If all our shared clients freed their memory but a
+	 * client still had it bound (probably a client that
+	 * didn't know about shared), we will still have the
+	 * pages allocated. Free them now.
+	 */
+	if(mem == NULL) {
+		intel_i810_private.num_shared = 0;
+	}
+	for(i=0; i<intel_i810_private.num_shared; i++) {
+		if(!mem[i]) {
+			continue;
+		}
+		agp_bridge.agp_destroy_page((unsigned long)
+					    phys_to_virt(mem[i]));
+	}
+	if(mem) {
+		vfree(mem);
+	}
+	intel_i810_private.num_shared = 0;
+	intel_i810_private.shared_mem = NULL;
+
 	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
 	iounmap((void *) intel_i810_private.registers);
 }
@@ -949,8 +985,14 @@
 	if ((pg_start + mem->page_count) > num_entries) {
 		return -EINVAL;
 	}
+	/* Return -EBUSY if the pages being inserted are already occupied;
+	 * shared pages don't return busy. If we have allocated shared
+	 * pages but they are not bound, return -EBUSY; otherwise someone
+	 * who doesn't know about shared could hit a bad page. */
 	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+		if((!PGE_EMPTY(agp_bridge.gatt_table[j])) &&
+		   ((j >= intel_i810_private.num_shared) ||
+		    (!intel_i810_private.shared_bound_ref))){
 			return -EBUSY;
 		}
 	}
@@ -973,7 +1015,46 @@
 		}
 	        if((type == AGP_PHYS_MEMORY) &&
 		   (mem->type == AGP_PHYS_MEMORY)) {
-		   goto insert;
+			goto insert;
+		}
+		if((type == AGP_SHARED_MEMORY) &&
+		   (mem->type == AGP_SHARED_MEMORY)) {
+			if(intel_i810_private.shared_bound_ref) {
+				if(mem->page_count != 
+				   intel_i810_private.num_shared) {
+					return -EINVAL;
+				}
+				printk(KERN_INFO "Insert SHARED 2\n");
+				/* Already Set up, just return */
+				intel_i810_private.shared_bound_ref++;
+				return 0;
+			}
+			else {
+				/* This is the first bind */
+				if(pg_start != 0) {
+					return -EINVAL;
+				}
+				if(!PGE_EMPTY(agp_bridge.gatt_table[0])) {
+					/* Someone is using entry 0 and it
+					   isn't shared. Tough luck. */
+					printk(KERN_INFO "Cannot allocate shared agp memory. Another client is occupying page 0.\n");
+					return -EINVAL;
+				}
+				intel_i810_private.shared_bound_ref++;
+				printk(KERN_INFO "Insert SHARED\n");
+
+				CACHE_FLUSH();
+				for (i = 0, j = pg_start; 
+				     i < mem->page_count; i++, j++) {
+					OUTREG32(intel_i810_private.registers,
+						 I810_PTE_BASE + (j * 4),
+						 mem->memory[i]);
+				}
+				CACHE_FLUSH();
+				
+				agp_bridge.tlb_flush(mem);
+				return 0;
+			}
 		}
 		return -EINVAL;
 	}
@@ -981,8 +1062,16 @@
 insert:
    	CACHE_FLUSH();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		OUTREG32(intel_i810_private.registers,
-			 I810_PTE_BASE + (j * 4), mem->memory[i]);
+		if(j >= intel_i810_private.num_shared){
+			OUTREG32(intel_i810_private.registers,
+				 I810_PTE_BASE + (j * 4), mem->memory[i]);
+		}
+	}
+	/* Increase ref count even though they didn't ask for shared.
+	   We gave them shared so it is the right thing to do. */
+	if(pg_start < intel_i810_private.num_shared) {
+		printk(KERN_INFO "Insert Regular as SHARED\n");
+		intel_i810_private.shared_bound_ref++;
 	}
 	CACHE_FLUSH();
 
@@ -995,10 +1084,23 @@
 {
 	int i;
 
+	/* Remove ref count */
+	if(pg_start < intel_i810_private.num_shared) {
+		if(!type) {
+			printk(KERN_INFO "Remove Regular as SHARED\n");
+		}
+		else {
+			printk(KERN_INFO "Remove SHARED\n");
+		}
+		intel_i810_private.shared_bound_ref--;
+	}
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		OUTREG32(intel_i810_private.registers,
-			 I810_PTE_BASE + (i * 4),
-			 agp_bridge.scratch_page);
+		if((i >= intel_i810_private.num_shared) ||
+		   (!intel_i810_private.shared_bound_ref)) {
+			OUTREG32(intel_i810_private.registers,
+				 I810_PTE_BASE + (i * 4),
+				 agp_bridge.scratch_page);
+		}
 	}
 
 	CACHE_FLUSH();
@@ -1009,6 +1111,7 @@
 static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
 {
 	agp_memory *new;
+	int scratch_pages,i;
 
 	if (type == AGP_DCACHE_MEMORY) {
 		if (pg_count != intel_i810_private.num_dcache_entries) {
@@ -1058,18 +1161,87 @@
 	        new->physical = virt_to_phys((void *) new->memory[0]);
 	   	return new;
 	}
+	if(type == AGP_SHARED_MEMORY) {
+		/* The I810 has no framebuffer. We use this type as a
+		 * shared framebuffer. It is only allocated once and after
+		 * that we just report the amount that we have.
+		 */
+	   	if(intel_i810_private.shared_mem) {
+			/* We are not first, just report what we have */
+			new = agp_create_memory(1);
+			
+			if(new == NULL) {
+				return NULL;
+			}
+			new->type = AGP_SHARED_MEMORY;
+			new->page_count = intel_i810_private.num_shared;
+			new->num_scratch_pages = 0;
+			vfree(new->memory);
+			new->memory = intel_i810_private.shared_mem;
+			intel_i810_private.shared_alloc_ref++;
+			MOD_INC_USE_COUNT;
+			return new;
+		}
+		printk(KERN_INFO "Alloc SHARED\n");
+
+		scratch_pages = (pg_count + ENTRIES_PER_PAGE - 1) /
+			ENTRIES_PER_PAGE;
+
+		new = agp_create_memory(scratch_pages);
+		if(new == NULL) {
+			return NULL;
+		}
+
+		new->type = AGP_SHARED_MEMORY;
+		for(i=0; i<pg_count; i++) {
+			new->memory[i] = agp_bridge.agp_alloc_page();
+
+			if(new->memory[i] == 0) {
+				/* Free this structure */
+				agp_free_memory(new);
+				return NULL;
+			}
+			new->memory[i] =
+				agp_bridge.mask_memory(
+					virt_to_phys((void *) new->memory[i]),
+					type);
+			new->page_count++;
+		}
+		intel_i810_private.shared_mem = new->memory;
+		intel_i810_private.num_shared = pg_count;
+		intel_i810_private.shared_alloc_ref++;
+		MOD_INC_USE_COUNT;
+		return new;
+
+	}
    
 	return NULL;
 }
 
 static void intel_i810_free_by_type(agp_memory * curr)
 {
+	int i;
+
 	agp_free_key(curr->key);
    	if(curr->type == AGP_PHYS_MEMORY) {
 	   	agp_bridge.agp_destroy_page((unsigned long)
 				 phys_to_virt(curr->memory[0]));
 		vfree(curr->memory);
 	}
+	if(curr->type == AGP_SHARED_MEMORY) {
+		intel_i810_private.shared_alloc_ref--;
+		if((!intel_i810_private.shared_alloc_ref) &&
+		   (!intel_i810_private.shared_bound_ref)) {
+			printk(KERN_INFO "Free SHARED\n");
+			for(i=0; i<curr->page_count; i++) {
+				agp_bridge.agp_destroy_page((unsigned long)
+                                       phys_to_virt(curr->memory[i]));
+			}
+			vfree(curr->memory);
+		}
+		intel_i810_private.num_shared = 0;
+		intel_i810_private.shared_mem = NULL;
+	}
 	kfree(curr);
    	MOD_DEC_USE_COUNT;
 }
@@ -1085,7 +1257,7 @@
 	intel_i810_private.i810_dev = i810_dev;
 
 	agp_bridge.masks = intel_i810_masks;
-	agp_bridge.num_of_masks = 2;
+	agp_bridge.num_of_masks = 3;
 	agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
 	agp_bridge.size_type = FIXED_APER_SIZE;
 	agp_bridge.num_aperture_sizes = 2;
@@ -3248,7 +3420,7 @@
 			}
 			printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
 			       "Chipset.\n");
-			agp_bridge.type = INTEL_I810;
+			agp_bridge.type = INTEL_I815;
 			return intel_i810_setup(i810_dev);
 
 		default:
