Index: linux-2.6.9/include/linux/sched.h
===================================================================
--- linux-2.6.9.orig/include/linux/sched.h	2004-12-01 13:15:05.000000000 -0800
+++ linux-2.6.9/include/linux/sched.h	2004-12-01 13:15:07.000000000 -0800
@@ -539,6 +539,8 @@
 #endif
 
 	struct list_head tasks;
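+	/*
+	 * Anonymous page fault prediction: address at which the next
+	 * anonymous fault is expected if faults are sequential, and the
+	 * current preallocation order (up to 1 << order pages per fault).
+	 */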
+	unsigned long anon_fault_next_addr;
+	int anon_fault_order;
 	/*
 	 * ptrace_list/ptrace_children forms the list of my children
 	 * that were stolen by a ptracer.
Index: linux-2.6.9/mm/memory.c
===================================================================
--- linux-2.6.9.orig/mm/memory.c	2004-12-01 13:15:05.000000000 -0800
+++ linux-2.6.9/mm/memory.c	2004-12-01 13:16:41.000000000 -0800
@@ -1417,6 +1417,8 @@
 	return ret;
 }
 
+static int sysctl_max_prealloc_order = 5;
+
 /*
  * We are called with the MM semaphore held.
  */
@@ -1425,59 +1427,88 @@
 		pte_t *page_table, pmd_t *pmd, int write_access,
 		unsigned long addr, pte_t orig_entry)
 {
-	pte_t entry;
-	struct page * page = ZERO_PAGE(addr);
+	unsigned long end_addr;
+
+	addr &= PAGE_MASK;
+
+	/* Check whether the fault continues a sequential allocation pattern */
+	if (likely(current->anon_fault_next_addr != addr)) {
+		current->anon_fault_order = 0;
+		end_addr = addr + PAGE_SIZE;
+	} else {
+		int order = current->anon_fault_order;
+
+		/*
+		 * Calculate the number of pages to preallocate. The order
+		 * grows with each successful prediction, up to
+		 * sysctl_max_prealloc_order.
+		 */
+		if (order < sysctl_max_prealloc_order)
+			order++;
+
+		current->anon_fault_order = order;
+
+		end_addr = addr + (PAGE_SIZE << order);
 
-	/* Read-only mapping of ZERO_PAGE. */
-	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+		if (end_addr > vma->vm_end)
+			end_addr = vma->vm_end;
+
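+		/*
+		 * Do not let the range cross a pmd boundary: the loops below
+		 * step through the ptes with ++page_table, which is only
+		 * valid within a single page table page.
+		 */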
+		if ((addr & PMD_MASK) != (end_addr & PMD_MASK))
+			end_addr &= PMD_MASK;
+	}
 
-	/* ..except if it's a write access */
 	if (write_access) {
-		/* Allocate our own private page. */
-		pte_unmap(page_table);
 
 		if (unlikely(anon_vma_prepare(vma)))
-			goto no_mem;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
-		if (!page)
-			goto no_mem;
-		clear_user_highpage(page, addr);
+			return VM_FAULT_OOM;
 
-		page_table = pte_offset_map(pmd, addr);
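+		/*
+		 * Write fault: allocate and zero a private page for each pte
+		 * in the predicted range, stopping when a pte is found to be
+		 * already populated.
+		 */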
+		do {
+			struct page *page;
+			pte_t entry;
 
-		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
-							 vma->vm_page_prot)),
+			page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+			if (!page)
+				return VM_FAULT_OOM;
+
+			clear_user_highpage(page, addr);
+
+			entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
+						 vma->vm_page_prot)),
 				      vma);
-		mark_page_accessed(page);
-	}
+			mark_page_accessed(page);
 
-	/* update the entry */
-	if (!ptep_cmpxchg(vma, addr, page_table, orig_entry, entry)) {
-		if (write_access) {
-			pte_unmap(page_table);
-			page_cache_release(page);
-		}
-		goto out;
-	}
-	if (write_access) {
-		/*
-		 * These two functions must come after the cmpxchg
-		 * because if the page is on the LRU then try_to_unmap may come
-		 * in and unmap the pte.
-		 */
-		lru_cache_add_active(page);
-		page_add_anon_rmap(page, vma, addr);
-		atomic_inc(&mm->mm_rss);
+			/* update the entry */
+			if (!ptep_cmpxchg(vma, addr, page_table, orig_entry, entry)) {
+				pte_unmap(page_table);
+				page_cache_release(page);
+				break;
+			}
+
+			lru_cache_add_active(page);
+			page_add_anon_rmap(page, vma, addr);
+			atomic_inc(&mm->mm_rss);
 	
-	}
-	pte_unmap(page_table);
+			pte_unmap(page_table);
+			/* No need to invalidate - it was non-present before */
+			update_mmu_cache(vma, addr, entry);
+			addr += PAGE_SIZE;
 
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, entry);
-out:
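+			/*
+			 * Continue while the next pte in this page table is
+			 * still empty and we remain within the predicted range.
+			 */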
+		} while (addr < end_addr && pte_none(orig_entry = *++page_table));
+	} else {
+		/* Read fault: map the zero page read-only across the range */
+		do {
+			pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+			if (!ptep_cmpxchg(vma, addr, page_table, orig_entry, entry))
+				break;
+
+			pte_unmap(page_table);
+			/* No need to invalidate - it was non-present before */
+			update_mmu_cache(vma, addr, entry);
+			addr += PAGE_SIZE;
+
+		} while (addr < end_addr && pte_none(orig_entry = *++page_table));
+	}
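+	/* Remember where the next fault is expected if the pattern continues */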
+	current->anon_fault_next_addr = addr;
 	return VM_FAULT_MINOR;
-no_mem:
-	return VM_FAULT_OOM;
 }
 
 /*