This frees highpages in higher-order blocks while initializing them for NUMA-Q.
NUMA-Q machines have a lot of highmem, so speeding this up even by small
factors helps boot times a bit.
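
As an illustration, here is a minimal userspace sketch of the block-sizing
logic the patch uses: starting from the low end of a run of usable pages, it
frees the largest 2^order block that is both aligned to the current pfn
(ffs() of the zone-relative pfn) and permitted by ffs() of the remaining page
count. MAX_ORDER is assumed to be 11 as in the stock i386 configuration, and
block_order() plus the sample pfn range are hypothetical, for demonstration
only.

#include <stdio.h>
#include <strings.h>	/* POSIX ffs() */

#define MAX_ORDER 11	/* assumed, matching stock i386 */

static int min(int a, int b)
{
	return a < b ? a : b;
}

/*
 * Hypothetical helper mirroring the order computation in the patch:
 * the largest 2^order block starting at zone-relative pfn zpfn that
 * stays buddy-aligned (ffs(zpfn)) and within the remaining run
 * (ffs(npages)). ffs(0) == 0, hence the guard for zpfn == 0.
 */
static int block_order(unsigned long zpfn, unsigned long npages)
{
	int align = zpfn ? ffs((int)zpfn) : MAX_ORDER;

	return min(MAX_ORDER, min(align, ffs((int)npages))) - 1;
}

int main(void)
{
	unsigned long start = 3, end = 32;	/* made-up run of usable pages */

	while (start < end) {
		int order = block_order(start, end - start);

		printf("free %2d pages at pfn %lu (order %d)\n",
		       1 << order, start, order);
		start += 1UL << order;
	}
	return 0;
}

This frees 1 page at pfn 3, then 4 at 4, 8 at 8, and 16 at 16. Note that
when a run's end pfn is odd, start and count always have opposite parity,
so ffs() of one of them is 1 and every block degenerates to order 0; runs
ending on even (ideally MAX_ORDER-aligned) boundaries coalesce quickly.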

 discontig.c |   49 +++++++++++++++++++++++++++++++++++++++++--------
 init.c      |    4 ++--
 2 files changed, 43 insertions(+), 10 deletions(-)


diff -urpN wli-2.5.51-bk1-7/arch/i386/mm/discontig.c wli-2.5.51-bk1-8/arch/i386/mm/discontig.c
--- wli-2.5.51-bk1-7/arch/i386/mm/discontig.c	2002-12-09 18:46:10.000000000 -0800
+++ wli-2.5.51-bk1-8/arch/i386/mm/discontig.c	2002-12-11 18:36:07.000000000 -0800
@@ -323,28 +323,61 @@ void __init zone_sizes_init(void)
 	return;
 }
 
+#ifndef CONFIG_HIGHMEM
+void __init set_highmem_pages_init(int bad_ppro) { }
+#else
+int page_kills_ppro(unsigned long);
+int page_is_ram(unsigned long);
 void __init set_highmem_pages_init(int bad_ppro) 
 {
-#ifdef CONFIG_HIGHMEM
 	int nid;
 
 	for (nid = 0; nid < numnodes; nid++) {
-		unsigned long node_pfn, node_high_size, zone_start_pfn;
-		struct page * zone_mem_map;
+		unsigned long start_pfn, end_pfn, zone_max_pfn, zone_start_pfn;
+		struct page *page, *zone_mem_map;
 		
-		node_high_size = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].spanned_pages;
 		zone_mem_map = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].zone_mem_map;
 		zone_start_pfn = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].zone_start_pfn;
+		zone_max_pfn = zone_start_pfn + NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].spanned_pages;
 
 		printk("Initializing highpages for node %d\n", nid);
-		for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
-			one_highpage_init((struct page *)(zone_mem_map + node_pfn),
-					  zone_start_pfn + node_pfn, bad_ppro);
+		start_pfn = end_pfn = zone_start_pfn;
+		while (start_pfn < zone_max_pfn && end_pfn < zone_max_pfn) {
+			page = &zone_mem_map[end_pfn - zone_start_pfn];
+			while (end_pfn < zone_max_pfn
+					&& page_is_ram(end_pfn)
+					&& !(bad_ppro && page_kills_ppro(end_pfn))) {
+				ClearPageReserved(page);
+				set_bit(PG_highmem, &page->flags);
+				set_page_count(page, 1);
+				++page;
+				++end_pfn;
+			}
+			totalhigh_pages += end_pfn - start_pfn;
+			while (start_pfn < end_pfn) {
+			/* ffs(0) == 0, so we dare not pass zpfn == 0 to ffs() */
+				unsigned long zpfn = start_pfn - zone_start_pfn;
+				int order = min(MAX_ORDER, min(zpfn ? ffs(zpfn) : MAX_ORDER, ffs(end_pfn - start_pfn))) - 1;
+				__free_pages(&zone_mem_map[zpfn], order);
+				start_pfn += 1 << order;
+			}
+			if (start_pfn != end_pfn)
+				printk("wli screwed up, it will crash!\n");
+
+			if (end_pfn < zone_max_pfn)
+				SetPageReserved(&zone_mem_map[end_pfn - zone_start_pfn]);
+
+			/*
+			 * end_pfn stopped at an unusable page (marked reserved
+			 * above); start_pfn has caught up to it. Slide both forward.
+			 */
+			++start_pfn;
+			++end_pfn;
 		}
 	}
 	totalram_pages += totalhigh_pages;
-#endif
 }
+#endif
 
 void __init set_max_mapnr_init(void)
 {
diff -urpN wli-2.5.51-bk1-7/arch/i386/mm/init.c wli-2.5.51-bk1-8/arch/i386/mm/init.c
--- wli-2.5.51-bk1-7/arch/i386/mm/init.c	2002-12-09 18:46:14.000000000 -0800
+++ wli-2.5.51-bk1-8/arch/i386/mm/init.c	2002-12-11 18:34:30.000000000 -0800
@@ -153,14 +153,14 @@ static void __init kernel_physical_mappi
 	}	
 }
 
-static inline int page_kills_ppro(unsigned long pagenr)
+int __init page_kills_ppro(unsigned long pagenr)
 {
 	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
 		return 1;
 	return 0;
 }
 
-static inline int page_is_ram(unsigned long pagenr)
+int __init page_is_ram(unsigned long pagenr)
 {
 	int i;