diff -aurpN -X /home/fletch/.diff.exclude 180-sched_domains/kernel/sched.c 181-sched_balance_fix/kernel/sched.c
--- 180-sched_domains/kernel/sched.c	Wed Feb 11 09:02:46 2004
+++ 181-sched_balance_fix/kernel/sched.c	Wed Feb 11 09:02:50 2004
@@ -1398,14 +1398,13 @@ find_busiest_group(struct sched_domain *
 		total_nr_cpus += nr_cpus;
 		avg_load /= nr_cpus;
 
+		if (avg_load > max_load)
+			max_load = avg_load;
+
 		if (local_group) {
 			this_load = avg_load;
-			goto nextgroup;
-		}
-
-		if (avg_load >= max_load) {
+		} else if (avg_load >= max_load) {
 			busiest = group;
-			max_load = avg_load;
 			busiest_nr_cpus = nr_cpus;
 		}
 nextgroup:
@@ -1428,11 +1427,18 @@ nextgroup:
 	 * reduce the max loaded cpu below the average load, as either of these
 	 * actions would just result in more rebalancing later, and ping-pong
 	 * tasks around. Thus we look for the minimum possible imbalance.
+	 * Negative imbalances (*we* are more loaded than anyone else) will
+	 * be counted as no imbalance for these purposes -- we can't fix that
+	 * by pulling tasks to us.  Be careful of negative numbers as they'll
+	 * appear as very large values with unsigned longs.
 	 */
-	*imbalance = min(max_load - avg_load, avg_load - this_load);
-
-	/* Get rid of the scaling factor now, rounding *up* as we divide */
-	*imbalance = (*imbalance + SCHED_LOAD_SCALE - 1) >> SCHED_LOAD_SHIFT;
+	if (avg_load >= this_load) {
+		*imbalance = min(max_load - avg_load, avg_load - this_load);
+		/* Get rid of the scaling factor, rounding *up* as we divide */
+		*imbalance = (*imbalance + SCHED_LOAD_SCALE - 1)
+						>> SCHED_LOAD_SHIFT;
+	} else
+		*imbalance = 0;
 
 	if (*imbalance == 0) {
 		if (package_idle != NOT_IDLE && domain->flags & SD_FLAG_IDLE
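
For reference, the guarded imbalance calculation added above can be exercised in
isolation.  What follows is a minimal userspace sketch, not kernel code:
calc_imbalance() is a hypothetical stand-in for the inline logic in
find_busiest_group(), and the SCHED_LOAD_SHIFT/SCHED_LOAD_SCALE values are
illustrative rather than copied from the real headers.  It shows why the
avg_load >= this_load check matters: with unsigned longs, a locally over-loaded
group would otherwise turn a negative difference into a huge wrapped value and
pull even more tasks onto itself.

	/*
	 * Standalone sketch of the guarded imbalance computation.
	 * Constants are illustrative only.
	 */
	#include <stdio.h>

	#define SCHED_LOAD_SHIFT	10
	#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

	static unsigned long calc_imbalance(unsigned long this_load,
					    unsigned long avg_load,
					    unsigned long max_load)
	{
		unsigned long imbalance;

		if (avg_load < this_load)
			/*
			 * We are already above the average: avg_load - this_load
			 * would wrap to a very large unsigned value, so report
			 * no imbalance rather than pulling more tasks here.
			 */
			return 0;

		imbalance = max_load - avg_load < avg_load - this_load ?
				max_load - avg_load : avg_load - this_load;
		/* Strip the fixed-point scaling factor, rounding up. */
		return (imbalance + SCHED_LOAD_SCALE - 1) >> SCHED_LOAD_SHIFT;
	}

	int main(void)
	{
		/* Local group is the busiest: unguarded code would wrap, this returns 0. */
		printf("%lu\n", calc_imbalance(4 * SCHED_LOAD_SCALE,
					       2 * SCHED_LOAD_SCALE,
					       4 * SCHED_LOAD_SCALE));
		/* A remote group is busier: roughly one task's worth of load to pull. */
		printf("%lu\n", calc_imbalance(1 * SCHED_LOAD_SCALE,
					       2 * SCHED_LOAD_SCALE,
					       3 * SCHED_LOAD_SCALE));
		return 0;
	}

With the second set of inputs the minimum of the two differences is one full
SCHED_LOAD_SCALE, which rounds up to an imbalance of 1 after the shift; the
first set demonstrates the case the patch clamps to zero.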