diff -purN -X /home/mbligh/.diff.exclude 380-protocol254/mm/slab.c 390-slabtune/mm/slab.c
--- 380-protocol254/mm/slab.c	2003-10-27 10:41:16.000000000 -0800
+++ 390-slabtune/mm/slab.c	2003-12-02 14:58:37.000000000 -0800
@@ -479,6 +479,19 @@ static struct cache_names {
 #undef CACHE
 };
 
+/* Adjustments to the per-cpu cache limits, based on memory size */
+static int cache_limit_multiplier_norm;
+static int cache_limit_multiplier_dma;
+
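+/*
+ * Each entry maps a minimum zone size, in pages, to the multiplier
+ * applied to the per-cpu cache limits: 0x40000 pages is 1GB and
+ * 0x10000 pages is 256MB with 4KB pages.
+ */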
+struct cache_multipliers {
+	int	memsize;
+	int	mult;
+} cache_multipliers[] = {
+	{0x40000, 4},
+	{0x10000, 2},
+	{0x0, 1}
+};
+
 struct arraycache_init initarray_cache __initdata = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 struct arraycache_init initarray_generic __initdata = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
@@ -661,6 +674,9 @@ void __init kmem_cache_init(void)
 	size_t left_over;
 	struct cache_sizes *sizes;
 	struct cache_names *names;
+	unsigned long	dmasize, normsize;
+	pg_data_t *pgdat;
+	int i;
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -669,7 +685,21 @@ void __init kmem_cache_init(void)
 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
 
-	
+	/*
+	 * Increase cache limits based on the amount of memory in various
+	 * zones.
+	 */
+	dmasize = normsize = 0;
+	for_each_pgdat(pgdat) {
+		dmasize += pgdat->node_zones[ZONE_DMA].present_pages;
+		normsize += pgdat->node_zones[ZONE_NORMAL].present_pages;
+	}
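+	/*
+	 * Pick the first entry whose threshold the zone size meets;
+	 * the trailing {0, 1} entry guarantees termination.
+	 */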
+	for (i = 0; dmasize < cache_multipliers[i].memsize; i++)
+		;
+	cache_limit_multiplier_dma = cache_multipliers[i].mult;
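+	/* Non-DMA allocations can fall back to ZONE_DMA, so count both zones */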
+	normsize += dmasize;
+	for (i = 0; normsize < cache_multipliers[i].memsize; i++)
+		;
+	cache_limit_multiplier_norm = cache_multipliers[i].mult;
+
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
 	 * 1) initialize the cache_cache cache: it contains the kmem_cache_t
@@ -2356,6 +2386,11 @@ static void enable_cpucache (kmem_cache_
 	else
 		limit = 120;
 
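+	/* Scale the limit by the size of the zone the cache allocates from */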
+	if (cachep->gfpflags & GFP_DMA)
+		limit *= cache_limit_multiplier_dma;
+	else
+		limit *= cache_limit_multiplier_norm;
+
 	/* Cpu bound tasks (e.g. network routing) can exhibit cpu bound
 	 * allocation behaviour: Most allocs on one cpu, most free operations
 	 * on another cpu. For these cases, an efficient object passing between
@@ -2544,7 +2579,7 @@ static void *s_start(struct seq_file *m,
 		seq_puts(m, "slabinfo - version: 2.0\n");
 #endif
 		seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
-		seq_puts(m, " : tunables <batchcount> <limit> <sharedfactor>");
+		seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 		seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
 		seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <freelimit>");