--- zzzz-none-000/linux-2.6.13.1/mm/slab.c	2005-09-10 02:42:58.000000000 +0000
+++ ohio-7170-487/linux-2.6.13.1/mm/slab.c	2007-07-27 14:30:20.000000000 +0000
@@ -1206,6 +1206,23 @@
 {
 	size_t left_over, slab_size, ralign;
 	kmem_cache_t *cachep = NULL;
+#if 1 /* AVM */
+	unsigned current_slab_break_gfp_order = slab_break_gfp_order;
+
+	if (5440 == size && current_slab_break_gfp_order < 2) {
+		current_slab_break_gfp_order = 2;
+	}
+	if (4256+8 == size && current_slab_break_gfp_order < 7) {
+		current_slab_break_gfp_order = 7;
+	}
+	if (1728+8 == size && current_slab_break_gfp_order < 7) {
+		current_slab_break_gfp_order = 7;
+	}
+	if (2208+8 == size && current_slab_break_gfp_order < 5) {
+		current_slab_break_gfp_order = 5;
+	}
+#endif
+
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -1376,11 +1393,17 @@
 		 * Large num of objs is good, but v. large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (cachep->gfporder >= slab_break_gfp_order)
+#if 1 /* AVM */
+		if (cachep->gfporder >= current_slab_break_gfp_order) {
+#else
+		if (cachep->gfporder >= slab_break_gfp_order) {
+#endif
 			break;
-
-		if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
+		}
+
+		if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder)) {
 			break;	/* Acceptable internal fragmentation. */
+		}
 next:
 		cachep->gfporder++;
 	} while (1);
@@ -2486,6 +2509,8 @@
 	 * Then kmalloc uses the uninlined functions instead of the inline
 	 * functions.
 	 */
+	/*--- if(size > 1024) ---*/
+	/*--- printk("[kmalloc:%u]", size); ---*/
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
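
For context: the AVM overrides in the first hunk raise the slab break order for a few awkward object sizes so they can pack into larger slabs with less internal fragmentation. For example, a 5440-byte object fits only once into an order-1 slab (8192 bytes, 2752 bytes wasted, about 34%), but three times into an order-2 slab (16384 bytes, only 64 bytes wasted). The following standalone user-space sketch reproduces that arithmetic; it is not part of the patch, assumes 4 KiB pages, and ignores the per-slab management overhead that the kernel's cache_estimate() also accounts for, so the numbers are approximate.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	/* The object sizes special-cased by the patch above. */
	unsigned sizes[] = { 5440, 4256 + 8, 1728 + 8, 2208 + 8 };
	unsigned i, order;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		printf("size %u:\n", sizes[i]);
		for (order = 0; order <= 7; order++) {
			unsigned slab = PAGE_SIZE << order;	/* bytes in a 2^order-page slab */
			unsigned num  = slab / sizes[i];	/* objects that fit */
			unsigned left = slab - num * sizes[i];	/* internal fragmentation */

			printf("  order %u: %3u objs, %6u bytes left over (%u%%)\n",
			       order, num, left, left * 100 / slab);
		}
	}
	return 0;
}

Note that raising current_slab_break_gfp_order only lifts the ceiling of the sizing loop in the second hunk; the loop still stops earlier as soon as the (left_over*8) <= (PAGE_SIZE<<cachep->gfporder) test passes, so the overrides matter mainly for sizes whose fragmentation stays above that threshold at the default break order of 0 or 1 (as with 5440, which does not fit an order-0 slab at all).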