--- zzzz-none-000/linux-2.6.19.2/mm/page_alloc.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5505/linux-2.6.19.2/mm/page_alloc.c	2008-06-19 07:46:28.000000000 +0000
@@ -390,7 +390,7 @@
 static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order)
 {
-	unsigned long page_idx;
+	unsigned long page_idx, index;
 	int order_size = 1 << order;
 
 	if (unlikely(PageCompound(page)))
@@ -401,6 +401,11 @@
 	VM_BUG_ON(page_idx & (order_size - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	for (index = order_size; index; --index)
+		clear_highpage(page + index - 1);
+#endif
+
 	zone->free_pages += order_size;
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
@@ -853,7 +858,7 @@
 		pcp = &zone_pcp(zone, cpu)->pcp[cold];
 		local_irq_save(flags);
 		if (!pcp->count) {
-			pcp->count = rmqueue_bulk(zone, 0,
+			pcp->count += rmqueue_bulk(zone, 0,
 						pcp->batch, &pcp->list);
 			if (unlikely(!pcp->count))
 				goto failed;
@@ -1687,8 +1692,9 @@
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
+		if (!early_pfn_valid(pfn)) {
 			continue;
+		}
 		if (!early_pfn_in_nid(pfn, nid))
 			continue;
 		page = pfn_to_page(pfn);
@@ -2261,7 +2267,7 @@
 
 	/* Account for ranges past physical memory on this node */
 	if (range_end_pfn > prev_end_pfn)
-		hole_pages += range_end_pfn -
+		hole_pages = range_end_pfn -
 				max(range_start_pfn, prev_end_pfn);
 
 	return hole_pages;
@@ -2407,7 +2413,7 @@
 		zone->zone_pgdat = pgdat;
 		zone->free_pages = 0;
 
-		zone->prev_priority = DEF_PRIORITY;
+		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
 
 		zone_pcp_init(zone);
 		INIT_LIST_HEAD(&zone->active_list);
@@ -2450,8 +2456,22 @@
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
-		if (!map)
+		if (!map) {
+#if defined(CONFIG_MIPS)
+#if defined(CONFIG_MIPS_UR8)
+			unsigned int ram_start = CONFIG_MIPS_UR8_PHY_MEMSTART;
+#endif /*--- #if defined(CONFIG_MIPS_UR8) ---*/
+			printk(KERN_ERR "[mem_map-hack]: reduce size from %d", size);
+			size = (end - (ram_start >> PAGE_SHIFT)) * sizeof(struct page);
+			printk(" %d Bytes\n", size);
+			map = alloc_bootmem_node(pgdat, size);
+			printk(KERN_ERR "[mem_map-hack]: move map base from 0x%x", map);
+			map -= (ram_start >> PAGE_SHIFT);
+			printk(" to 0x%x\n", map);
+#else
 			map = alloc_bootmem_node(pgdat, size);
+#endif
+		}
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifdef CONFIG_FLATMEM
@@ -2612,9 +2632,6 @@
 {
 	int i;
 
-	/* Regions in the early_node_map can be in any order */
-	sort_node_map();
-
 	/* Assuming a sorted map, the first range found has the starting pfn */
 	for_each_active_range_index_in_nid(i, nid)
 		return early_node_map[i].start_pfn;
@@ -2683,6 +2700,9 @@
 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
 	}
 
+	/* Regions in the early_node_map can be in any order */
+	sort_node_map();
+
 	/* Print out the zone ranges */
 	printk("Zone PFN ranges:\n");
 	for (i = 0; i < MAX_NR_ZONES; i++)