--- zzzz-none-000/linux-3.10.107/mm/page_alloc.c	2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/mm/page_alloc.c	2021-11-10 11:53:56.000000000 +0000
@@ -1384,6 +1384,9 @@
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	unsigned long current_pc = avm_get_page_current_pc(page);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
@@ -1396,9 +1399,12 @@
 	if (kmemcheck_page_is_tracked(page))
 		split_page(virt_to_page(page[0].shadow), order);
 #endif
-
-	for (i = 1; i < (1 << order); i++)
+	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i,current_pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+	}
 }
 EXPORT_SYMBOL_GPL(split_page);
 
@@ -2608,7 +2614,11 @@
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+			struct zonelist *zonelist, nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, unsigned long pc
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+			)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	struct zone *preferred_zone;
@@ -2685,7 +2695,11 @@
 		goto retry_cpuset;
 
 	memcg_kmem_commit_charge(page, memcg, order);
-
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if(likely(page)) {
+		avm_set_page_current_pc(page,pc);
+	}
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2981,6 +2995,9 @@
 	printk("(%s) ", tmp);
 }
 
+#if defined(CONFIG_AVM_ENHANCED)
+extern unsigned int get_used_vmalloc_mem(void);
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
@@ -3006,17 +3023,41 @@
 
 		printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
 			cpu, pageset->pcp.high,
-			pageset->pcp.batch, pageset->pcp.count);
+			pageset->pcp.batch, pageset->pcp.count);
 	}
 }
-
+#if defined(CONFIG_AVM_ENHANCED)
+	{
+	unsigned int sum_pages, v_mem;
+	sum_pages = global_page_state(NR_ACTIVE_ANON)+
+		global_page_state(NR_INACTIVE_ANON)+
+		global_page_state(NR_ISOLATED_ANON)+
+		global_page_state(NR_ACTIVE_FILE)+
+		global_page_state(NR_INACTIVE_FILE)+
+		global_page_state(NR_ISOLATED_FILE)+
+		global_page_state(NR_UNEVICTABLE)+
+		global_page_state(NR_FILE_DIRTY)+
+		global_page_state(NR_WRITEBACK)+
+		global_page_state(NR_UNSTABLE_NFS)+
+		global_page_state(NR_FREE_PAGES)+
+		global_page_state(NR_SLAB_RECLAIMABLE)+
+		global_page_state(NR_SLAB_UNRECLAIMABLE)+
+		global_page_state(NR_FILE_MAPPED)+
+		global_page_state(NR_SHMEM)+
+		global_page_state(NR_PAGETABLE)+
+		global_page_state(NR_BOUNCE)+
+		global_page_state(NR_FREE_CMA_PAGES);
+	v_mem = get_used_vmalloc_mem();
+	printk("global_page_sum %ukB(%u pages) + vmalloc-used = %ukB (%u pages)\n", sum_pages * 4, sum_pages, v_mem >> 10, v_mem >> PAGE_SHIFT);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu"
 		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
-		" free_cma:%lu\n",
+		" free_cma:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_ISOLATED_ANON),
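
Note: the avm_get_page_current_pc()/avm_set_page_current_pc() helpers used above are AVM (FRITZ!OS) additions and do not exist in mainline 3.10; their definitions are not part of this hunk. A minimal sketch of what they might look like, assuming CONFIG_AVM_PAGE_TRACE adds a hypothetical "avm_pc" field to struct page (the field name and the real AVM layout are assumptions):

/* Sketch only -- not the actual AVM implementation.  Assumes struct page
 * gains an "unsigned long avm_pc" member when CONFIG_AVM_PAGE_TRACE is set. */
#if defined(CONFIG_AVM_PAGE_TRACE)
static inline void avm_set_page_current_pc(struct page *page, unsigned long pc)
{
	page->avm_pc = pc;		/* remember the allocating call site */
}

static inline unsigned long avm_get_page_current_pc(struct page *page)
{
	return page->avm_pc;		/* read it back, e.g. in split_page() */
}
#else
static inline void avm_set_page_current_pc(struct page *page, unsigned long pc) { }
static inline unsigned long avm_get_page_current_pc(struct page *page) { return 0; }
#endif

With the extra pc parameter added to __alloc_pages_nodemask(), the alloc_pages()/alloc_pages_node() wrappers in include/linux/gfp.h (not shown in this patch) would have to pass the caller's address, presumably via something like _RET_IP_ (i.e. __builtin_return_address(0)), so that every allocated page records the call site that requested it. split_page() then copies that PC to each tail page, keeping per-page accounting accurate after a high-order allocation is split.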