--- zzzz-none-000/linux-4.9.279/mm/page_alloc.c	2021-08-08 06:38:54.000000000 +0000
+++ puma7-atom-6591-750/linux-4.9.279/mm/page_alloc.c	2023-02-08 11:43:43.000000000 +0000
@@ -71,6 +71,10 @@
 #include <asm/div64.h>
 #include "internal.h"
 
+#if defined(CONFIG_AVM_ENHANCED)
+#include
+#endif
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -2528,6 +2532,9 @@
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	unsigned long current_pc = avm_get_page_current_pc(page);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2540,9 +2547,12 @@
 	if (kmemcheck_page_is_tracked(page))
 		split_page(virt_to_page(page[0].shadow), order);
 #endif
-
-	for (i = 1; i < (1 << order); i++)
+	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i, current_pc);
+#endif
+	}
 	split_page_owner(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -3765,7 +3775,11 @@
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+			struct zonelist *zonelist, nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, unsigned long pc
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+			)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -3858,6 +3872,11 @@
 
 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if (likely(page)) {
+		avm_set_page_current_pc(page, pc);
+	}
+#endif
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -4304,7 +4323,32 @@
 		for_each_online_cpu(cpu)
 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
 	}
-
+#if defined(CONFIG_AVM_ENHANCED)
+	{
+		unsigned int sum_pages, v_mem;
+		sum_pages = global_page_state(NR_ACTIVE_ANON)+
+			global_page_state(NR_INACTIVE_ANON)+
+			global_page_state(NR_ISOLATED_ANON)+
+			global_page_state(NR_ACTIVE_FILE)+
+			global_page_state(NR_INACTIVE_FILE)+
+			global_page_state(NR_ISOLATED_FILE)+
+			global_page_state(NR_UNEVICTABLE)+
+			global_page_state(NR_FILE_DIRTY)+
+			global_page_state(NR_WRITEBACK)+
+			global_page_state(NR_UNSTABLE_NFS)+
+			global_page_state(NR_FREE_PAGES)+
+			global_page_state(NR_SLAB_RECLAIMABLE)+
+			global_page_state(NR_SLAB_UNRECLAIMABLE)+
+			global_page_state(NR_FILE_MAPPED)+
+			global_page_state(NR_SHMEM)+
+			global_page_state(NR_PAGETABLE)+
+			global_page_state(NR_BOUNCE)+
+			global_page_state(NR_FREE_CMA_PAGES);
+		v_mem = get_used_vmalloc_mem();
+		printk("global_page_sum %ukB(%u pages) + vmalloc-used = %ukB (%u pages)\n", sum_pages * 4, sum_pages,
+			v_mem >> 10, v_mem >> PAGE_SHIFT);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"