--- zzzz-none-000/linux-5.15.111/mm/page_alloc.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-arm-6670-761/linux-5.15.111/mm/page_alloc.c	2024-02-07 09:28:09.000000000 +0000
@@ -14,7 +14,10 @@
  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  */
-
+/*
+ * Includes Maxlinear's changes dated: 2022.
+ * Changed portions - Copyright © 2022, MaxLinear, Inc.
+ */
 #include <linux/stddef.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
@@ -338,7 +341,14 @@
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+/*
+ * Disable boost_watermark() otherwise OOM-killer will occur
+ */
+#ifdef CONFIG_ARM_AVALANCHE_SOC
+int watermark_boost_factor __read_mostly = 0;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -3489,12 +3499,19 @@
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	unsigned long current_pc = avm_get_page_current_pc(page);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
 
-	for (i = 1; i < (1 << order); i++)
+	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i,current_pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+	}
 	split_page_owner(page, 1 << order);
 	split_page_memcg(page, 1 << order);
 }
@@ -5364,7 +5381,11 @@
 	local_unlock_irqrestore(&pagesets.lock, flags);
 
 failed:
-	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
+	page = __alloc_pages(gfp, 0, preferred_nid, nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, 0
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+			);
 	if (page) {
 		if (page_list)
 			list_add(&page->lru, page_list);
@@ -5381,7 +5402,11 @@
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask)
+							nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+							, unsigned long pc
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+							)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5442,6 +5467,12 @@
 
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if(likely(page)) {
+		avm_set_page_current_pc(page,pc);
+	}
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages);
@@ -5920,6 +5951,8 @@
 		printk(KERN_CONT "(%s) ", tmp);
 }
 
+#define global_page_state(a) global_zone_page_state(a)
+
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
@@ -5943,7 +5976,30 @@
 		for_each_online_cpu(cpu)
 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
 	}
-
+#if defined(CONFIG_AVM_ENHANCED)
+	{
+		unsigned int sum_pages;
+		sum_pages = global_page_state(NR_ACTIVE_ANON)+
+			global_page_state(NR_INACTIVE_ANON)+
+			global_page_state(NR_ISOLATED_ANON)+
+			global_page_state(NR_ACTIVE_FILE)+
+			global_page_state(NR_INACTIVE_FILE)+
+			global_page_state(NR_ISOLATED_FILE)+
+			global_page_state(NR_UNEVICTABLE)+
+			global_page_state(NR_FILE_DIRTY)+
+			global_page_state(NR_WRITEBACK)+
+			global_page_state(NR_FREE_PAGES)+
+			PFN_DOWN(global_page_state(NR_SLAB_RECLAIMABLE_B))+
+			PFN_DOWN(global_page_state(NR_SLAB_UNRECLAIMABLE_B))+
+			global_page_state(NR_FILE_MAPPED)+
+			global_page_state(NR_SHMEM)+
+			global_page_state(NR_PAGETABLE)+
+			global_page_state(NR_BOUNCE)+
+			global_page_state(NR_FREE_CMA_PAGES);
+
+		printk("global_page_sum %ukB(%u pages)\n", sum_pages * 4, sum_pages);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu dirty:%lu writeback:%lu\n"
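Illustration (not part of the patch): the CONFIG_AVM_PAGE_TRACE hunks above tag each allocated page with the caller's program counter via avm_set_page_current_pc(), and the patched split_page() copies that tag from the head page to every tail page so attribution survives splitting. Below is a minimal, standalone userspace C sketch of that bookkeeping; struct fake_page, alloc_pages_traced() and split_page_traced() are invented names for this example, and only the avm_*_page_current_pc() idea corresponds to the actual patch.

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for struct page: only the fields this sketch needs. */
struct fake_page {
	unsigned long current_pc;	/* who allocated this page (caller PC) */
	int refcounted;			/* stand-in for set_page_refcounted() */
};

/* Analogous to avm_set_page_current_pc() in the patch. */
static void fake_set_page_current_pc(struct fake_page *page, unsigned long pc)
{
	page->current_pc = pc;
}

/* Analogous to avm_get_page_current_pc() in the patch. */
static unsigned long fake_get_page_current_pc(const struct fake_page *page)
{
	return page->current_pc;
}

/*
 * Allocate 2^order contiguous "pages" and tag the head page with the
 * caller's PC, mirroring the extra 'pc' argument added to __alloc_pages().
 */
static struct fake_page *alloc_pages_traced(unsigned int order, unsigned long pc)
{
	struct fake_page *pages = calloc(1UL << order, sizeof(*pages));

	if (pages) {
		pages[0].refcounted = 1;
		fake_set_page_current_pc(&pages[0], pc);
	}
	return pages;
}

/*
 * Mirror of the patched split_page(): besides ref-counting the tail pages,
 * copy the head page's PC so every order-0 page keeps its origin.
 */
static void split_page_traced(struct fake_page *page, unsigned int order)
{
	unsigned long current_pc = fake_get_page_current_pc(page);
	unsigned long i;

	for (i = 1; i < (1UL << order); i++) {
		page[i].refcounted = 1;
		fake_set_page_current_pc(&page[i], current_pc);
	}
}

int main(void)
{
	/* Userspace stand-in for the caller PC the kernel patch records. */
	unsigned long pc = (unsigned long)__builtin_return_address(0);
	struct fake_page *pages = alloc_pages_traced(2, pc);

	if (!pages)
		return 1;

	split_page_traced(pages, 2);
	printf("page[3] allocated from pc=0x%lx\n",
	       fake_get_page_current_pc(&pages[3]));
	free(pages);
	return 0;
}

Propagating the PC inside split_page() is what keeps per-page ownership accurate once a high-order allocation is broken into order-0 pages and handed out individually.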