--- zzzz-none-000/linux-4.4.271/mm/page_alloc.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/mm/page_alloc.c	2023-04-19 10:22:30.000000000 +0000
@@ -239,6 +239,9 @@
 };
 
 int min_free_kbytes = 1024;
+#ifndef CONFIG_NUMA
+int min_max_free_kbytes[2];
+#endif /* !CONFIG_NUMA */
 int user_min_free_kbytes = -1;
 
 static unsigned long __meminitdata nr_kernel_pages;
@@ -2161,7 +2164,11 @@
  * Note: this is probably too low level an operation for use in drivers.
  * Please consult with lkml before using this in your driver.
  */
-void split_page(struct page *page, unsigned int order)
+void __split_page(struct page *page, unsigned int order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		  , unsigned long pc
+#endif
+)
 {
 	int i;
 	gfp_t gfp_mask;
@@ -2175,17 +2182,27 @@
 	 * otherwise free the whole shadow.
 	 */
 	if (kmemcheck_page_is_tracked(page))
-		split_page(virt_to_page(page[0].shadow), order);
+		__split_page(virt_to_page(page[0].shadow), order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			     , pc
+#endif
+			     );
 #endif
 
 	gfp_mask = get_page_owner_gfp(page);
 	set_page_owner(page, 0, gfp_mask);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	avm_set_page_current_pc(page, pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
 		set_page_owner(page + i, 0, gfp_mask);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i, pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	}
 }
-EXPORT_SYMBOL_GPL(split_page);
+EXPORT_SYMBOL_GPL(__split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
@@ -2745,6 +2762,7 @@
 
 	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
 		current->comm, order, gfp_mask);
+	add_taint(TAINT_ALLOC_FAIL, LOCKDEP_STILL_OK);
 
 	dump_stack();
 	if (!should_suppress_show_mem())
@@ -3240,7 +3258,11 @@
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+			struct zonelist *zonelist, nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, unsigned long pc
+#endif
+			)
 {
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
@@ -3320,6 +3342,11 @@
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if (likely(page)) {
+		avm_set_page_current_pc(page,pc);
+	}
+#endif
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -3520,13 +3547,21 @@
 }
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
-		size_t size)
+		size_t size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, unsigned long pc
+#endif
+		)
 {
 	if (addr) {
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
 		unsigned long used = addr + PAGE_ALIGN(size);
-		split_page(virt_to_page((void *)addr), order);
+		__split_page(virt_to_page((void *)addr), order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			     , pc
+#endif
+			     );
 		while (used < alloc_end) {
 			free_page(used);
 			used += PAGE_SIZE;
 		}
@@ -3554,7 +3589,11 @@
 	unsigned long addr;
 
 	addr = __get_free_pages(gfp_mask, order);
-	return make_alloc_exact(addr, order, size);
+	return make_alloc_exact(addr, order, size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+				, _RET_IP_
+#endif
+				);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
 
@@ -3574,7 +3613,11 @@
 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
-	return make_alloc_exact((unsigned long)page_address(p), order, size);
+	return make_alloc_exact((unsigned long)page_address(p), order, size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+				, 0L
+#endif
+				);
 }
 
 /**
@@ -3691,6 +3734,13 @@
 	available += global_page_state(NR_SLAB_RECLAIMABLE) -
 		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
 
+	/*
+	 * Part of the kernel memory, which can be released under memory
+	 * pressure.
+	 */
+	available += global_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
+		PAGE_SHIFT;
+
 	if (available < 0)
 		available = 0;
 	return available;
@@ -3797,6 +3847,12 @@
 	unsigned long free_pcp = 0;
 	int cpu;
 	struct zone *zone;
+#if defined(CONFIG_AVM_ENHANCED)
+	int v_mem = get_used_vmalloc_mem();
+	if(v_mem >= 0) {
+		printk(KERN_ERR"vmalloc-used: %dKiB (%d pages)\n", v_mem >> 10, v_mem >> PAGE_SHIFT);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 
 	for_each_populated_zone(zone) {
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -6170,8 +6226,23 @@
 			zone->watermark[WMARK_MIN] = tmp;
 		}
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+#ifndef CONFIG_NUMA
+		if ((zone == &zone->zone_pgdat->node_zones[ZONE_NORMAL]) &&
+		    min_max_free_kbytes[0] && min_max_free_kbytes[1]) {
+			zone->watermark[WMARK_LOW] = min_max_free_kbytes[0];
+			zone->watermark[WMARK_HIGH] = min_max_free_kbytes[1];
+			pr_info("Modified watermark limit:low:%lukB\thigh:%lukB\n",
+				K(zone->watermark[WMARK_LOW]),
+				K(zone->watermark[WMARK_HIGH]));
+		} else {
+#endif /* !CONFIG_NUMA */
+			zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
+							(tmp >> 2);
+			zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+							(tmp >> 1);
+#ifndef CONFIG_NUMA
+		}
+#endif /* !CONFIG_NUMA */
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
 				      high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -6312,6 +6383,62 @@
 	return 0;
 }
 
+#ifndef CONFIG_NUMA
+unsigned int default_min_wmark;
+
+static int __init default_minmax_wmark(void)
+{
+	struct zone *zone;
+
+	/* There is only one ZONE_NORMAL */
+	for_each_zone(zone) {
+		if (zone == &zone->zone_pgdat->node_zones[ZONE_NORMAL]) {
+			default_min_wmark = zone->watermark[WMARK_LOW];
+		}
+	}
+
+	return 0;
+}
+fs_initcall(default_minmax_wmark);
+
+/*
+ * min_max_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec()
+ * so that we can call two helper functions whenever min_max_free_kbytes
+ * changes.
+ */
+int min_max_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+	int tmp = min_max_free_kbytes[0];
+	int tmp1 = min_max_free_kbytes[1];
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	if (write) {
+		if (min_max_free_kbytes[0] >= min_max_free_kbytes[1]) {
+			pr_warn("min should be less than max\n");
+			min_max_free_kbytes[0] = tmp;
+			min_max_free_kbytes[1] = tmp1;
+			return -EINVAL;
+		}
+
+		if (min_max_free_kbytes[0] < default_min_wmark) {
+			pr_warn("min must be > %dkB\n", default_min_wmark * 4);
+			min_max_free_kbytes[0] = tmp;
+			min_max_free_kbytes[1] = tmp1;
+			return -EINVAL;
+		}
+
+		setup_per_zone_wmarks();
+	}
+
+	return 0;
+}
+#endif /* !CONFIG_NUMA */
+
 #ifdef CONFIG_NUMA
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
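
Note: for min_max_free_kbytes_sysctl_handler() to be reachable from userspace, a matching ctl_table entry must exist in the vm_table of kernel/sysctl.c. That file is not part of this hunk, so the following is only a minimal sketch of how such an entry would typically be wired up; the procname "min_max_free_kbytes" and its placement are assumptions, not confirmed by this patch:

/*
 * Hypothetical ctl_table entry (kernel/sysctl.c, vm_table) -- a sketch,
 * assuming the knob is exposed as /proc/sys/vm/min_max_free_kbytes and
 * that min_max_free_kbytes[] is declared extern in a shared header.
 * proc_dointvec_minmax() with no extra1/extra2 bounds parses the two
 * integers; the handler above then validates the pair and either rolls
 * the values back or applies them via setup_per_zone_wmarks().
 */
#ifndef CONFIG_NUMA
	{
		.procname	= "min_max_free_kbytes",
		.data		= &min_max_free_kbytes,
		.maxlen		= sizeof(min_max_free_kbytes),
		.mode		= 0644,
		.proc_handler	= min_max_free_kbytes_sysctl_handler,
	},
#endif /* !CONFIG_NUMA */

With such an entry, a write like "echo 2048 4096 > /proc/sys/vm/min_max_free_kbytes" would set the ZONE_NORMAL low/high watermarks directly, while a pair with min >= max, or a min below the boot-time default recorded by default_minmax_wmark(), would be rejected with -EINVAL and the previous values restored.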