--- zzzz-none-000/linux-4.4.60/mm/page_alloc.c	2017-04-08 07:53:53.000000000 +0000
+++ scorpion-1750e-727/linux-4.4.60/mm/page_alloc.c	2021-02-04 17:41:59.000000000 +0000
@@ -68,6 +68,8 @@
 #include <asm/div64.h>
 #include "internal.h"
 
+#include
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -239,6 +241,7 @@
 };
 
 int min_free_kbytes = 1024;
+int low_free_kbytes_ratio;
 int user_min_free_kbytes = -1;
 
 static unsigned long __meminitdata nr_kernel_pages;
@@ -2111,7 +2114,11 @@
  * Note: this is probably too low level an operation for use in drivers.
  * Please consult with lkml before using this in your driver.
  */
-void split_page(struct page *page, unsigned int order)
+void __split_page(struct page *page, unsigned int order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, unsigned long pc
+#endif
+)
 {
 	int i;
 	gfp_t gfp_mask;
@@ -2125,17 +2132,27 @@
 	 * otherwise free the whole shadow.
 	 */
 	if (kmemcheck_page_is_tracked(page))
-		split_page(virt_to_page(page[0].shadow), order);
+		__split_page(virt_to_page(page[0].shadow), order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, pc
+#endif
+		);
 #endif
 
 	gfp_mask = get_page_owner_gfp(page);
 	set_page_owner(page, 0, gfp_mask);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	avm_set_page_current_pc(page, pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
 		set_page_owner(page + i, 0, gfp_mask);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i, pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	}
 }
-EXPORT_SYMBOL_GPL(split_page);
+EXPORT_SYMBOL_GPL(__split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
@@ -2695,6 +2712,7 @@
 
 	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
 		current->comm, order, gfp_mask);
+	add_taint(TAINT_ALLOC_FAIL, LOCKDEP_STILL_OK);
 
 	dump_stack();
 	if (!should_suppress_show_mem())
@@ -3192,7 +3210,11 @@
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+			struct zonelist *zonelist, nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, unsigned long pc
+#endif
+			)
 {
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
@@ -3272,6 +3294,11 @@
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if (likely(page)) {
+		avm_set_page_current_pc(page, pc);
+	}
+#endif
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -3324,6 +3351,7 @@
 
 EXPORT_SYMBOL(free_pages);
 
+static int page_frag_refill_only_page_order0 __maybe_unused;
 /*
  * Page Fragment:
  *  An arbitrary-length arbitrary-offset area of memory which resides
@@ -3344,8 +3372,9 @@
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
 		    __GFP_NOMEMALLOC;
-	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
-				PAGE_FRAG_CACHE_MAX_ORDER);
+	if (likely(page_frag_refill_only_page_order0 == 0))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+					PAGE_FRAG_CACHE_MAX_ORDER);
 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
 #endif
 	if (unlikely(!page))
@@ -3472,13 +3501,21 @@
 }
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
-		size_t size)
+		size_t size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, unsigned long pc
+#endif
+		)
 {
 	if (addr) {
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
 		unsigned long used = addr + PAGE_ALIGN(size);
 
-		split_page(virt_to_page((void *)addr), order);
+		__split_page(virt_to_page((void *)addr), order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, pc
+#endif
+		);
 		while (used < alloc_end) {
 			free_page(used);
 			used += PAGE_SIZE;
@@ -3506,7 +3543,11 @@
 	unsigned long addr;
 
 	addr = __get_free_pages(gfp_mask, order);
-	return make_alloc_exact(addr, order, size);
+	return make_alloc_exact(addr, order, size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, _RET_IP_
+#endif
+		);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
 
@@ -3526,7 +3567,11 @@
 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
-	return make_alloc_exact((unsigned long)page_address(p), order, size);
+	return make_alloc_exact((unsigned long)page_address(p), order, size
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, 0L
+#endif
+		);
 }
 
 /**
@@ -3706,6 +3751,12 @@
 	unsigned long free_pcp = 0;
 	int cpu;
 	struct zone *zone;
+#if defined(CONFIG_AVM_ENHANCED)
+	int v_mem = get_used_vmalloc_mem();
+	if (v_mem >= 0) {
+		printk(KERN_ERR "vmalloc-used: %dKiB (%d pages)\n", v_mem >> 10, v_mem >> PAGE_SHIFT);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 
 	for_each_populated_zone(zone) {
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -6040,6 +6091,7 @@
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low;
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -6079,8 +6131,22 @@
 			zone->watermark[WMARK_MIN] = tmp;
 		}
 
-		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+		if ((zone == &zone->zone_pgdat->node_zones[ZONE_NORMAL]) &&
+		    low_free_kbytes_ratio) {
+			pages_low = (zone->managed_pages *
+				     low_free_kbytes_ratio) / 100;
+			zone->watermark[WMARK_LOW] = pages_low;
+			zone->watermark[WMARK_HIGH] = pages_low +
+						      (pages_low >> 1);
+			pr_info("Modified watermark limit:low:%lukB\thigh:%lukB\n",
+				K(zone->watermark[WMARK_LOW]),
+				K(zone->watermark[WMARK_HIGH]));
+		} else {
+			zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
+						     (tmp >> 2);
+			zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+						      (tmp >> 1);
+		}
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -6221,6 +6287,34 @@
 	return 0;
 }
 
+/*
+ * low_free_kbytes_ratio_sysctl_handler - wrapper around proc_dointvec_minmax()
+ * so that we can validate the new value and recompute the per-zone
+ * watermarks whenever low_free_kbytes_ratio changes.
+ */
+int low_free_kbytes_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+	int tmp = low_free_kbytes_ratio;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	if (write) {
+		if (low_free_kbytes_ratio && (low_free_kbytes_ratio < 10 ||
+		    low_free_kbytes_ratio > 40)) {
+			pr_warn("low_free_kbytes_ratio must be 0 or in the range 10..40\n");
+			low_free_kbytes_ratio = tmp;
+			return -EINVAL;
+		}
+		setup_per_zone_wmarks();
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_NUMA
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
@@ -6934,3 +7028,22 @@
 	return order < MAX_ORDER;
 }
 #endif
+
+#if defined(CONFIG_ATH79)
+/**
+ * JZ-82551:
+ * AVM repeaters with little memory (64 MByte) may run into an OOM in a
+ * heavily polluted WLAN environment when updating via 'reboot_for_update'.
+ * Reducing the page order to 0 works around this.
+ *
+ */
+static __init int workaround_page_frag_refill_page_order0(void)
+{
+	if (avm_reset_status() == RS_REBOOT_FOR_UPDATE) {
+		pr_err("AVM-WORKAROUND: alloc only Page-Order-0 packets\n");
+		page_frag_refill_only_page_order0 = 1;
+	}
+	return 0;
+}
+subsys_initcall(workaround_page_frag_refill_page_order0);
+#endif
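
Notes (illustrative sketches, not part of the patch):

The hunks above rename split_page() to __split_page() and extend it (and
__alloc_pages_nodemask()) with a caller-PC argument under
CONFIG_AVM_PAGE_TRACE, yet tree-wide callers keep using the old names and
signatures, so matching header glue must exist outside this diff. A minimal
sketch of what that glue presumably looks like; the header placement and the
non-AVM fallback are assumptions:

	#if defined(CONFIG_AVM_PAGE_TRACE)
	void __split_page(struct page *page, unsigned int order, unsigned long pc);

	/* Forward the caller's return address as the page's "current pc". */
	static inline void split_page(struct page *page, unsigned int order)
	{
		__split_page(page, order, _RET_IP_);
	}
	#else
	void __split_page(struct page *page, unsigned int order);
	#define split_page(page, order) __split_page(page, order)
	#endif

Within the diff itself, alloc_pages_exact() passes _RET_IP_ while
alloc_pages_exact_nid() passes 0L, so only the former attributes the split
pages to its caller.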
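
When low_free_kbytes_ratio is non-zero, ZONE_NORMAL's low watermark becomes a
straight percentage of the zone's managed pages, with the high watermark at
1.5x that, replacing the min_free_kbytes-derived values. A standalone sketch
of the arithmetic; the managed_pages figure is made up for a small 64-MByte
board:

	#include <stdio.h>

	int main(void)
	{
		unsigned long managed_pages = 14336;	/* assumed: ~56 MiB of 4 KiB pages */
		int low_free_kbytes_ratio = 10;		/* accepted: 0, or 10..40 */
		unsigned long pages_low, pages_high;

		pages_low  = managed_pages * low_free_kbytes_ratio / 100;
		pages_high = pages_low + (pages_low >> 1);	/* high = 1.5 * low */

		/* K() in page_alloc.c prints pages as kB: x << (PAGE_SHIFT - 10) */
		printf("WMARK_LOW:  %lu pages (%lu kB)\n", pages_low, pages_low << 2);
		printf("WMARK_HIGH: %lu pages (%lu kB)\n", pages_high, pages_high << 2);
		return 0;
	}

With these example numbers, kswapd starts reclaiming below 1433 free pages
(~5.6 MByte) rather than the much smaller min_free_kbytes-derived mark,
keeping more memory free for burst allocations.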
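
The new handler only takes effect once a ctl_table entry points at it, and
that wiring is not in this diff. A hypothetical entry, with the procname and
placement (presumably kernel/sysctl.c's vm table) assumed:

	/* Hypothetical wiring; not part of this patch. */
	static struct ctl_table low_free_kbytes_ratio_table[] = {
		{
			.procname	= "low_free_kbytes_ratio",
			.data		= &low_free_kbytes_ratio,
			.maxlen		= sizeof(low_free_kbytes_ratio),
			.mode		= 0644,
			.proc_handler	= low_free_kbytes_ratio_sysctl_handler,
		},
		{ }
	};

Without .extra1/.extra2 bounds, proc_dointvec_minmax() behaves like plain
proc_dointvec(); the handler enforces its own range, accepting 0 (disabled)
or 10..40 and restoring the previous value with -EINVAL otherwise. Assuming
the procname above, "echo 15 > /proc/sys/vm/low_free_kbytes_ratio" would pin
ZONE_NORMAL's low watermark to 15% of its managed pages, applied immediately
via setup_per_zone_wmarks().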