--- zzzz-none-000/linux-4.4.60/mm/page_alloc.c	2017-04-08 07:53:53.000000000 +0000
+++ dragonfly-4020-701/linux-4.4.60/mm/page_alloc.c	2018-11-08 13:36:17.000000000 +0000
@@ -239,6 +239,7 @@
 };
 
 int min_free_kbytes = 1024;
+int low_free_kbytes_ratio;
 int user_min_free_kbytes = -1;
 
 static unsigned long __meminitdata nr_kernel_pages;
@@ -2115,6 +2116,9 @@
 {
 	int i;
 	gfp_t gfp_mask;
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	unsigned long current_pc = avm_get_page_current_pc(page);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2133,6 +2137,9 @@
 	for (i = 1; i < (1 << order); i++) {
 		set_page_refcounted(page + i);
 		set_page_owner(page + i, 0, gfp_mask);
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		avm_set_page_current_pc(page + i, current_pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
 	}
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -3192,7 +3199,11 @@
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+			struct zonelist *zonelist, nodemask_t *nodemask
+#if defined(CONFIG_AVM_PAGE_TRACE)
+			, unsigned long pc
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+			)
 {
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
@@ -3272,6 +3283,12 @@
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+	if (likely(page)) {
+		avm_set_page_current_pc(page, pc);
+	}
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -3706,6 +3723,12 @@
 	unsigned long free_pcp = 0;
 	int cpu;
 	struct zone *zone;
+#if defined(CONFIG_AVM_ENHANCED)
+	int v_mem = get_used_vmalloc_mem();
+	if (v_mem >= 0) {
+		printk(KERN_ERR "vmalloc-used: %dKiB (%d pages)\n", v_mem >> 10, v_mem >> PAGE_SHIFT);
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 
 	for_each_populated_zone(zone) {
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -6040,6 +6063,7 @@
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low;
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -6079,8 +6103,22 @@
 			zone->watermark[WMARK_MIN] = tmp;
 		}
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+		if ((zone == &zone->zone_pgdat->node_zones[ZONE_NORMAL]) &&
+		    low_free_kbytes_ratio) {
+			pages_low = (zone->managed_pages *
+				     low_free_kbytes_ratio) / 100;
+			zone->watermark[WMARK_LOW] = pages_low;
+			zone->watermark[WMARK_HIGH] = pages_low +
+						      (pages_low >> 1);
+			pr_info("Modified watermark limit:low:%lukB\thigh:%lukB\n",
+				K(zone->watermark[WMARK_LOW]),
+				K(zone->watermark[WMARK_HIGH]));
+		} else {
+			zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
+						     (tmp >> 2);
+			zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+						      (tmp >> 1);
+		}
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -6221,6 +6259,34 @@
 	return 0;
 }
 
+/*
+ * low_free_kbytes_ratio_sysctl_handler - just a wrapper around
+ * proc_dointvec_minmax() so that we can rebuild the zone watermarks
+ * whenever low_free_kbytes_ratio changes.
+ */
+int low_free_kbytes_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+	int tmp = low_free_kbytes_ratio;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	if (write) {
+		if (low_free_kbytes_ratio && (low_free_kbytes_ratio < 10 ||
+		    low_free_kbytes_ratio > 40)) {
+			pr_warn("low_free_kbytes_ratio is not in the permissible range\n");
+			low_free_kbytes_ratio = tmp;
+			return -EINVAL;
+		}
+		setup_per_zone_wmarks();
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_NUMA
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
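
The CONFIG_AVM_PAGE_TRACE hunks above change the signature of __alloc_pages_nodemask() to carry the allocation call site in a new pc argument, so every caller has to supply that value. The caller-side wiring (normally in the include/linux/gfp.h part of this patch set) is not shown in this excerpt; the sketch below only illustrates the idea. The wrapper name is hypothetical, while _RET_IP_ is the standard kernel helper for the caller's return address.

/*
 * Illustrative sketch, not taken from the patch: a wrapper like this
 * would let a caller record its own call site in the new 'pc' argument.
 */
static inline struct page *
avm_alloc_pages_traced(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask)
{
	/* _RET_IP_ expands to (unsigned long)__builtin_return_address(0) */
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, nodemask,
				      _RET_IP_);
}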
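To make the watermark change in __setup_per_zone_wmarks() concrete, with illustrative numbers: on a system whose ZONE_NORMAL has 32768 managed pages (128 MiB with 4 KiB pages) and low_free_kbytes_ratio set to 10, pages_low = 32768 * 10 / 100 = 3276 pages, so WMARK_LOW becomes 3276 pages (13104 kB via the K() macro) and WMARK_HIGH becomes 3276 + (3276 >> 1) = 4914 pages (19656 kB). With the ratio left at its default of 0, the stock formula derived from min_free_kbytes is used unchanged, and zones other than ZONE_NORMAL always keep the stock formula.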
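The handler is only half of the sysctl: a matching vm_table entry in kernel/sysctl.c is needed to expose it, and that hunk is not part of this excerpt. A minimal sketch of what such an entry could look like, under the assumption that the knob is registered in vm_table with the usual 0644 mode:

/* Hypothetical kernel/sysctl.c vm_table entry; not part of this diff. */
{
	.procname	= "low_free_kbytes_ratio",
	.data		= &low_free_kbytes_ratio,
	.maxlen		= sizeof(low_free_kbytes_ratio),
	.mode		= 0644,
	.proc_handler	= low_free_kbytes_ratio_sysctl_handler,
},

With that in place, "sysctl -w vm.low_free_kbytes_ratio=20" would pin the ZONE_NORMAL low watermark at 20% of its managed pages. The handler accepts 0 (disable, fall back to the stock formula) or a ratio between 10 and 40; any other value is rejected with -EINVAL and the previous value is restored.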