--- zzzz-none-000/linux-4.4.60/mm/slab_common.c	2017-04-08 07:53:53.000000000 +0000
+++ wasp-540e-714/linux-4.4.60/mm/slab_common.c	2019-07-03 09:21:34.000000000 +0000
@@ -19,6 +19,14 @@
 #include
 #include
 #include
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+#include
+#include
+#include
+
+unsigned int flag_debug_slab_avm_lite;
+static void show_debug_slab_avm_lite(void);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 
 #define CREATE_TRACE_POINTS
 #include
@@ -1176,6 +1184,63 @@
 	return 0;
 }
 module_init(slab_proc_init);
+
+#if defined(CONFIG_AVM_ENHANCED)
+#define SKIP_SPACES(p)    while ((*p == ' ') || (*p == '\t')) p++
+#define SKIP_NONSPACES(p) while (*p && (*p != ' ') && (*p != '\t')) p++
+/*--------------------------------------------------------------------------------*\
+ * dump slabinfo via kernel printk
+ * may be called from any context
+\*--------------------------------------------------------------------------------*/
+void show_slab(void)
+{
+	unsigned int active_objs;
+	char *ptxt;
+	void *p;
+	loff_t pos;
+	char buf[512 + 1];
+	struct seq_file seq;
+
+	memset(&seq, 0, sizeof(seq));
+	seq.size = sizeof(buf) - 1;
+	seq.buf = buf;
+	pos = 0;
+
+	if (!mutex_trylock(&slab_mutex)) {
+		return;
+	}
+	print_slabinfo_header(&seq);
+	p = seq_list_start(&slab_caches, pos);
+
+	seq.buf[seq.count] = 0;
+	printk(KERN_ERR "%s", seq.buf);
+	seq.count = 0;
+	for (;;) {
+		struct kmem_cache *s;
+
+		if (!p || IS_ERR(p)) {
+			break;
+		}
+		s = list_entry(p, struct kmem_cache, list);
+		if (is_root_cache(s)) {
+			cache_show(s, &seq);
+			seq.buf[seq.count] = 0;
+			/*--- print only if active_objs exist: ---*/
+			ptxt = seq.buf;
+			SKIP_NONSPACES(ptxt);
+			SKIP_SPACES(ptxt);
+			active_objs = 0;
+			sscanf(ptxt, "%u", &active_objs);
+			if (active_objs) {
+				printk(KERN_CONT "%s", seq.buf);
+			}
+		}
+		seq.count = 0;
+		p = seq_list_next(p, &slab_caches, &pos);
+	}
+	mutex_unlock(&slab_mutex);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	show_debug_slab_avm_lite();
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
+
 #endif /* CONFIG_SLABINFO */
 
 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
@@ -1278,3 +1343,277 @@
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
+
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+/**
+ * @brief format a second count as a human-readable h/min/s string
+ */
+static char *human_time(char *buf, int len, unsigned long secs)
+{
+	unsigned long seconds, minutes, hours;
+
+	seconds = secs % 60;
+	secs /= 60;
+	minutes = secs % 60;
+	secs /= 60;
+	hours = secs % 24;
+	if (hours) {
+		snprintf(buf, len, "%lu h %2lu min %2lu s", hours, minutes,
+			 seconds);
+	} else if (minutes) {
+		snprintf(buf, len, "%2lu min %2lu s", minutes, seconds);
+	} else {
+		snprintf(buf, len, "%2lu s", seconds);
+	}
+	return buf;
+}
+
+/**
+ * @brief show the top memory-using callers of a cache pool
+ * @param cachep cache pool
+ * @param m seq-file pointer (NULL: print via printk)
+ * @param threshsize only report pools whose memory usage exceeds this (bytes)
+ * @return void
+ */
+#define local_print(seq, args...)			\
+	do {						\
+		if (seq) {				\
+			seq_printf(seq, args);		\
+		} else {				\
+			pr_err(args);			\
+		}					\
+	} while (0)
+static void get_slab_toplist(struct kmem_cache *cachep, struct seq_file *m,
+			     unsigned long threshsize)
+{
+	struct _slab_avm_enh_toplist *toplist;
+	unsigned int i;
+	char tmp[128];
+
+	toplist = kmalloc(sizeof(*toplist), GFP_ATOMIC);
+	if (!toplist)
+		return;
+
+	debug_slab_avm_lite_toplist(toplist, cachep, 0);
+	if ((toplist->sum_count == 0) ||
+	    ((toplist->sum_count * cachep->object_size) < threshsize)) {
+		kfree(toplist);
+		return;
+	}
+	for (i = 0; i < ARRAY_SIZE(toplist->entry); i++) {
+		struct _slab_avm_top_entry *p = &toplist->entry[i];
+		unsigned long long avg = p->sum_time;
+
+		if ((i == 0) ||
+		    ((p->count * cachep->object_size) > threshsize / 4)) {
+			if (i == 0) {
+				local_print(m, "%s: %5lu KiB\n", cachep->name,
+					    (cachep->object_size *
+					     toplist->sum_count) >> 10);
+			}
+			do_div(avg, (p->count * HZ));
+			local_print(m,
+				    " \t%6u entries (%5u KiB - avg-time %s) %pS\n",
+				    p->count,
+				    (cachep->object_size * p->count) >> 10,
+				    human_time(tmp, sizeof(tmp),
+					       (unsigned long)avg),
+				    (void *)p->caller);
+		} else {
+			break;
+		}
+	}
+	if (toplist->ignored) {
+		if (i) {
+			local_print(m,
+				    "... but %d callers ignored (too many different callers)\n",
+				    toplist->ignored);
+		}
+	}
+
+	kfree(toplist);
+}
+static unsigned int thresh_allocsize = SZ_1M;
+
+/**
+ * @brief switch the AVM-specific memory-usage tracking on/off
+ * (the original Linux code switches it off if the alignment is > 8)
+ * @param name cache pool name, NULL means all pools
+ * @param flag_set flags to set
+ * @param flag_unset flags to clear
+ * @return void
+ */
+static void slab_debug_avm_lite(const char *name, unsigned int flag_set,
+				unsigned int flag_unset)
+{
+	struct kmem_cache *cachep = NULL;
+	int changed = 0, entries = 0;
+
+	if (name == NULL) {
+		flag_debug_slab_avm_lite &= ~flag_unset; /*--- on/off for future pools ---*/
+		flag_debug_slab_avm_lite |= flag_set;    /*--- on/off for future pools ---*/
+	}
+	list_for_each_entry(cachep, &slab_caches, list) {
+		if ((name == NULL) || (strcmp(name, cachep->name) == 0)) {
+			unsigned int old_flags = cachep->flags;
+
+			if (cachep->ctor &&
+			    (cachep->flags & SLAB_POISON) &&
+			    (flag_unset & SLAB_POISON)) {
+				/*--- if a ctor exists: do not switch poison off, because nobody calls ctor() while poisoned ---*/
+				pr_err("%s: can't switch off poison for %s, because ctor() exists\n",
+				       __func__, cachep->name);
+				cachep->flags &= ~(flag_unset & ~SLAB_POISON);
+				cachep->flags |= flag_set;
+			} else {
+				cachep->flags &= ~flag_unset;
+				cachep->flags |= flag_set;
+			}
+			entries++;
+			if (old_flags != cachep->flags) {
+				changed++;
+			}
+			if (name) {
+				break;
+			}
+		}
+	}
+	if (entries == 0) {
+		pr_err("error: cachep %s not found\n", name ? name : "all");
+	} else if (changed) {
+		pr_err("changed cachep-flags for %s (%d): %s%s%s%s%s\n",
+		       name ? name : "all", changed,
+		       (flag_set & SLAB_POISON_WRITE_AFTER_FREE) ? "poison+ on (includes write-after-free check) " :
+		       (flag_set & SLAB_POISON) ? "poison on " : "",
+		       (flag_set & SLAB_STORE_USER_AND_TIME) ? "trace on " : "",
+		       (flag_unset & SLAB_POISON) ? "poison off " : "",
+		       (flag_unset & SLAB_POISON_WRITE_AFTER_FREE) ? "poison+ off " : "",
+		       (flag_unset & SLAB_STORE_USER_AND_TIME) ? "trace off" : "");
+	} else {
+		pr_err("no cachep %s changed (flags identical/locked)\n",
+		       name ? name : "all");
+	}
+}
+/**
+ * @brief show memory-usage callers of all cache pools
+ * @param m seq-file pointer (NULL: print via printk)
+ * @param threshsize only cache pools using more memory than this (bytes)
+ * @return void
+ */
+static void proc_show_debug_slab_avm_lite(struct seq_file *m,
+					  unsigned long threshsize)
+{
+	struct kmem_cache *cachep = NULL;
+	struct slabinfo sinfo;
+
+	list_for_each_entry(cachep, &slab_caches, list) {
+		memset(&sinfo, 0, sizeof(sinfo));
+		get_slabinfo(cachep, &sinfo);
+		memcg_accumulate_slabinfo(cachep, &sinfo);
+		if (sinfo.active_objs * cachep->object_size >= threshsize) {
+			get_slab_toplist(cachep, m, threshsize);
+		}
+	}
+}
+/**
+ * @brief show all heavy memory-usage callers via kernel printk
+ */
+static void show_debug_slab_avm_lite(void)
+{
+	if (!mutex_trylock(&slab_mutex)) {
+		return;
+	}
+	proc_show_debug_slab_avm_lite(NULL, thresh_allocsize);
+	mutex_unlock(&slab_mutex);
+}
+/**
+ * @brief show allocator statistics
+ * @param m seq-file pointer
+ * @param priv unused
+ * @return void
+ */
+static void lproc_slab_allocators(struct seq_file *m, void *priv __maybe_unused)
+{
+	/*--- unsigned int threshsize = *((unsigned int *)priv); ---*/
+	mutex_lock(&slab_mutex);
+	proc_show_debug_slab_avm_lite(m, 0);
+	mutex_unlock(&slab_mutex);
+}
+/**
+ * @brief parse a command written to /proc/slab_allocators
+ */
+static int lproc_slab_allocator_on(char *txt, void *priv __maybe_unused)
+{
+	char name[64];
+	unsigned int flag_set = 0, flag_unset = 0;
+	char *p;
+	int on = -1;
+	int mode = SLAB_STORE_USER_AND_TIME;
+
+	SKIP_SPACES(txt);
+	strcpy(name, "none");
+	if (txt == strstr(txt, "all")) {
+		name[0] = 0;
+	} else {
+		p = txt;
+		SKIP_NONSPACES(p);
+		strlcpy(name, txt, min(sizeof(name), (size_t)(p - txt + 1)));
+		txt = p;
+	}
+	if ((p = strstr(txt, "poison+"))) {
+		mode = SLAB_POISON | SLAB_POISON_WRITE_AFTER_FREE;
+		txt = p + sizeof("poison+") - 1;
+	} else if ((p = strstr(txt, "poison"))) {
+		mode = SLAB_POISON;
+		flag_unset = SLAB_POISON_WRITE_AFTER_FREE;
+		txt = p + sizeof("poison") - 1;
+	}
+	if (strstr(txt, "on")) {
+		on = 1;
+	} else if (strstr(txt, "off")) {
+		on = 0;
+	} else if ((p = strstr(txt, "thresh"))) {
+		txt = p + sizeof("thresh") - 1;
+		SKIP_SPACES(txt);
+		sscanf(txt, "%u", &thresh_allocsize);
+		pr_err("slab_allocator: new thresh_allocsize=%u\n",
+		       thresh_allocsize);
+	} else {
+		pr_err("slab_allocator - invalid param: use []/[all] [poison]/[poison+] on/off\n\tthresh (only oom)\n");
+	}
+	if (on >= 0) {
+		if (on) {
+			flag_set = mode;
+		} else {
+			flag_unset = mode;
+		}
+		mutex_lock(&slab_mutex);
+		slab_debug_avm_lite(name[0] ? name : NULL, flag_set,
+				    flag_unset);
+		mutex_unlock(&slab_mutex);
+	}
+	return 0;
+}
+/**
+ * @brief delayed start of the slab_allocator trace (runs in timer context)
+ */
+static void slab_allocator_on(unsigned long data __maybe_unused)
+{
+	pr_err("start slab_allocator-trace now (use cat /proc/slab_allocators)\n");
+	slab_debug_avm_lite(NULL, SLAB_STORE_USER_AND_TIME, 0);
+}
+/**
+ */
+static DEFINE_TIMER(slab_allocator_timer, slab_allocator_on, 0, 0);
+/**
+ * @brief install /proc/slab_allocators
+ * @return 0
+ */
+int __init avm_proc_debug_slab_avm_lite_init(void)
+{
+	/*--- pr_err("%s()\n", __func__); ---*/
+	mod_timer(&slab_allocator_timer, jiffies + 45 * HZ);
+	add_simple_proc_file("slab_allocators", lproc_slab_allocator_on,
+			     lproc_slab_allocators, &thresh_allocsize);
+	return 0;
+}
+late_initcall(avm_proc_debug_slab_avm_lite_init);
+#endif /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
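
Usage sketch (not part of the patch): a minimal user-space program that drives the /proc/slab_allocators interface added above. It assumes the patch is applied and that add_simple_proc_file() wires lproc_slab_allocator_on() as the write handler and lproc_slab_allocators() as the read handler; the pool name "kmalloc-128" and the exact command strings are only examples derived from the parser in lproc_slab_allocator_on().

#include <stdio.h>
#include <stdlib.h>

/* write one command line to /proc/slab_allocators */
static void slab_allocators_cmd(const char *cmd)
{
	FILE *f = fopen("/proc/slab_allocators", "w");

	if (!f) {
		perror("open /proc/slab_allocators for writing");
		exit(EXIT_FAILURE);
	}
	fprintf(f, "%s\n", cmd);
	fclose(f);
}

int main(void)
{
	char line[512];
	FILE *f;

	/* enable caller/time tracing for all cache pools */
	slab_allocators_cmd("all on");
	/* enable poisoning incl. write-after-free check for one pool (example name) */
	slab_allocators_cmd("kmalloc-128 poison+ on");

	/* read back the per-caller usage report (lproc_slab_allocators) */
	f = fopen("/proc/slab_allocators", "r");
	if (!f) {
		perror("open /proc/slab_allocators for reading");
		exit(EXIT_FAILURE);
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

Note that the patch also arms a timer that switches the trace on by itself roughly 45 seconds after boot, so explicit enabling is only needed after it has been switched off again.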