--- zzzz-none-000/linux-5.4.213/mm/slab_common.c	2022-09-15 10:04:56.000000000 +0000
+++ alder-5690pro-762/linux-5.4.213/mm/slab_common.c	2024-08-14 09:02:12.000000000 +0000
@@ -28,6 +28,15 @@
 
 #include "slab.h"
 
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+#include
+#include
+#include
+#endif
+
+#if defined(CONFIG_AVM_ENHANCED)
+#include
+#endif
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
@@ -49,9 +58,10 @@
 /*
  * Set of flags that will prevent slab merging
  */
-#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_KASAN)
+#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | \
+		SLAB_STORE_USER | SLAB_STORE_USER_LITE | \
+		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 		SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
@@ -1071,12 +1081,14 @@
 	s->name = name;
 	s->size = s->object_size = size;
 
+#ifndef CONFIG_AVM_ENHANCED
 	/*
 	 * For power of two sizes, guarantee natural alignment for kmalloc
 	 * caches, regardless of SL*B debugging options.
 	 */
 	if (is_power_of_2(size))
 		align = max(align, size);
+#endif
 	s->align = calculate_alignment(flags, align, size);
 
 	s->useroffset = useroffset;
@@ -1828,3 +1840,219 @@
 	return 0;
 }
 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+/**
+ */
+static char *human_time(char *buf, int len, unsigned long secs)
+{
+	unsigned long seconds, minutes, hours;
+
+	seconds = secs % 60;
+	secs /= 60;
+	minutes = secs % 60;
+	secs /= 60;
+	hours = secs % 24;
+	if (hours) {
+		snprintf(buf, len, "%lu h %2lu min %2lu s", hours, minutes,
+			 seconds);
+	} else if (minutes) {
+		snprintf(buf, len, "%2lu min %2lu s", minutes, seconds);
+	} else {
+		snprintf(buf, len, "%2lu s", seconds);
+	}
+	return buf;
+}
+
+/**
+ * @brief show memory-usage-caller for cachepool
+ * @param cachep cachepool
+ * @param m seq-pointer
+ * @param threshsize only cache-pool-memory-usage greater this
+ * return void
+ */
+static void show_cache_toplist(struct kmem_cache *cachep, struct seq_file *m,
+			       struct _slab_avm_topalloclist *toplist,
+			       unsigned long threshsize)
+{
+	unsigned int i;
+	char tmp[128];
+
+	cache_avm_topalloc_list(toplist, cachep, 0);
+	if ((toplist->sum_count == 0) ||
+	    ((toplist->sum_count * cachep->object_size) < threshsize))
+		return;
+
+	sseq_printf(m, "%s: %5lu KiB (%lu)\n", cachep->name,
+		    (cachep->object_size * toplist->sum_count) >> 10,
+		    toplist->sum_count);
+	for (i = 0; i < toplist->entries; i++) {
+		struct _slab_avm_alloc_entry *p = &toplist->entry[i];
+		unsigned long long avg = p->sum_time;
+
+		if ((p->count * cachep->object_size) > threshsize / ARRAY_SIZE(toplist->entry)) {
+			do_div(avg, (p->count * HZ));
+			sseq_printf(m, " \t%6u entries (%5u KiB - avg-time %s) %pS\n",
+				    p->count,
+				    (cachep->object_size * p->count) >> 10,
+				    human_time(tmp, sizeof(tmp), (unsigned long)avg),
+				    (void *)p->caller);
+		}
+	}
+	if (toplist->ignored && toplist->entries)
+		sseq_printf(m, "... but %d callers ignored (too much different callers)\n",
+			    toplist->ignored);
+}
+/**
+ * @brief show all memory-usage-caller
+ * @param m seq-pointer
+ * @param threshsize only cachep greater this
+ * return void
+ */
+static void proc_show_slab_allocator(struct seq_file *m,
+				     struct _slab_avm_topalloclist *ptoplist,
+				     unsigned long threshsize)
+{
+	struct kmem_cache *cachep = NULL;
+	struct slabinfo sinfo;
+	unsigned long sum = 0;
+	unsigned int init = 0;
+
+	list_for_each_entry(cachep, &slab_caches, list) {
+
+		if (threshsize && (init == 0)) {
+			sseq_printf(m, "show all cache-pools greater %lu KiB:\n", threshsize >> 10);
+			init = 1;
+		}
+		memset(&sinfo, 0, sizeof(sinfo));
+		get_slabinfo(cachep, &sinfo);
+		memcg_accumulate_slabinfo(cachep, &sinfo);
+		if ((sinfo.active_objs * cachep->object_size) >= threshsize) {
+			show_cache_toplist(cachep, m, ptoplist, threshsize);
+		}
+		sum += sinfo.active_objs * cachep->object_size;
+	}
+	sseq_printf(m, "slab-pools use %lu MiB (netto)\n", sum >> 20);
+}
+
+static struct _slab_avm_topalloclist local_toplist;
+unsigned int slab_track_time;
+
+/**
+ * @brief show allocator-statistic
+ * @param m seq-pointer
+ * @param priv
+ * return void
+ */
+static void lproc_slab_allocators(struct seq_file *m, void *priv __maybe_unused)
+{
+	struct _slab_avm_topalloclist *ptoplist;
+
+	ptoplist = kzalloc(sizeof(struct _slab_avm_topalloclist), GFP_KERNEL);
+	if (ptoplist == NULL) {
+		return;
+	}
+	mutex_lock(&slab_mutex);
+	proc_show_slab_allocator(m, ptoplist, 0);
+	mutex_unlock(&slab_mutex);
+	kfree(ptoplist);
+}
+
+/**
+ * @brief delayed slab_allocator-trace on timer-context
+ */
+static void slab_allocator_on(struct timer_list *unused __maybe_unused)
+{
+	pr_err("start slab_allocator-trace now (use cat /proc/slab_allocators)\n");
+	slab_track_time = 1;
+}
+
+static DEFINE_TIMER(slab_allocator_timer, slab_allocator_on);
+/**
+ * @brief install /proc/slab_allocators
+ * return 0
+ */
+int __init avm_proc_slaballocator(void)
+{
+	add_simple_proc_file("slab_allocators", NULL,
+			     lproc_slab_allocators, NULL);
+
+	mod_timer(&slab_allocator_timer, jiffies + 45 * HZ);
+	return 0;
+}
+late_initcall(avm_proc_slaballocator);
+#endif/*--- #if defined(CONFIG_SLUB_AVM_ALLOC_LIST) ---*/
+
+#ifdef CONFIG_SLABINFO
+
+#define SKIP_SPACES(p)    { while ((*p == ' ') || (*p == '\t')) p++; }
+#define SKIP_NONSPACES(p) { while (*p && (*p != ' ') && (*p != '\t')) p++; }
+
+/**
+ * print out /proc/slabinfo per printk
+ */
+static void __show_slab(void)
+{
+	unsigned int active_objs = 0;
+	char *ptxt;
+	void *p;
+	loff_t pos;
+	char buf[512 + 1];
+	struct seq_file seq;
+
+	memset(&seq, 0, sizeof(seq));
+	seq.size = sizeof(buf) - 1;
+	seq.buf = buf;
+	pos = 0;
+
+	print_slabinfo_header(&seq);
+	p = seq_list_start(&slab_caches, pos);
+
+	seq.buf[seq.count] = 0;
+	pr_err("%s", seq.buf);
+	seq.count = 0;
+	for (;;) {
+		struct kmem_cache *s;
+
+		if (!p || IS_ERR(p)) {
+			break;
+		}
+		s = list_entry(p, struct kmem_cache, list);
+		if (is_root_cache(s)) {
+			cache_show(s, &seq);
+			seq.buf[seq.count] = 0;
+			/* only if active_objs exist: */
+			ptxt = seq.buf;
+			SKIP_NONSPACES(ptxt);
+			SKIP_SPACES(ptxt);
+			if (sscanf(ptxt, "%u", &active_objs) && active_objs)
+				pr_err("%s", seq.buf);
+		}
+		seq.count = 0;
+		p = seq_list_next(p, &slab_caches, &pos);
+	}
+}
+#endif /* CONFIG_SLABINFO */
+
+/**
+ * @brief show slabinfo and all heavy memory-usage-caller
+ * use kernel-printk
+ * used in oom-notifier
+ */
+void show_slab(void)
+{
+	char buf[256];
+	struct semi_seq sseq;
+	struct seq_file *seq;
+
+	if (!mutex_trylock(&slab_mutex)) {
+		return;
+	}
+	seq = sseq_create(&sseq, KERN_ERR, buf, sizeof(buf));
+#ifdef CONFIG_SLABINFO
+	__show_slab();
+#endif /* CONFIG_SLABINFO */
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+	proc_show_slab_allocator(seq, &local_toplist, SZ_1M);
+#endif
+	mutex_unlock(&slab_mutex);
+}