--- zzzz-none-000/linux-2.6.39.4/mm/slab.c 2011-08-03 19:43:28.000000000 +0000
+++ puma6-atom-6490-729/linux-2.6.39.4/mm/slab.c 2021-11-10 13:38:18.000000000 +0000
@@ -115,6 +115,16 @@
 #include
 #include
 #include
+#if defined(CONFIG_AVM_ENHANCED)
+#include
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+static void show_debug_slab_avm_lite(void);
+static unsigned int flag_debug_slab_avm_lite;
+#include
+#include
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/

 #include
 #include
@@ -229,6 +239,9 @@
 		};
 		struct slab_rcu __slab_cover_slab_rcu;
 	};
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	void *slab_enh;
+#endif /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 };

 /*
@@ -543,6 +556,53 @@
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }

+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+struct _slab_enh {
+	unsigned long caller;
+	unsigned long jiffies;
+	unsigned long free_caller;
+	unsigned long free_jiffies;
+};
+/**
+ * @brief set caller in slab_enh-array
+ * @param cachep cachepool-pointer
+ * @param slabp slab-pointer
+ * @param objnr index for slab_enh-array
+ * @param caller caller
+ * @param free_caller free_caller
+ * @return void
+ */
+static __always_inline void __set_slab_enh(struct kmem_cache *cachep, struct slab *slabp, unsigned int objnr,
+					   void *caller, void *free_caller) {
+	struct _slab_enh *pslab_enh = (struct _slab_enh *)slabp->slab_enh + objnr;
+	if((caller == NULL) && (free_caller == NULL)) {
+		memset(pslab_enh, 0, sizeof(*pslab_enh));
+	} else if(caller) {
+		pslab_enh->caller = (unsigned long)caller;
+		pslab_enh->jiffies = jiffies;
+	} else if(free_caller) {
+		pslab_enh->free_caller = (unsigned long)free_caller;
+		pslab_enh->free_jiffies = jiffies;
+	}
+#if 0
+	if(caller == 0) {
+		printk(KERN_DEBUG"%s %s: %pS\n", __func__, cachep->name, (void *)caller);
+		dump_stack();
+	}
+#endif
+}
+/**
+ * @brief get slab_enh-entry
+ * @param objnr index for slab_enh-array
+ * @param slabp slab-pointer
+ * @param caller caller
+ * @return slab_enh-entry
+ */
+static inline struct _slab_enh *get_slab_enh(struct slab *slabp, unsigned int objnr) {
+	return (struct _slab_enh *)slabp->slab_enh + objnr;
+}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
 /*
  * These are the default caches for kmalloc. Custom caches can have other sizes.
  */
@@ -720,7 +780,11 @@

 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		     + nr_objs * sizeof(struct _slab_enh)
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+		     , align);
 }

 /*
@@ -765,7 +829,11 @@
 	 * into account.
 	 */
 	nr_objs = (slab_size - sizeof(struct slab)) /
-		  (buffer_size + sizeof(kmem_bufctl_t));
+		  (buffer_size + sizeof(kmem_bufctl_t)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		   + sizeof(struct _slab_enh)
+#endif /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+		  );

 	/*
 	 * This calculated number will be either the right
@@ -789,7 +857,7 @@
 static void __slab_error(const char *function, struct kmem_cache *cachep,
 			char *msg)
 {
-	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
+	printk(KERN_ERR "slab error in %s(): cache '%s': %s\n",
 	       function, cachep->name, msg);
 	dump_stack();
 }
@@ -1514,6 +1582,9 @@
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				      cache_cache.num * sizeof(struct _slab_enh) +
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 				      sizeof(struct slab), cache_line_size());

 	/* 2+3) create the kmalloc caches */
@@ -1885,8 +1956,8 @@
 			/* Print header */
 			if (lines == 0) {
 				printk(KERN_ERR
-					"Slab corruption: %s start=%p, len=%d\n",
-					cachep->name, realobj, size);
+					"Slab corruption (%s): %s start=%p, len=%d\n",
+					print_tainted(), cachep->name, realobj, size);
 				print_objinfo(cachep, objp, 0);
 			}
 			/* Hexdump the affected line */
@@ -2047,7 +2118,11 @@
 		 * looping condition in cache_grow().
 		 */
 		offslab_limit = size - sizeof(struct slab);
-		offslab_limit /= sizeof(kmem_bufctl_t);
+		offslab_limit /= sizeof(kmem_bufctl_t)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 + sizeof(struct _slab_enh)
+#endif /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 ;

 		if (num > offslab_limit)
 			break;
@@ -2180,7 +2255,9 @@
 		       name);
 		BUG();
 	}
-
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	flags |= flag_debug_slab_avm_lite;
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well. Please see cpuup_callback
@@ -2364,6 +2441,9 @@
 			goto oops;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+			  + cachep->num * sizeof(struct _slab_enh)
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 			  + sizeof(struct slab), align);

 	/*
@@ -2378,7 +2458,11 @@
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
 		slab_size =
-		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+		    cachep->num * sizeof(kmem_bufctl_t)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		    + cachep->num * sizeof(struct _slab_enh)
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+		    + sizeof(struct slab);

 #ifdef CONFIG_PAGE_POISONING
 		/* If we're going to use the generic kernel_map_pages()
@@ -2675,6 +2759,10 @@
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	slabp->slab_enh = (void *)((kmem_bufctl_t *)(slabp + 1) + cachep->num);
+/*--- DBG_TRC("%s: slabp %s: num=%u obj_size=%u 0x%p 0x%p 0x%p 0x%p\n", __func__, cachep->name, cachep->num, obj_size(cachep), slabp, slabp + 1, slabp->slab_enh, slabp->s_mem); ---*/
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 	slabp->nodeid = nodeid;
 	slabp->free = 0;
 	return slabp;
@@ -2692,6 +2780,9 @@

 	for (i = 0; i < cachep->num; i++) {
 		void *objp = index_to_obj(cachep, slabp, i);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		__set_slab_enh(cachep, slabp, i, NULL, NULL);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2997,6 +3088,117 @@
 		BUG();
 	}
 }
+#elif defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+#define kfree_debugcheck(x) do { } while(0)
+
+#define snprintf_add(ptxt, txtlen, args...) if(ptxt == NULL) printk(args); else { int local_add_len;\
+	if((local_add_len = snprintf(ptxt, txtlen, args)) > 0) { \
+		int tail = min((int)txtlen, local_add_len); \
+		(ptxt) += tail, (txtlen) -= tail; \
+	} \
+	}
+/**
+ * @brief dump integer array and break after line_c entries and mark entry mark_idx
+ * @param p array
+ * @param entries elements of array
+ * @param line_c break after line_c-elements
+ * @param mark_idx mark element (-1 if not used)
+ */
+static void dump_int_array(unsigned int *p, unsigned int entries, unsigned line_c, int mark_idx) {
+	char tmp[256], *ptxt;
+	unsigned int i, ii, txtlen;
+	unsigned int *pstart;
+	tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp);
+	pstart = p;
+	ii = 0;
+	for(i = 0; i < entries; i++) {
+		snprintf_add(ptxt, txtlen, "%s%08x%s", i == mark_idx ? "\b<" : "", *p++, i == mark_idx ? ">" : " ");
+		ii++;
+		if(ii >= line_c) {
+			printk(KERN_ERR"%p: %s\n", pstart, tmp);
+			tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp);
+			pstart = p;
+			ii = 0;
+		}
+	}
+	if(tmp[0]) {
+		printk(KERN_ERR"%p: %s\n", pstart, tmp);
+	}
+}
+/**
+ * @brief dump slab and mark objnr in slab_bufctl/slab_enh
+ * @param prefix
+ */
+static void dump_slab(char *prefix, struct slab *slabp, struct kmem_cache *cachep, int objnr) {
+	printk(KERN_ERR"%s:slabhead %p:\n", prefix, slabp);
+	dump_int_array((unsigned int *)slabp, sizeof(struct slab) / sizeof(unsigned int), 8, -1);
+	printk(KERN_ERR"slab_bufctl: %p num=%u\n", slab_bufctl(slabp), cachep->num);
+	dump_int_array((unsigned int *)slab_bufctl(slabp), cachep->num, 8, objnr);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	printk(KERN_ERR"slab_enh: %p\n", slab_bufctl(slabp) + cachep->num);
+	dump_int_array((unsigned int *)((slab_bufctl(slabp) + cachep->num)),
+		       cachep->num * sizeof(struct _slab_enh) / sizeof(unsigned int), 8,
+		       objnr == -1 ?
+		       objnr : objnr * sizeof(struct _slab_enh) / sizeof(unsigned int));
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+}
+
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+				   void *caller)
+{
+	struct page *page;
+	unsigned int objnr, bufctl;
+	struct slab *slabp;
+
+	BUG_ON(virt_to_cache(objp) != cachep);
+	page = virt_to_head_page(objp);
+	slabp = page_get_slab(page);
+
+	objnr = obj_to_index(cachep, slabp, objp);
+	bufctl = slab_bufctl(slabp)[objnr];
+	if (unlikely(bufctl != BUFCTL_ACTIVE)) {
+		char tmp[256];
+		if((bufctl == BUFCTL_FREE) || (bufctl <= cachep->num)) {
+			snprintf(tmp, sizeof(tmp), "double free detected: slabp=%p objp=%p objnr=%u bufctl=0x%x"
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 "\n(last freed from %pS before %lu jiffies)"
+				 "\n(alloced by %pS before %lu jiffies)"
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 , slabp, objp, objnr, bufctl
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 , (void *)get_slab_enh(slabp, objnr)->free_caller,
+				 jiffies - get_slab_enh(slabp, objnr)->free_jiffies,
+				 (void *)get_slab_enh(slabp, objnr)->caller,
+				 jiffies - get_slab_enh(slabp, objnr)->jiffies
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 );
+			slab_error(cachep, tmp);
+			dump_slab("double free detected", slabp, cachep, objnr);
+		} else {
+			snprintf(tmp, sizeof(tmp), "corrupt-slab: slabp=%p objp=%p objnr=%u bufctl=0x%x",
+				 slabp, objp, objnr, bufctl);
+			slab_error(cachep, tmp);
+			dump_slab("corrupt slab", slabp, cachep, objnr);
+		}
+	}
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) {
+		__set_slab_enh(cachep, slabp, objnr, NULL, caller);
+	}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+	return objp;
+}
+
+static void slab_corrupt(struct kmem_cache *cachep, struct slab *slabp_err, int do_panic) {
+	char tmp[128];
+	snprintf(tmp, sizeof(tmp), "corrupt-slab: slabp=%p slab->inuse=%u incorrect", slabp_err, slabp_err->inuse);
+	slab_error(cachep, tmp);
+	dump_slab("corrupt-slab:", slabp_err, cachep, -1);
+	if(do_panic) {
+		panic(tmp);
+	}
+}
+#define check_slabp(x,y) do { } while(0)
 #else
 #define kfree_debugcheck(x) do { } while(0)
 #define cache_free_debugcheck(x,objp,z) (objp)
@@ -3055,7 +3257,13 @@
 		 * there must be at least one object available for
 		 * allocation.
 		 */
+#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+		if(unlikely(slabp->inuse >= cachep->num)) {
+			slab_corrupt(cachep, slabp, 1);
+		}
+#else
 		BUG_ON(slabp->inuse >= cachep->num);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) ---*/

 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3139,7 +3347,7 @@
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
+#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK)
 	{
 		struct slab *slabp;
 		unsigned objnr;
@@ -3161,8 +3369,31 @@
 	return objp;
 }
 #else
+#if defined(CONFIG_NET_DEBUG_SKBUFF_LEAK) || defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+					  gfp_t flags, void *objp, void *caller)
+{
+	if (!objp)
+		return objp;
+	{
+		struct slab *slabp;
+		unsigned objnr;
+
+		slabp = page_get_slab(virt_to_head_page(objp));
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) {
+			__set_slab_enh(cachep, slabp, objnr, caller, NULL);
+		}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	}
+	return objp;
+}
+#else
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
+#endif

 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
@@ -3728,7 +3959,13 @@
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node,
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 (void *)_RET_IP_
+#else/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 0
+#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 );
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3779,7 +4016,13 @@
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags,
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+			    (void *)_RET_IP_
+#else/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+			    0
+#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+			    );
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -4368,12 +4611,22 @@
 	.release	= seq_release,
 };

-#ifdef CONFIG_DEBUG_SLAB_LEAK
+#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK)

-static void *leaks_start(struct seq_file *m, loff_t *pos)
+static void show_symbol(struct seq_file *m, unsigned long address)
 {
-	mutex_lock(&cache_chain_mutex);
-	return seq_list_start(&cache_chain, *pos);
+#ifdef CONFIG_KALLSYMS
+	unsigned long offset, size;
+	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
+
+	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
+		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+		if (modname[0])
+			seq_printf(m, " [%s]", modname);
+		return;
+	}
+#endif
+	seq_printf(m, "%p", (void *)address);
 }

 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4406,6 +4659,17 @@
 	return 1;
 }
+
+#endif
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&cache_chain_mutex);
+	return seq_list_start(&cache_chain, *pos);
+}
+
 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 {
 	void *p;
@@ -4420,22 +4684,6 @@
 	}
 }

-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-	unsigned long offset, size;
-	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
-
-	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
-		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
-		if (modname[0])
-			seq_printf(m, " [%s]", modname);
-		return;
-	}
-#endif
-	seq_printf(m, "%p", (void *)address);
-}
-
 static int leaks_show(struct seq_file *m, void *p)
 {
 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
@@ -4528,15 +4776,183 @@
 };
 #endif

+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+
+#include
+
+static void *skbuff_leaks_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&cache_chain_mutex);
+	return seq_list_start(&cache_chain, *pos);
+}
+
+static void skbuff_handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		struct sk_buff *skb;
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		skb = (struct sk_buff *)p;
+		if (!add_caller(n, (unsigned long)skb->last_user))
+			return;
+	}
+}
+
+static int skbuff_leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (strncmp(cachep->name, "skbuff_", 7) != 0)
+		return 0;
+
+	/* OK, we can do it */
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each_entry(slabp, &l3->slabs_full, list)
+			skbuff_handle_slab(n, cachep, slabp);
+		list_for_each_entry(slabp, &l3->slabs_partial, list)
+			skbuff_handle_slab(n, cachep, slabp);
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+
+	return 0;
+}
+
+static const struct seq_operations skbuffstats_op = {
+	.start = skbuff_leaks_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = skbuff_leaks_show,
+};
+
+static int skbuffstats_open(struct inode *inode, struct file *file)
+{
+	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	int ret = -ENOMEM;
+	if (n) {
+		ret = seq_open(file, &skbuffstats_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+			m->private = n;
+			n = NULL;
+		}
+		kfree(n);
+	}
+	return ret;
+}
+
+static const struct file_operations proc_skbuffstats_operations = {
+	.open = skbuffstats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+#endif /* CONFIG_NET_DEBUG_SKBUFF_LEAK */
+
 static int __init slab_proc_init(void)
 {
 	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
 #endif
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+	proc_create("skbuff_last_user", 0, NULL, &proc_skbuffstats_operations);
+#endif
 	return 0;
 }
 module_init(slab_proc_init);
+
+#define SKIP_SPACES(p) while((*p == ' ') || (*p == '\t')) p++
+#define SKIP_NONSPACES(p) while(*p && (*p != ' ') && (*p != '\t')) p++
+/*--------------------------------------------------------------------------------*\
+ * kernel-printk-show for slabinfo
+ * any context allowed
+\*--------------------------------------------------------------------------------*/
+void show_slab(void) {
+	unsigned int active_objs;
+	char *ptxt;
+	void *p;
+	loff_t pos;
+	char buf[512 + 1];
+	struct seq_file seq;
+
+	memset(&seq, 0, sizeof(seq));
+	seq.size = sizeof(buf) - 1;
+	seq.buf = buf;
+	pos = 0;
+
+	if (!mutex_trylock(&cache_chain_mutex)) {
+		return;
+	}
+	print_slabinfo_header(&seq);
+	p = seq_list_start(&cache_chain, pos);
+
+	seq.buf[seq.count] = 0;
+	printk(KERN_ERR"%s", seq.buf), seq.count = 0;
+	for(;;) {
+		if (!p || IS_ERR(p)) {
+			break;
+		}
+		s_show(&seq, p);
+		seq.buf[seq.count] = 0;
+		/*--- only if active_objs exist: ---*/
+		ptxt = seq.buf;
+		SKIP_NONSPACES(ptxt);
+		SKIP_SPACES(ptxt);
+		sscanf(ptxt, "%u", &active_objs);
+		if(active_objs) {
+			printk(KERN_CONT"%s", seq.buf);
+		}
+		seq.count = 0;
+		p = seq_list_next(p, &cache_chain, &pos);
+	}
+	mutex_unlock(&cache_chain_mutex);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	show_debug_slab_avm_lite();
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+}
 #endif

 /**
@@ -4560,3 +4976,403 @@
 	return obj_size(virt_to_cache(objp));
 }
 EXPORT_SYMBOL(ksize);
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+struct _slab_avm_top_entry {
+	unsigned long caller;
+	unsigned int count;
+	unsigned long long sum_time;
+};
+struct _slab_avm_enh_toplist {
+	struct _slab_avm_top_entry entry[96];
+	unsigned int entries;
+	unsigned int ignored;    /*--- not enough entries ---*/
+	unsigned long sum_count; /*--- all caller-count + ignored ---*/
+};
+/**
+ * @brief mark caller in the toplist (sorted by caller)
+ * divide et impera (binary search) to build the caller history, like add_caller() but for this struct
+ * @param ptoplist pointer for toplist to fill
+ * @param caller caller
+ * @return != 0 if no place in toplist
+ */
+#define TOP_TOIDX(p) ((p) - (&ptoplist->entry[0]))
+static int mark_in_toplist(struct _slab_avm_enh_toplist *ptoplist, unsigned long caller, unsigned long act_diff) {
+	unsigned int i, elements, idx;
+	struct _slab_avm_top_entry *q, *p;
+	elements = ptoplist->entries;
+	p = &ptoplist->entry[0];
+	while(elements) {
+		i = elements / 2;
+		q = &p[i];
+		if(q->caller == caller) {
+			q->count++;
+			q->sum_time += (unsigned long long)act_diff;
+			return 0;
+		}
+		if (q->caller > caller) {
+			elements = i;
+		} else {
+			p = q + 1;
+			elements -= i + 1;
+		}
+	}
+	if(ptoplist->entries >= ARRAY_SIZE(ptoplist->entry)) {
+		ptoplist->ignored++;
+		return 1;
+	}
+	idx = TOP_TOIDX(p);
+	memmove(&p[1], p, (ptoplist->entries - idx) * sizeof(ptoplist->entry[0]));
+	ptoplist->entries++;
+	ptoplist->entry[idx].caller = caller;
+	ptoplist->entry[idx].sum_time = act_diff;
+	ptoplist->entry[idx].count = 1;
+	return 0;
+}
+/**
+ * @brief sum caller-toplist entries
+ * @param ptoplist pointer to toplist
+ * @return sum of all entry counts
+ */
+static unsigned long sum_toplist_entries(struct _slab_avm_enh_toplist *ptoplist) {
+	unsigned long sum_count = 0;
+	unsigned int i;
+
+	for(i = 0; i < ptoplist->entries; i++) {
+		sum_count += ptoplist->entry[i].count;
+	}
+	return sum_count;
+}
+/**
+ * @brief sort caller-toplist (greater first)
+ * @param ptoplist pointer for toplist to fill
+ * @return void
+ */
+static void sort_toplist(struct _slab_avm_enh_toplist *ptoplist) {
+	unsigned int i, max_count, max_idx, idx = 0;
+
+	for(;;) {
+		struct _slab_avm_top_entry tmp;
+		max_count = 0;
+		for(i = idx; i < ptoplist->entries; i++) {
+			if(ptoplist->entry[i].count > max_count) {
+				max_count = ptoplist->entry[i].count;
+				max_idx = i;
+			}
+		}
+		if(max_count == 0) {
+			break;
+		}
+		/*--- swap ---*/
+		memcpy(&tmp, &ptoplist->entry[idx], sizeof(tmp));
+		memcpy(&ptoplist->entry[idx], &ptoplist->entry[max_idx], sizeof(tmp));
+		memcpy(&ptoplist->entry[max_idx], &tmp, sizeof(tmp));
+		idx++;
+	}
+}
+/**
+ * @brief fill toplist for cachep
+ * @param ptoplist pointer for toplist to fill
+ * @param cachep cachepool
+ * @param tresh_jiffiesdiff only if caller older than ...
+ * @return void
+ */
+void debug_slab_avm_lite_toplist(struct _slab_avm_enh_toplist *ptoplist, const struct kmem_cache *cachep,
+				 unsigned long tresh_jiffiesdiff __maybe_unused){
+	unsigned long flags;
+	unsigned long act_jiffies = jiffies;
+	unsigned long act_diff;
+	unsigned int ii, i;
+	int node;
+	struct _slab_enh *slab_enhp;
+	struct kmem_list3 *l3;
+	struct slab *slabp;
+	struct list_head *slablist[2];
+
+	memset(ptoplist, 0, sizeof(*ptoplist));
+
+	for_each_online_node(node) {
+
+		l3 = cachep->nodelists[node];
+		if (!l3) {
+			continue;
+		}
+		slablist[0] = &l3->slabs_full;
+		slablist[1] = &l3->slabs_partial;
+		spin_lock_irqsave(&l3->list_lock, flags);
+		for(i = 0; i < ARRAY_SIZE(slablist); i++) {
+			list_for_each_entry(slabp, slablist[i], list) {
+				for (ii = 0; ii < cachep->num; ii++) {
+					if (slab_bufctl(slabp)[ii] != BUFCTL_ACTIVE) {
+						continue;
+					}
+					slab_enhp = get_slab_enh(slabp, ii);
+					if(slab_enhp->caller == 0) {
+						continue;
+					}
+					act_diff = act_jiffies - slab_enhp->jiffies;
+					if(act_diff <= tresh_jiffiesdiff) {
+						/*--- too young ---*/
+						continue;
+					}
+					mark_in_toplist(ptoplist, slab_enhp->caller, act_diff);
+				}
+			}
+		}
+		spin_unlock_irqrestore(&l3->list_lock, flags);
+	}
+	sort_toplist(ptoplist);
+	ptoplist->sum_count = sum_toplist_entries(ptoplist) + ptoplist->ignored;
+}
+/**
+ */
+static char *human_time(char *buf, int len, unsigned long secs) {
+	unsigned long seconds, minutes, hours;
+
+	seconds = secs % 60; secs /= 60;
+	minutes = secs % 60; secs /= 60;
+	hours = secs % 24;
+	if(hours) {
+		snprintf(buf, len, "%lu h %2lu min %2lu s", hours, minutes, seconds);
+	} else if(minutes) {
+		snprintf(buf, len, "%2lu min %2lu s", minutes, seconds);
+	} else {
+		snprintf(buf, len, "%2lu s", seconds);
+	}
+	return buf;
+}
+
+/**
+ * @brief show memory-usage-caller for cachepool
+ * @param cachep cachepool
+ * @param m seq-pointer
+ * @param threshsize only cache-pool-memory-usage greater this
+ * @return void
+ */
+#define local_print(seq, args ...) if(seq) { seq_printf(seq, args); } else { pr_err(args); }
+static void get_slab_toplist(struct kmem_cache *cachep, struct seq_file *m, unsigned long threshsize) {
+	struct _slab_avm_enh_toplist toplist;
+	unsigned int i;
+	char tmp[128];
+
+	debug_slab_avm_lite_toplist(&toplist, cachep, 0);
+	if((toplist.sum_count == 0) || ((toplist.sum_count * obj_size(cachep)) < threshsize)) {
+		return;
+	}
+	for(i = 0; i < ARRAY_SIZE(toplist.entry); i++) {
+		struct _slab_avm_top_entry *p = &toplist.entry[i];
+		unsigned long long avg = p->sum_time;
+		if((i == 0) || (p->count * obj_size(cachep)) > threshsize / 4) {
+			if(i == 0) {
+				local_print(m, "%s: %5lu KiB\n", cachep->name, (obj_size(cachep) * toplist.sum_count) >> 10);
+			}
+			do_div(avg, (p->count * HZ));
+			local_print(m, " \t%6u entries (%5u KiB - avg-time %s) %pS\n", p->count,
+				    (obj_size(cachep) * p->count) >> 10,
+				    human_time(tmp, sizeof(tmp), (unsigned long)avg),
+				    (void *)p->caller);
+		} else {
+			break;
+		}
+	}
+	if(toplist.ignored) {
+		if(i) {
+			local_print(m, "... but %d callers ignored (too many different callers)\n", toplist.ignored);
+		}
+	}
+}
+static unsigned int thresh_allocsize = 1 << 20;
+
+/**
+ * @brief switch on avm-specific memory-usage-feature
+ * (the original linux-code switched it off if alignment > 8)
+ * @param on 0/1
+ * @return void
+ */
+static void slab_debug_avm_lite_on(unsigned int on) {
+	struct kmem_cache *cachep = NULL;
+
+	flag_debug_slab_avm_lite = on ? SLAB_STORE_USER_AND_TIME : 0; /*--- trace on/off for future pools ---*/
+	list_for_each_entry(cachep, &cache_chain, next) {
+		if(on) {
+			/*--- pr_err("%s: %s trace on\n", __func__, cachep->name); ---*/
+			cachep->flags |= SLAB_STORE_USER_AND_TIME;
+		} else {
+			/*--- pr_err("%s: %s trace off\n", __func__, cachep->name); ---*/
+			cachep->flags &= ~SLAB_STORE_USER_AND_TIME;
+		}
+	}
+}
+/**
+ */
+static unsigned long get_active_obj_per_cache(struct kmem_cache *cachep) {
+	int node;
+	struct slab *slabp;
+	unsigned long active_objs = 0;
+	struct kmem_list3 *l3;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		if(!spin_trylock_irq(&l3->list_lock)) {
+			continue;
+		}
+		list_for_each_entry(slabp, &l3->slabs_full, list) {
+			active_objs += cachep->num;
+		}
+		list_for_each_entry(slabp, &l3->slabs_partial, list) {
+			active_objs += slabp->inuse;
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+	return active_objs;
+}
+/**
+ * @brief show all memory-usage-caller
+ * @param m seq-pointer
+ * @param threshsize only cachep greater this
+ * @return void
+ */
+static void proc_show_debug_slab_avm_lite(struct seq_file *m, unsigned long threshsize) {
+	struct kmem_cache *cachep = NULL;
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		if(get_active_obj_per_cache(cachep) * obj_size(cachep) >= threshsize) {
+			get_slab_toplist(cachep, m, threshsize);
+		}
+	}
+}
+/**
+ * @brief show all heavy memory-usage-caller
+ * use kernel-printk
+ */
+static void show_debug_slab_avm_lite(void) {
+	if (!mutex_trylock(&cache_chain_mutex)) {
+		return;
+	}
+	proc_show_debug_slab_avm_lite(NULL, thresh_allocsize);
+	mutex_unlock(&cache_chain_mutex);
+}
+/**
+ * @brief show allocator-statistic
+ * @param m seq-pointer
+ * @param priv
+ * @return void
+ */
+static void lproc_slab_allocators(struct seq_file *m, void *priv __maybe_unused){
+/*--- unsigned int threshsize = *((unsigned int *)priv); ---*/
+	mutex_lock(&cache_chain_mutex);
+	proc_show_debug_slab_avm_lite(m, 0);
+	mutex_unlock(&cache_chain_mutex);
+}
+/**
+ */
+static int lproc_slab_allocator_on(char *txt, void *priv __maybe_unused) {
+
+	SKIP_SPACES(txt);
+	if(strstr(txt, "on")) {
+		pr_err("slab_allocator: trace on\n");
+		slab_debug_avm_lite_on(1);
+	} else if(strstr(txt, "off")) {
+		pr_err("slab_allocator: trace off\n");
+		slab_debug_avm_lite_on(0);
+	} else if(strstr(txt, "thresh")) {
+		txt += sizeof("thresh") - 1;
+		SKIP_SPACES(txt);
+		sscanf(txt, "%u", &thresh_allocsize);
+		pr_err("slab_allocator: new thresh_allocsize=%u\n", thresh_allocsize);
+	} else {
+		pr_err("slab_allocator - invalid param: use on, off, thresh (only oom)\n");
+	}
+	return 0;
+}
+/**
+ * @brief delayed slab_allocator-trace on timer-context
+ */
+static void slab_allocator_on(unsigned long data __maybe_unused){
+	pr_err("start slab_allocator-trace now (use cat /proc/slab_allocators)\n");
+	slab_debug_avm_lite_on(1);
+}
+/**
+ */
+static DEFINE_TIMER(slab_allocator_timer, slab_allocator_on, 0, 0);
+/**
+ * @brief install /proc/slab_allocators
+ * @return 0
+ */
+int __init avm_proc_debug_slab_avm_lite_init(void) {
+/*--- pr_err("%s()\n", __func__); ---*/
+	mod_timer(&slab_allocator_timer, jiffies + 45 * HZ);
+	add_simple_proc_file( "slab_allocators", lproc_slab_allocator_on, lproc_slab_allocators, &thresh_allocsize);
+	return 0;
+}
+late_initcall(avm_proc_debug_slab_avm_lite_init);
+
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
+#if defined(CONFIG_AVM_ENHANCED)
+/**
+ * @brief get kmemalloc-area if addr in range
+ * attention! function unsafe for cachep - zone-page-spinlock necessary
+ * @return start (zero if not exist)
+ */
+unsigned long get_kmemalloc_area(unsigned long addr, unsigned long *caller, const char **cache_name,
+				 unsigned long *size, int *freed){
+	unsigned long flags, kstart;
+	struct kmem_list3 *l3;
+	unsigned int objnr, _freed = 0;
+	struct kmem_cache *cachep;
+	struct slab *slabp;
+	struct page *compound_page, *page = virt_to_head_page((void *)addr);
+
+	if(!virt_addr_valid(page) || !PageSlab(page)) {
+		return 0;
+	}
+	compound_page = compound_head(page);
+	if(!PageSlab(compound_page)) {
+		return 0;
+	}
+	cachep = page_get_cache(page);
+	if(!virt_addr_valid(cachep)) {
+		return 0;
+	}
+	slabp = page_get_slab(page);
+	if(!virt_addr_valid(slabp)) {
+		return 0;
+	}
+	l3 = cachep->nodelists[numa_mem_id()];
+	if(!virt_addr_valid(l3)) {
+		return 0;
+	}
+	if(!spin_trylock_irqsave(&l3->list_lock, flags)) {
+		return 0;
+	}
+	objnr = obj_to_index(cachep, slabp, (void *)addr);
+	if(objnr >= cachep->num) {
+		spin_unlock_irqrestore(&l3->list_lock, flags);
+		return 0;
+	}
+	if(caller) {
+#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK)
+		if(slab_bufctl(slabp)[objnr] != BUFCTL_ACTIVE) {
+			_freed = 1;
+		}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK) ---*/
+		*caller = (unsigned long)
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+			_freed ? get_slab_enh(slabp, objnr)->free_caller : get_slab_enh(slabp, objnr)->caller;
+#else /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+			NULL;
+#endif /*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	}
+	dump_slab("slab", slabp, cachep, objnr);
+	if(cache_name) *cache_name = cachep->name;
+	if(size) *size = obj_size(cachep);
+	if(freed) *freed = _freed;
+	kstart = (unsigned long)index_to_obj(cachep, slabp, objnr);
+	spin_unlock_irqrestore(&l3->list_lock, flags);
+	return kstart;
+}
+#endif /*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
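
Editor's note (not part of the patch): the hunks in slab_mgmt_size(), alloc_slabmgmt() and cache_estimate() all implement the same idea -- an array of struct _slab_enh is appended behind struct slab and the kmem_bufctl_t array, so every on-slab object gets a caller/jiffies record at the same index as its bufctl entry. The following is a minimal user-space sketch of that layout under simplified assumptions; struct slab, kmem_bufctl_t and ALIGN() are reduced stand-ins, not the kernel definitions, and the program only illustrates the size/offset arithmetic the patch adds.

/* Illustrative user-space sketch only -- simplified stand-ins for kernel types. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned int kmem_bufctl_t;

struct slab {                /* reduced stand-in for the kernel's struct slab */
	void *s_mem;
	unsigned int inuse;
	kmem_bufctl_t free;
	void *slab_enh;      /* pointer added by CONFIG_DEBUG_SLAB_AVM_LITE */
};

struct _slab_enh {           /* per-object alloc/free tracking, as in the patch */
	unsigned long caller;
	unsigned long jiffies;
	unsigned long free_caller;
	unsigned long free_jiffies;
};

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* slab_mgmt_size() with the extra per-object _slab_enh entries, as in the patch */
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t)
		     + nr_objs * sizeof(struct _slab_enh), align);
}

int main(void)
{
	size_t nr_objs = 8, align = sizeof(void *);

	/* alloc_slabmgmt(): slab_enh starts right behind the bufctl array */
	printf("mgmt size for %zu objs: %zu bytes\n", nr_objs, slab_mgmt_size(nr_objs, align));
	printf("slab_enh offset: %zu bytes\n",
	       sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t));
	return 0;
}

Placing the tracking array behind the existing bufctl array keeps all of the original bufctl indexing untouched; get_slab_enh(slabp, objnr) can then locate an object's record with the same object index that cache_free_debugcheck() and cache_alloc_debugcheck_after() already compute.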