--- zzzz-none-000/linux-4.4.60/mm/slab.c	2017-04-08 07:53:53.000000000 +0000
+++ wasp-540e-714/linux-4.4.60/mm/slab.c	2019-07-03 09:21:34.000000000 +0000
@@ -122,6 +122,10 @@
 #include
 #include
 #include
+#if defined(CONFIG_AVM_ENHANCED)
+#include
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
+
 #include
@@ -188,6 +192,9 @@
  */
 struct array_cache {
 	unsigned int avail;
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	unsigned int other;
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 	unsigned int limit;
 	unsigned int batchcount;
 	unsigned int touched;
@@ -282,7 +289,11 @@
 #define	CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+#define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 2, KMALLOC_MIN_SIZE + 1))
+#else/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 #define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 5, KMALLOC_MIN_SIZE + 1))
+#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 
 #define	BATCHREFILL_LIMIT	16
 /*
@@ -393,8 +404,7 @@
 #define OBJECT_FREE (0)
 #define OBJECT_ACTIVE (1)
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
+#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK)
 static void set_obj_status(struct page *page, int idx, int val)
 {
 	int freelist_size;
@@ -457,6 +467,105 @@
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+struct _slab_enh {
+	unsigned long caller;
+	unsigned long jiffies;
+#define CHECK_POISON_WRITE_AFTER_FREE_BIT (0x1 << 0)
+	unsigned long free_caller; /*--- abuse the lowest bit for SLAB_POISON_WRITE_AFTER_FREE ---*/
+	unsigned long free_jiffies;
+};
+/**
+ */
+static __always_inline struct _slab_enh *slab_enh_array(const struct kmem_cache *cachep, const struct page *page) {
+	struct _slab_enh *pslab_enh;
+	int freelist_size = ALIGN(cachep->num * sizeof(freelist_idx_t) +
+				  cachep->num * sizeof(char),
+				  sizeof(unsigned long));
+	pslab_enh = (struct _slab_enh *)((char *)page->freelist + freelist_size);
+	return pslab_enh;
+}
+/**
+ * @brief set caller in slab_enh-array
+ * @param cachep cachepool-pointer
+ * @param page slab-page
+ * @param objnr index for slab_enh-array
+ * @param caller caller (caller = -1: initialize)
+ * @return void
+ */
+static __always_inline void __set_slab_enh(const struct kmem_cache *cachep, const struct page *page,
+					   unsigned int objnr, unsigned long caller, unsigned long free_caller) {
+
+	struct _slab_enh *pslab_enh = slab_enh_array(cachep, page) + objnr;
+	if ((caller == 0) && (free_caller == 0)) {
+		memset(pslab_enh, 0, sizeof(*pslab_enh));
+	} else if (caller) {
+		pslab_enh->caller = caller;
+		pslab_enh->jiffies = jiffies;
+	} else if (free_caller) {
+		pslab_enh->free_caller = free_caller;
+		pslab_enh->free_jiffies = jiffies;
+	}
+#if 0
+	if (caller == 0) {
+		printk(KERN_DEBUG"%s %s: %pS\n", __func__, cachep->name, (void *)caller);
+		dump_stack();
+	}
+#endif
+}
+/**
+ * @brief get slab_enh-entry
+ * @param cachep cachepool-pointer
+ * @param page slab-page
+ * @param objnr index for slab_enh-array
+ * @return slab_enh-entry
+ */
+static inline struct _slab_enh *get_slab_enh(const struct kmem_cache *cachep, const struct page *page,
+					     unsigned int objnr) {
+	return slab_enh_array(cachep, page) + objnr;
+}
+/**
+ * a little bit faster:
+ * start address is aligned!
+ */
+static noinline void slab_enh_set_poison(void *objp, unsigned int size, unsigned long caller){
+	unsigned int count = size / sizeof(unsigned int) / 8;
+	unsigned int *p = objp;
+
+	caller &= ~0x80000000; /*--- tricky: the bit is restored when the BadVA is read, so the free-caller can be recovered ---*/
+	while (count--) {
+		p[0] = caller; p[1] = caller; p[2] = caller; p[3] = caller;
+		p[4] = caller; p[5] = caller; p[6] = caller; p[7] = caller;
+		p += 8;
+	}
+}
+/**
+ * @brief check for write-after-free (switched on with the "poison+" option in /proc/slab_allocator)
+ *
+ */
+static noinline int slab_enh_check_poison(void *objp, unsigned int size, unsigned long *offset){
+	int ret = 0;
+	unsigned int count = (size / sizeof(unsigned int) / 8) * 8;
+	unsigned int *p = objp;
+	unsigned int caller;
+
+	if (likely(count)) {
+		caller = *p++;
+		count--;
+	}
+	while (count--) {
+		if (*p != caller) {
+			*offset = (unsigned long)p - (unsigned long)objp;
+			ret = 1;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
+
 /* internal cache of cache description objs */
 static struct kmem_cache kmem_cache_boot = {
 	.batchcount = 1,
@@ -480,9 +589,14 @@
 	size_t freelist_size;
 
 	freelist_size = nr_objs * sizeof(freelist_idx_t);
-	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE))
 		freelist_size += nr_objs * sizeof(char);
 
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	freelist_size = ALIGN(freelist_size, sizeof(unsigned long));
+	freelist_size += nr_objs * sizeof(struct _slab_enh);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
 	if (align)
 		freelist_size = ALIGN(freelist_size, align);
 
@@ -497,8 +611,13 @@
 	size_t freelist_size;
 	int extra_space = 0;
 
-	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE))
 		extra_space = sizeof(char);
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	extra_space += sizeof(struct _slab_enh);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
 	/*
 	 * Ignore padding for the initial guess. The padding
 	 * is at most @align-1 bytes, and @buffer_size is at
@@ -559,13 +678,13 @@
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
-#if DEBUG
+#if DEBUG || defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
 			char *msg)
 {
-	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
+	printk(KERN_ERR "slab error in %s(): cache '%s': %s\n",
 	       function, cachep->name, msg);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
@@ -670,6 +789,10 @@
 	kmemleak_no_scan(ac);
 	if (ac) {
 		ac->avail = 0;
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		ac->other = 0;
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
 		ac->limit = limit;
 		ac->batchcount = batch;
 		ac->touched = 0;
@@ -692,6 +815,23 @@
 	return PageSlabPfmemalloc(page);
 }
 
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+/**
+ * not perfect but better than lifo
+ */
+static inline void *__ac_notlastfreed_obj(struct array_cache *ac) {
+	void *objp;
+	register unsigned int other = ac->other;
+
+	if(other >= ac->avail) {
+		other = 0;
+	}
+	objp = ac->entry[other];
+	ac->entry[other++] = ac->entry[--ac->avail];
+	ac->other = other;
+	return objp;
+}
+#endif
 /* Clears pfmemalloc_active if no slabs have pfmalloc set */
 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 						struct array_cache *ac)
@@ -725,7 +865,11 @@
 			gfp_t flags, bool force_refill)
 {
 	int i;
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	void *objp = __ac_notlastfreed_obj(ac);
+#else /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 	void *objp = ac->entry[--ac->avail];
+#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 
 	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 	if (unlikely(is_obj_pfmemalloc(objp))) {
@@ -775,9 +919,13 @@
 
 	if (unlikely(sk_memalloc_socks()))
 		objp = __ac_get_obj(cachep, ac, flags, force_refill);
-	else
+	else {
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		objp = __ac_notlastfreed_obj(ac);
+#else /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 		objp = ac->entry[--ac->avail];
-
+#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	}
 	return objp;
 }
 
@@ -1951,8 +2099,13 @@
 		 * use off-slab slabs. Needed to avoid a possible
 		 * looping condition in cache_grow().
 		 */
-		if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+		if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE))
 			freelist_size_per_obj += sizeof(char);
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		freelist_size_per_obj += sizeof(struct _slab_enh);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
 		offslab_limit = size;
 		offslab_limit /= freelist_size_per_obj;
@@ -2105,6 +2258,9 @@
 	int err;
 	size_t size = cachep->size;
 
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	flags |= flag_debug_slab_avm_lite;
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 #if DEBUG
 #if FORCED_DEBUG
 	/*
@@ -2487,6 +2643,9 @@
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = index_to_obj(cachep, page, i);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+		__set_slab_enh(cachep, page, i, 0, 0);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2750,9 +2909,155 @@
 	}
 	return objp;
 }
+#elif defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+#define kfree_debugcheck(x) do { } while(0)
+#define snprintf_add(ptxt, txtlen, args...) if(ptxt == NULL) printk(args); else { int local_add_len;\
+	if((local_add_len = snprintf(ptxt, txtlen, args)) > 0) { \
+		int tail = min((int)txtlen, local_add_len); \
+		(ptxt) += tail, (txtlen) -= tail; \
+	} \
+	}
+/**
+ * @brief dump integer array and break after line_c entries and mark entry mark_idx
+ * @param p array
+ * @param el_size 1, 2, 4
+ * @param entries elements of array
+ * @param line_c break after line_c-elements
+ * @param mark_idx mark element (-1 if not used)
+ */
+static void dump_el_array(unsigned char *p, unsigned int el_size, unsigned int entries, unsigned line_c, int mark_idx){
+	char tmp[256], *ptxt;
+	unsigned int i, ii, txtlen;
+	unsigned char *pstart;
+	tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp);
+	pstart = p;
+
+	ii = 0;
+	for(i = 0; i < entries; i++) {
+		switch(el_size) {
+		case 1:
+			snprintf_add(ptxt, txtlen, "%s%02x%s", i == mark_idx ? "\b<" : "", *p,
+				     i == mark_idx ? ">" : " ");
+			break;
+		case 2:
+			snprintf_add(ptxt, txtlen, "%s%04x%s", i == mark_idx ? "\b<" : "", *((unsigned short *)p),
+				     i == mark_idx ? ">" : " ");
+			break;
+		case 4:
+			snprintf_add(ptxt, txtlen, "%s%08x%s", i == mark_idx ? "\b<" : "", *((unsigned int *)p),
+				     i == mark_idx ? ">" : " ");
+			break;
+		default:
+			return;
+		}
+		p += el_size;
+		ii++;
+		if(ii >= line_c) {
+			printk(KERN_ERR"%p: %s\n", pstart, tmp);
+			tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp);
+			pstart = p;
+			ii = 0;
+		}
+	}
+	if(tmp[0]) {
+		printk(KERN_ERR"%p: %s\n", pstart, tmp);
+	}
+}
+/**
+ * @brief dump slab and mark objnr in slab_bufctl/slab_enh
+ * @param prefix
+ */
+static void dump_slab(char *prefix, struct page *page, struct kmem_cache *cachep, int objnr) {
+	int freelist_size;
+	unsigned char *status;
+	freelist_size = cachep->num * sizeof(freelist_idx_t);
+	status = (unsigned char *)page->freelist + freelist_size;
+
+	printk(KERN_ERR"%s:slab_freelist: %p objnr=%d/%u\n", prefix, page->freelist, objnr, cachep->num);
+	dump_el_array((unsigned char *)page->freelist, sizeof(freelist_idx_t), cachep->num, 8, objnr);
+	printk(KERN_ERR"slab_status: %p (%d==active)\n", status, OBJECT_ACTIVE);
+	dump_el_array(status, sizeof(char), cachep->num, 8, objnr);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	printk(KERN_ERR"slab_enh: %p\n", slab_enh_array(cachep, page));
+	dump_el_array((unsigned char *)slab_enh_array(cachep, page), sizeof(unsigned int),
+		      cachep->num * sizeof(struct _slab_enh) / sizeof(unsigned int), 8,
+		      objnr == -1 ? objnr : objnr * sizeof(struct _slab_enh) / sizeof(unsigned int));
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+}
+
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+				   unsigned long caller)
+{
+	struct page *page;
+	unsigned int objnr, bufctl;
+
+	BUG_ON(virt_to_cache(objp) != cachep);
+
+	objp -= obj_offset(cachep);
+	kfree_debugcheck(objp);
+	page = virt_to_head_page(objp);
+
+	objnr = obj_to_index(cachep, page, objp);
+	BUG_ON(objnr >= cachep->num);
+	BUG_ON(objp != index_to_obj(cachep, page, objnr));
+	bufctl = get_obj_status(page, objnr);
+	if (unlikely(bufctl != OBJECT_ACTIVE)) {
+		char tmp[196];
+		if(bufctl == OBJECT_FREE) {
+			snprintf(tmp, sizeof(tmp), "double free detected: freelist=%p objp=%p objnr=%u bufctl=0x%x"
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 "\n(last freed from %pS before %lu jiffies)"
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 , page->freelist, objp, objnr, bufctl
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+				 , (void *)get_slab_enh(cachep, page, objnr)->free_caller,
+				 jiffies - get_slab_enh(cachep, page, objnr)->free_jiffies
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+				 );
+			slab_error(cachep, tmp);
+			dump_slab("double free detected", page, cachep, objnr);
+		} else {
+			snprintf(tmp, sizeof(tmp), "corrupt-slab: freelist=%p objp=%p objnr=%u bufctl=0x%x",
+				 page->freelist, objp, objnr, bufctl);
+			slab_error(cachep, tmp);
+			dump_slab("corrupt slab", page, cachep, objnr);
+		}
+#if defined(CONFIG_SKB_FREE_LOG)
+		skb_log_show(objp);
+		BUG();
+#endif
+	}
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	if (unlikely(cachep->flags & SLAB_POISON)) {
+		slab_enh_set_poison(objp, cachep->object_size, caller);
+	}
+	if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) {
+		if (unlikely(cachep->flags & SLAB_POISON_WRITE_AFTER_FREE)) {
+			caller |= CHECK_POISON_WRITE_AFTER_FREE_BIT;
+		} else {
+			caller &= ~CHECK_POISON_WRITE_AFTER_FREE_BIT;
+		}
+		__set_slab_enh(cachep, page, objnr, 0, caller);
+	}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	set_obj_status(page, objnr, OBJECT_FREE);
+	return objp;
+}
+
+static void slab_corrupt(struct kmem_cache *cachep, struct page *page_err, int do_panic) {
+	char tmp[128];
+	snprintf(tmp, sizeof(tmp), "corrupt-slab: page=%p page->active=%u incorrect", page_err, page_err->active);
+	slab_error(cachep, tmp);
+	dump_slab("corrupt-slab:", page_err, cachep, -1);
+	if (do_panic) {
+		panic(tmp);
+	}
+}
+#define check_slabp(x,y) do { } while(0)
 #else
 #define kfree_debugcheck(x) do { } while(0)
+#define check_slabp(x,y) do { } while(0)
 #define cache_free_debugcheck(x,objp,z) (objp)
 #endif
 
@@ -2810,7 +3115,14 @@
 		 * there must be at least one object available for
 		 * allocation.
 		 */
+#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+		if (unlikely(page->active >= cachep->num)) {
+			slab_corrupt(cachep, page, 1);
+		}
+#else
+		BUG_ON(page->active >= cachep->num);
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) ---*/
 
 		while (page->active < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -2820,6 +3132,7 @@
 			ac_put_obj(cachep, ac, slab_get_obj(cachep, page, node));
 		}
+		check_slabp(cachep, slabp);
 
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
@@ -2913,6 +3226,43 @@
 	}
 	return objp;
 }
+#elif defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE)
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+				gfp_t flags __maybe_unused, void *objp, unsigned long caller) {
+	struct page *page;
+	unsigned objnr;
+	if (!objp)
+		return objp;
+	page = virt_to_head_page(objp);
+	objnr = obj_to_index(cachep, page, objp);
+	set_obj_status(page, objnr, OBJECT_ACTIVE);
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+	if (unlikely((cachep->flags & (SLAB_POISON_WRITE_AFTER_FREE | SLAB_POISON)) ==
+		     (SLAB_POISON_WRITE_AFTER_FREE | SLAB_POISON))) {
+		struct _slab_enh *pslab_enh = get_slab_enh(cachep, page, objnr);
+		if (pslab_enh->free_caller & CHECK_POISON_WRITE_AFTER_FREE_BIT) {
+			unsigned long offset;
+			pslab_enh->free_caller &= ~CHECK_POISON_WRITE_AFTER_FREE_BIT; /*--- kill poison-check-bit ---*/
+			if (slab_enh_check_poison(objp, cachep->object_size, &offset)) {
+				printk(KERN_ERR"slab error written after freed from %pS: %pS\n"
+				       "dump(offset=%lu): %*ph\n",
+				       (void *)(pslab_enh->free_caller & ~CHECK_POISON_WRITE_AFTER_FREE_BIT),
+				       (unsigned char *)objp + offset,
+				       offset,
+				       min(cachep->object_size - (int)offset, 32),
+				       objp + offset);
+			}
+		}
+	}
+	if (cachep->ctor && (cachep->flags & SLAB_POISON)) {
+		cachep->ctor(objp);
+	}
+	if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) {
+		__set_slab_enh(cachep, page, objnr, caller, 0);
+	}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	return objp;
+}
 #else
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
@@ -3108,6 +3458,7 @@
 		page = list_entry(entry, struct page, lru);
 
 		check_spinlock_acquired_node(cachep, nodeid);
+		check_slabp(cachep, slabp);
 
 		STATS_INC_NODEALLOCS(cachep);
 		STATS_INC_ACTIVE(cachep);
@@ -3116,6 +3467,7 @@
 		BUG_ON(page->active == cachep->num);
 
 		obj = slab_get_obj(cachep, page, nodeid);
+		check_slabp(cachep, slabp);
 		n->free_objects--;
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
@@ -3287,6 +3639,7 @@
 		slab_put_obj(cachep, page, objp, node);
 		STATS_DEC_ACTIVE(cachep);
 		n->free_objects++;
+		check_slabp(cachep, page);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
@@ -3590,6 +3943,13 @@
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
+#if defined(CONFIG_AVM_ENHANCED)
+	if (unlikely(c == NULL)) {
+		printk(KERN_ERR"%s invalid objp %pS\n", __func__, objp);
+		BUG_ON(c == NULL);
+		return;
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
 	debug_check_no_locks_freed(objp, c->object_size);
 	debug_check_no_obj_freed(objp, c->object_size);
@@ -4068,7 +4428,7 @@
 	return res;
 }
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
+#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK)
 
 static inline int add_caller(unsigned long *n, unsigned long v)
 {
@@ -4130,7 +4490,7 @@
 		return;
 	}
 #endif
-	seq_printf(m, "%p", (void *)address);
+	seq_printf(m, "%pF", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)
@@ -4218,11 +4578,134 @@
 };
 #endif
 
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+
+#include
+
+static void *skbuff_leaks_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&slab_mutex);
+	return seq_list_start(&slab_caches, *pos);
+}
+
+static void skbuff_handle_slab(unsigned long *n, struct kmem_cache *c, struct page *page)
+{
+	void *p;
+	int i;
+
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
+		struct sk_buff *skb;
+
+		if (get_obj_status(page, i) != OBJECT_ACTIVE)
+			continue;
+
+		skb = (struct sk_buff *)(p + obj_offset(c));
+
+		if (!add_caller(n, (unsigned long)skb->last_user))
+			return;
+	}
+}
+
+static int skbuff_leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+	struct page *page;
+	struct kmem_cache_node *n;
+	const char *name;
+	unsigned long *x = m->private;
+	int node;
+	int i;
+
+	if (strncmp(cachep->name, "skbuff_", 7) != 0)
+		return 0;
+
+	/* OK, we can do it */
+
+	x[1] = 0;
+
+	for_each_online_node(node) {
+		n = cachep->node[node];
+		if (!n)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&n->list_lock);
+
+		list_for_each_entry(page, &n->slabs_full, lru)
+			skbuff_handle_slab(x, cachep, page);
+		list_for_each_entry(page, &n->slabs_partial, lru)
+			skbuff_handle_slab(x, cachep, page);
+		spin_unlock_irq(&n->list_lock);
+	}
+	name = cachep->name;
+	if (x[0] == x[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&slab_mutex);
+		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = x;
+			mutex_lock(&slab_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = x[0] * 2;
+		kfree(x);
+		mutex_lock(&slab_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < x[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
+		show_symbol(m, x[2*i+2]);
+		seq_putc(m, '\n');
+	}
+
+	return 0;
+}
+
+static const struct seq_operations skbuffstats_op = {
+	.start = skbuff_leaks_start,
+	.next = slab_next,
+	.stop = slab_stop,
+	.show = skbuff_leaks_show,
+};
+
+static int skbuffstats_open(struct inode *inode, struct file *file)
+{
+	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	int ret = -ENOMEM;
+	if (n) {
+		ret = seq_open(file, &skbuffstats_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+			m->private = n;
+			n = NULL;
+		}
+		kfree(n);
+	}
+	return ret;
+}
+
+static const struct file_operations proc_skbuffstats_operations = {
+	.open = skbuffstats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+#endif /* CONFIG_NET_DEBUG_SKBUFF_LEAK */
+
 static int __init slab_proc_init(void)
 {
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
 #endif
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+	proc_create("skbuff_last_user", 0, NULL, &proc_skbuffstats_operations);
+#endif
 	return 0;
 }
 module_init(slab_proc_init);
@@ -4249,3 +4732,198 @@
 	return virt_to_cache(objp)->object_size;
 }
 EXPORT_SYMBOL(ksize);
+
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+/**
+ * @brief insert caller into the toplist (sorted by caller)
+ * divide and conquer to build the caller history (like the add_caller() function, but for this struct ;-))
+ * @param ptoplist pointer for toplist to fill
+ * @param caller caller
+ * @return != 0 if no place in toplist
+ */
+#define TOP_TOIDX(p) ((p) - (&ptoplist->entry[0]))
+static int mark_in_toplist(struct _slab_avm_enh_toplist *ptoplist, unsigned long caller, unsigned long act_diff) {
+	unsigned int i, elements, idx;
+	struct _slab_avm_top_entry *q, *p;
+	elements = ptoplist->entries;
+	p = &ptoplist->entry[0];
+	while(elements) {
+		i = elements / 2;
+		q = &p[i];
+		if(q->caller == caller) {
+			q->count++;
+			q->sum_time += (unsigned long long)act_diff;
+			return 0;
+		}
+		if (q->caller > caller) {
+			elements = i;
+		} else {
+			p = q + 1;
+			elements -= i + 1;
+		}
+	}
+	if(ptoplist->entries >= ARRAY_SIZE(ptoplist->entry)) {
+		ptoplist->ignored++;
+		return 1;
+	}
+	idx = TOP_TOIDX(p);
+	memmove(&p[1], p, (ptoplist->entries - idx) * sizeof(ptoplist->entry[0]));
+	ptoplist->entries++;
+	ptoplist->entry[idx].caller = caller;
+	ptoplist->entry[idx].sum_time = act_diff;
+	ptoplist->entry[idx].count = 1;
+	return 0;
+}
+/**
+ * @brief sum up caller-toplist entries
+ * @param ptoplist pointer to toplist
+ * @return sum of all entry counts
+ */
+static unsigned long sum_toplist_entries(struct _slab_avm_enh_toplist *ptoplist) {
+	unsigned long sum_count = 0;
+	unsigned int i;
+
+	for(i = 0; i < ptoplist->entries; i++) {
+		sum_count += ptoplist->entry[i].count;
+	}
+	return sum_count;
+}
+/**
+ * @brief sort caller-toplist (largest count first)
+ * @param ptoplist pointer for toplist to sort
+ * @return void
+ */
+static void sort_toplist(struct _slab_avm_enh_toplist *ptoplist) {
+	unsigned int i, max_count, max_idx, idx = 0;
+
+	for(;;) {
+		struct _slab_avm_top_entry tmp;
+		max_count = 0;
+		for(i = idx; i < ptoplist->entries; i++) {
+			if(ptoplist->entry[i].count > max_count) {
+				max_count = ptoplist->entry[i].count;
+				max_idx = i;
+			}
+		}
+		if(max_count == 0) {
+			break;
+		}
+		/*--- swap ---*/
+		memcpy(&tmp, &ptoplist->entry[idx], sizeof(tmp));
+		memcpy(&ptoplist->entry[idx], &ptoplist->entry[max_idx], sizeof(tmp));
+		memcpy(&ptoplist->entry[max_idx], &tmp, sizeof(tmp));
+		idx++;
+	}
+}
+/**
+ * @brief fill toplist for cachep
+ * @param ptoplist pointer for toplist to fill
+ * @param cachep cachepool
+ * @param tresh_jiffiesdiff only count callers older than this jiffies difference
+ * @return void
+ */
+void debug_slab_avm_lite_toplist(struct _slab_avm_enh_toplist *ptoplist, const struct kmem_cache *cachep,
+				 unsigned long tresh_jiffiesdiff __maybe_unused){
+	unsigned long flags;
+	unsigned long act_jiffies = jiffies;
+	unsigned long act_diff;
+	unsigned int ii, i;
+	int node;
+	struct _slab_enh *slab_enhp;
+	struct kmem_cache_node *n;
+	struct page *page;
+	struct list_head *slablist[2];
+
+	memset(ptoplist, 0, sizeof(*ptoplist));
+
+	for_each_online_node(node) {
+
+		n = cachep->node[node];
+		if (!n) {
+			continue;
+		}
+		slablist[0] = &n->slabs_full;
+		slablist[1] = &n->slabs_partial;
+		spin_lock_irqsave(&n->list_lock, flags);
+		for(i = 0; i < ARRAY_SIZE(slablist); i++) {
+			list_for_each_entry(page, slablist[i], lru) {
+				for (ii = 0; ii < cachep->num; ii++) {
+					if (get_obj_status(page, ii) != OBJECT_ACTIVE) {
+						continue;
+					}
+					slab_enhp = get_slab_enh(cachep, page, ii);
+					if(slab_enhp->caller == 0) {
+						continue;
+					}
+					act_diff = act_jiffies - slab_enhp->jiffies;
+					if(act_diff <= tresh_jiffiesdiff) {
+						/*--- too young ---*/
+						continue;
+					}
+					mark_in_toplist(ptoplist, slab_enhp->caller, act_diff);
+				}
+			}
+		}
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
+	sort_toplist(ptoplist);
+	ptoplist->sum_count = sum_toplist_entries(ptoplist) + ptoplist->ignored;
+}
+
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+
+#if defined(CONFIG_AVM_ENHANCED)
+/**
+ * @brief get kmemalloc-area if addr in range
+ * attention! function is unsafe regarding the cachep - zone-page-spinlock necessary
+ * @return start (zero if it does not exist)
+ */
+unsigned long get_kmemalloc_area(unsigned long addr, unsigned long *caller, const char **cache_name,
+				 unsigned long *size, int *freed){
+	unsigned long flags, kstart;
+	struct kmem_cache_node *n;
+	unsigned int objnr, _freed = 0;
+	struct kmem_cache *cachep;
+	struct page *page = virt_to_head_page((void *)addr);
+
+	if(!virt_addr_valid(page)) {
+		return 0;
+	}
+	cachep = page->slab_cache;
+	if(!virt_addr_valid(cachep)) {
+		return 0;
+	}
+	n = cachep->node[numa_mem_id()];
+	if(!virt_addr_valid(n)) {
+		return 0;
+	}
+	if(!spin_trylock_irqsave(&n->list_lock, flags)) {
+		return 0;
+	}
+	objnr = obj_to_index(cachep, page, (void *)addr);
+	if(objnr >= cachep->num) {
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		return 0;
+	}
+	if(caller) {
+#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK)
+		if (get_obj_status(page, objnr) != OBJECT_ACTIVE) {
+			_freed = 1;
+		}
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK) ---*/
+		*caller =
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+			_freed ? get_slab_enh(cachep, page, objnr)->free_caller : get_slab_enh(cachep, page, objnr)->caller;
+#else /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+			0UL;
+#endif /*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+	}
+	/*--- dump_slab("slab", page, cachep, objnr); ---*/
+	if(cache_name) *cache_name = cachep->name;
+	if(size) *size = cachep->size;
+	if(freed) *freed = _freed;
+	kstart = (unsigned long)index_to_obj(cachep, page, objnr);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return kstart;
+}
+#endif /*--- #if defined(CONFIG_AVM_ENHANCED) ---*/