--- zzzz-none-000/linux-4.4.60/mm/slab.c 2017-04-08 07:53:53.000000000 +0000 +++ dragonfly-4020-701/linux-4.4.60/mm/slab.c 2018-11-08 13:36:17.000000000 +0000 @@ -122,6 +122,10 @@ #include #include #include +#if defined(CONFIG_AVM_ENHANCED) +#include +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + #include @@ -282,7 +286,11 @@ #define CFLGS_OFF_SLAB (0x80000000UL) #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) +#define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 2, KMALLOC_MIN_SIZE + 1)) +#else/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ #define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 5, KMALLOC_MIN_SIZE + 1)) +#endif/*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ #define BATCHREFILL_LIMIT 16 /* @@ -393,8 +401,7 @@ #define OBJECT_FREE (0) #define OBJECT_ACTIVE (1) -#ifdef CONFIG_DEBUG_SLAB_LEAK - +#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK) static void set_obj_status(struct page *page, int idx, int val) { int freelist_size; @@ -457,6 +464,65 @@ return reciprocal_divide(offset, cache->reciprocal_buffer_size); } +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) +struct _slab_enh { + unsigned long caller; + unsigned long jiffies; + unsigned long free_caller; + unsigned long free_jiffies; +}; +/** + */ +static __always_inline struct _slab_enh *slab_enh_array(const struct kmem_cache *cachep, const struct page *page) { + struct _slab_enh *pslab_enh; + int freelist_size = ALIGN(cachep->num * sizeof(freelist_idx_t) + + cachep->num * sizeof(char), + sizeof(unsigned long)); + pslab_enh= (struct _slab_enh *)((char *)page->freelist + freelist_size); + return pslab_enh; +} +/** + * @brief set caller in slab_enh-array + * @param cachep cachepool-pointer + * @param page slab-page + * @param objnr index for slab_enh-array + * @param caller caller (caller = -1: initialize + * @return void + */ +static __always_inline void __set_slab_enh(const struct kmem_cache *cachep, const struct page *page, + unsigned int objnr, unsigned long caller, unsigned long free_caller) { + + struct _slab_enh *pslab_enh = slab_enh_array(cachep, page) + objnr; + if((caller == 0) && (free_caller == 0)) { + memset(pslab_enh, 0, sizeof(*pslab_enh)); + } else if(caller) { + pslab_enh->caller = caller; + pslab_enh->jiffies = jiffies; + } else if(free_caller) { + pslab_enh->free_caller = free_caller; + pslab_enh->free_jiffies = jiffies; + } +#if 0 + if(caller == 0) { + printk(KERN_DEBUG"%s %s: %pS\n", __func__, cachep->name, (void *)caller); + dump_stack(); + } +#endif +} +/** + * @brief get slab_enh-entry + * @param cachep cachepool-pointer + * @param page slab-page + * @param objnr index for slab_enh-array + * @return slab_enh-entry + */ +static inline struct _slab_enh *get_slab_enh(const struct kmem_cache *cachep, const struct page *page, + unsigned int objnr) { + return slab_enh_array(cachep, page) + objnr; +} +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + + /* internal cache of cache description objs */ static struct kmem_cache kmem_cache_boot = { .batchcount = 1, @@ -480,9 +546,14 @@ size_t freelist_size; freelist_size = nr_objs * sizeof(freelist_idx_t); - if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE)) freelist_size += nr_objs * sizeof(char); +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + freelist_size = ALIGN(freelist_size, sizeof(unsigned long)); + freelist_size += nr_objs * sizeof(struct _slab_enh); +#endif/*--- #if 
defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + if (align) freelist_size = ALIGN(freelist_size, align); @@ -497,8 +568,13 @@ size_t freelist_size; int extra_space = 0; - if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE)) extra_space = sizeof(char); + +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + extra_space += sizeof(struct _slab_enh); +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + /* * Ignore padding for the initial guess. The padding * is at most @align-1 bytes, and @buffer_size is at @@ -559,7 +635,7 @@ *left_over = slab_size - nr_objs*buffer_size - mgmt_size; } -#if DEBUG +#if DEBUG || defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) static void __slab_error(const char *function, struct kmem_cache *cachep, @@ -1951,8 +2027,13 @@ * use off-slab slabs. Needed to avoid a possible * looping condition in cache_grow(). */ - if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK) || IS_ENABLED(CONFIG_DEBUG_SLAB_DOUBLE_FREE)) freelist_size_per_obj += sizeof(char); + +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + freelist_size_per_obj += sizeof(struct _slab_enh); +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + offslab_limit = size; offslab_limit /= freelist_size_per_obj; @@ -2105,6 +2186,9 @@ int err; size_t size = cachep->size; +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + flags |= flag_debug_slab_avm_lite; +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ #if DEBUG #if FORCED_DEBUG /* @@ -2487,6 +2571,9 @@ for (i = 0; i < cachep->num; i++) { void *objp = index_to_obj(cachep, page, i); +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + __set_slab_enh(cachep, page, i, 0, 0); +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ #if DEBUG /* need to poison the objs? */ if (cachep->flags & SLAB_POISON) @@ -2750,6 +2837,134 @@ } return objp; } +#elif defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) +#define kfree_debugcheck(x) do { } while(0) +#define snprintf_add(ptxt, txtlen, args...) if(ptxt == NULL) printk(args); else { int local_add_len;\ + if((local_add_len = snprintf(ptxt, txtlen, args)) > 0) { \ + int tail = min((int)txtlen, local_add_len); \ + (ptxt) += tail, (txtlen) -= tail; \ + } \ + } +/** + * @brief dump integer array and break after line_c entries and mark entry mark_idx + * @param p array + * @param el_size 1, 2, 4 + * @param entries elements of array + * @param line_c break after line_c-elements + * @param mark_idx mark element (-1 if not used) + */ +static void dump_el_array(unsigned char *p, unsigned int el_size, unsigned int entries, unsigned line_c, int mark_idx){ + char tmp[256], *ptxt; + unsigned int i, ii, txtlen; + unsigned char *pstart; + tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp); + pstart = p; + + ii = 0; + for(i = 0; i < entries; i++) { + switch(el_size) { + case 1: + snprintf_add(ptxt, txtlen, "%s%02x%s", i == mark_idx ? "\b<" : "", *p, + i == mark_idx ? ">" : " "); + break; + case 2: + snprintf_add(ptxt, txtlen, "%s%04x%s", i == mark_idx ? "\b<" : "", *((unsigned short *)p), + i == mark_idx ? ">" : " "); + break; + case 4: + snprintf_add(ptxt, txtlen, "%s%08x%s", i == mark_idx ? "\b<" : "", *((unsigned int *)p), + i == mark_idx ? 
">" : " "); + break; + default: + return; + } + p += el_size; + ii++; + if(ii >= line_c) { + printk(KERN_ERR"%p: %s\n", pstart, tmp); + tmp[0] = 0, ptxt = tmp, txtlen = sizeof(tmp); + pstart = p; + ii = 0; + } + } + if(tmp[0]) { + printk(KERN_ERR"%p: %s\n", pstart, tmp); + } +} +/** + * @brief dump slab and mark objnr in slab_bufctl/slab_enh + * @param prefix + */ +static void dump_slab(char *prefix, struct page *page, struct kmem_cache *cachep, int objnr) { + int freelist_size; + unsigned char *status; + + freelist_size = cachep->num * sizeof(freelist_idx_t); + status = (unsigned char *)page->freelist + freelist_size; + + printk(KERN_ERR"%s:slab_freelist: %p objnr=%d/%u\n", prefix, page->freelist, objnr, cachep->num); + dump_el_array((unsigned char *)page->freelist, sizeof(freelist_idx_t), cachep->num, 8, objnr); + printk(KERN_ERR"slab_status: %p (%d==active)\n", status, OBJECT_ACTIVE); + dump_el_array(status, sizeof(char), cachep->num, 8, objnr); +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + printk(KERN_ERR"slab_enh: %p\n", slab_enh_array(cachep, page)); + dump_el_array((unsigned char *)slab_enh_array(cachep, page), sizeof(unsigned int), + cachep->num * sizeof(struct _slab_enh) / sizeof(unsigned int), 8, + objnr == -1 ? objnr : objnr * sizeof(struct _slab_enh) / sizeof(unsigned int)); +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ +} + +static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, + unsigned long caller) +{ + struct page *page; + unsigned int objnr, bufctl; + + BUG_ON(virt_to_cache(objp) != cachep); + + objp -= obj_offset(cachep); + kfree_debugcheck(objp); + page = virt_to_head_page(objp); + + objnr = obj_to_index(cachep, page, objp); + BUG_ON(objnr >= cachep->num); + BUG_ON(objp != index_to_obj(cachep, page, objnr)); + bufctl = get_obj_status(page, objnr); + if (unlikely(bufctl != OBJECT_ACTIVE)) { + char tmp[196]; + if(bufctl == OBJECT_FREE) { + snprintf(tmp, sizeof(tmp), "double free detected: freelist=%p objp=%p objnr=%u bufctl=0x%x" +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + "\n(last freed from %pS before %lu jiffies)" +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + , page->freelist, objp, objnr, bufctl +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + , (void *)get_slab_enh(cachep, page, objnr)->free_caller, + jiffies - get_slab_enh(cachep, page, objnr)->free_jiffies +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + ); + slab_error(cachep, tmp); + dump_slab("double free detected", page, cachep, objnr); + } else { + snprintf(tmp, sizeof(tmp), "corrupt-slab: freelist=%p objp=%p objnr=%u bufctl=0x%x", + page->freelist, objp, objnr, bufctl); + slab_error(cachep, tmp); + dump_slab("corrupt slab", page, cachep, objnr); + } +#if defined(CONFIG_SKB_FREE_LOG) + skb_log_show(objp); + BUG(); +#endif + } +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) { + __set_slab_enh(cachep, page, objnr, 0, caller); + } +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + set_obj_status(page, objnr, OBJECT_FREE); + return objp; +} + #else #define kfree_debugcheck(x) do { } while(0) @@ -2913,6 +3128,23 @@ } return objp; } +#elif defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) +static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, + gfp_t flags __maybe_unused, void *objp, unsigned long caller) { + struct page *page; + + if (!objp) + return objp; + page = virt_to_head_page(objp); + set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) 
+ if (unlikely(cachep->flags & SLAB_STORE_USER_AND_TIME)) { + unsigned int objnr = obj_to_index(cachep, page, objp); + __set_slab_enh(cachep, page, objnr, caller, 0); + } +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + return objp; +} #else #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) #endif @@ -3590,6 +3822,9 @@ local_irq_save(flags); kfree_debugcheck(objp); c = virt_to_cache(objp); +#if defined(CONFIG_AVM_ENHANCED) + BUG_ON(c == NULL); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ debug_check_no_locks_freed(objp, c->object_size); debug_check_no_obj_freed(objp, c->object_size); @@ -4068,7 +4303,7 @@ return res; } -#ifdef CONFIG_DEBUG_SLAB_LEAK +#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK) static inline int add_caller(unsigned long *n, unsigned long v) { @@ -4130,7 +4365,7 @@ return; } #endif - seq_printf(m, "%p", (void *)address); + seq_printf(m, "%pF", (void *)address); } static int leaks_show(struct seq_file *m, void *p) @@ -4218,11 +4453,134 @@ }; #endif +#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK + +#include + +static void *skbuff_leaks_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&slab_mutex); + return seq_list_start(&slab_caches, *pos); +} + +static void skbuff_handle_slab(unsigned long *n, struct kmem_cache *c, struct page *page) +{ + void *p; + int i; + + if (n[0] == n[1]) + return; + for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { + struct sk_buff *skb; + + if (get_obj_status(page, i) != OBJECT_ACTIVE) + continue; + + skb = (struct sk_buff *)(p + obj_offset(c)); + + if (!add_caller(n, (unsigned long)skb->last_user)) + return; + } +} + +static int skbuff_leaks_show(struct seq_file *m, void *p) +{ + struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); + struct page *page; + struct kmem_cache_node *n; + const char *name; + unsigned long *x = m->private; + int node; + int i; + + if (strncmp(cachep->name, "skbuff_", 7) != 0) + return 0; + + /* OK, we can do it */ + + x[1] = 0; + + for_each_online_node(node) { + n = cachep->node[node]; + if (!n) + continue; + + check_irq_on(); + spin_lock_irq(&n->list_lock); + + list_for_each_entry(page, &n->slabs_full, lru) + skbuff_handle_slab(x, cachep, page); + list_for_each_entry(page, &n->slabs_partial, lru) + skbuff_handle_slab(x, cachep, page); + spin_unlock_irq(&n->list_lock); + } + name = cachep->name; + if (x[0] == x[1]) { + /* Increase the buffer size */ + mutex_unlock(&slab_mutex); + m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL); + if (!m->private) { + /* Too bad, we are really out */ + m->private = x; + mutex_lock(&slab_mutex); + return -ENOMEM; + } + *(unsigned long *)m->private = x[0] * 2; + kfree(x); + mutex_lock(&slab_mutex); + /* Now make sure this entry will be retried */ + m->count = m->size; + return 0; + } + for (i = 0; i < x[1]; i++) { + seq_printf(m, "%s: %lu ", name, x[2*i+3]); + show_symbol(m, x[2*i+2]); + seq_putc(m, '\n'); + } + + return 0; +} + +static const struct seq_operations skbuffstats_op = { + .start = skbuff_leaks_start, + .next = slab_next, + .stop = slab_stop, + .show = skbuff_leaks_show, +}; + +static int skbuffstats_open(struct inode *inode, struct file *file) +{ + unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); + int ret = -ENOMEM; + if (n) { + ret = seq_open(file, &skbuffstats_op); + if (!ret) { + struct seq_file *m = file->private_data; + *n = PAGE_SIZE / (2 * sizeof(unsigned long)); + m->private = n; + n = NULL; + } + kfree(n); + } + return ret; +} + +static const struct file_operations 
proc_skbuffstats_operations = { + .open = skbuffstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; +#endif /* CONFIG_NET_DEBUG_SKBUFF_LEAK */ + static int __init slab_proc_init(void) { #ifdef CONFIG_DEBUG_SLAB_LEAK proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); #endif +#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK + proc_create("skbuff_last_user", 0, NULL, &proc_skbuffstats_operations); +#endif return 0; } module_init(slab_proc_init); @@ -4249,3 +4607,198 @@ return virt_to_cache(objp)->object_size; } EXPORT_SYMBOL(ksize); + +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) +/** + * @brief sorted on caller + * divide et impera to make caller-history (like add_caller()-function but this struct ;-)* + * @param ptoplist pointer for toplist to fill + * @param caller caller + * @return != 0 if no place in toplist + */ +#define TOP_TOIDX(p) ((p) - (&ptoplist->entry[0])) +static int mark_in_toplist(struct _slab_avm_enh_toplist *ptoplist, unsigned long caller, unsigned long act_diff) { + unsigned int i, elements, idx; + struct _slab_avm_top_entry *q, *p; + elements = ptoplist->entries; + p = &ptoplist->entry[0]; + while(elements) { + i = elements / 2; + q = &p[i]; + if(q->caller == caller) { + q->count++; + q->sum_time += (unsigned long long)act_diff; + return 0; + } + if (q->caller > caller) { + elements = i; + } else { + p = q + 1; + elements -= i + 1; + } + } + if(ptoplist->entries >= ARRAY_SIZE(ptoplist->entry)) { + ptoplist->ignored++; + return 1; + } + idx = TOP_TOIDX(p); + memmove(&p[1], p, (ptoplist->entries - idx) * sizeof(ptoplist->entry[0])); + ptoplist->entries++; + ptoplist->entry[idx].caller = caller; + ptoplist->entry[idx].sum_time = act_diff; + ptoplist->entry[idx].count = 1; + return 0; +} +/** + * @brief sum caller-toplist entries + * @param ptoplistpointer + * @return void + */ +static unsigned long sum_toplist_entries(struct _slab_avm_enh_toplist *ptoplist) { + unsigned long sum_count = 0; + unsigned int i; + + for(i = 0; i < ptoplist->entries; i++) { + sum_count += ptoplist->entry[i].count; + } + return sum_count; +} +/** + * @brief sort caller-toplist (greater first) + * @param ptoplistpointer for toplist to fill + * @return void + */ +static void sort_toplist(struct _slab_avm_enh_toplist *ptoplist) { + unsigned int i, max_count, max_idx, idx = 0; + + for(;;) { + struct _slab_avm_top_entry tmp; + max_count = 0; + for(i = idx; i < ptoplist->entries; i++) { + if(ptoplist->entry[i].count > max_count) { + max_count = ptoplist->entry[i].count; + max_idx = i; + } + } + if(max_count == 0) { + break; + } + /*--- swap ---*/ + memcpy(&tmp, &ptoplist->entry[idx], sizeof(tmp)); + memcpy(&ptoplist->entry[idx], &ptoplist->entry[max_idx], sizeof(tmp)); + memcpy(&ptoplist->entry[max_idx], &tmp, sizeof(tmp)); + idx++; + } +} +/** + * @brief fill toplist for cachep + * @param ptoplist pointer for toplist to fill + * @param cachep cachepool + * @param tresh_jiffiesdiff only if caller older than ... 
+ * @return void + */ +void debug_slab_avm_lite_toplist(struct _slab_avm_enh_toplist *ptoplist, const struct kmem_cache *cachep, + unsigned long tresh_jiffiesdiff __maybe_unused){ + unsigned long flags; + unsigned long act_jiffies = jiffies; + unsigned long act_diff; + unsigned int ii, i; + int node; + struct _slab_enh *slab_enhp; + struct kmem_cache_node *n; + struct page *page; + struct list_head *slablist[2]; + + memset(ptoplist, 0, sizeof(*ptoplist)); + + for_each_online_node(node) { + + n = cachep->node[node]; + if (!n) { + continue; + } + slablist[0] = &n->slabs_full; + slablist[1] = &n->slabs_partial; + spin_lock_irqsave(&n->list_lock, flags); + for(i = 0; i < ARRAY_SIZE(slablist); i++) { + list_for_each_entry(page, slablist[i], lru) { + for (ii = 0; ii < cachep->num; ii++) { + if (get_obj_status(page, ii) != OBJECT_ACTIVE) { + continue; + } + slab_enhp = get_slab_enh(cachep, page, ii); + if(slab_enhp->caller == 0) { + continue; + } + act_diff = act_jiffies - slab_enhp->jiffies; + if(act_diff <= tresh_jiffiesdiff) { + /*--- too young ---*/ + continue; + } + mark_in_toplist(ptoplist, slab_enhp->caller, act_diff); + } + } + } + spin_unlock_irqrestore(&n->list_lock, flags); + } + sort_toplist(ptoplist); + ptoplist->sum_count = sum_toplist_entries(ptoplist) + ptoplist->ignored; +} + +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + +#if defined(CONFIG_AVM_ENHANCED) +/** + * @brief get kmemalloc-area if addr in range + * attention! function unsaved for cachep - zone-page-spinlock necessary + * @return start (zero if not exist) + */ +unsigned long get_kmemalloc_area(unsigned long addr, unsigned long *caller, const char **cache_name, + unsigned long *size, int *freed){ + unsigned long flags, kstart; + struct kmem_cache_node *n; + unsigned int objnr, _freed = 0; + struct kmem_cache *cachep; + struct page *page = virt_to_head_page((void *)addr); + + if(!virt_addr_valid(page)) { + return 0; + } + cachep = page->slab_cache; + if(!virt_addr_valid(cachep)) { + return 0; + } + n = cachep->node[numa_mem_id()]; + if(!virt_addr_valid(n)) { + return 0; + } + if(!spin_trylock_irqsave(&n->list_lock, flags)) { + return 0; + } + objnr = obj_to_index(cachep, page, (void *)addr); + if(objnr >= cachep->num) { + spin_unlock_irqrestore(&n->list_lock, flags); + return 0; + } + if(caller) { +#if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK) + if (get_obj_status(page, objnr) != OBJECT_ACTIVE) { + _freed = 1; + } +#endif/*--- #if defined(CONFIG_DEBUG_SLAB_DOUBLE_FREE) || defined(CONFIG_DEBUG_SLAB_LEAK) ---*/ + *caller = +#if defined(CONFIG_DEBUG_SLAB_AVM_LITE) + _freed ? get_slab_enh(cachep, page, objnr)->free_caller : get_slab_enh(cachep, page, objnr)->caller; +#else /*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + 0UL; +#endif /*--- #else ---*//*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/ + } + /*--- dump_slab("slab", page, cachep, objnr); ---*/ + if(cache_name) *cache_name = cachep->name; + if(size) *size = cachep->size; + if(freed) *freed = _freed; + kstart = (unsigned long)index_to_obj(cachep, page, objnr); + spin_unlock_irqrestore(&n->list_lock, flags); + return kstart; +} +#endif /*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
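
For reference, the debug metadata this patch adds lives in the slab's freelist management area: the freelist index array is followed by one status byte per object (OBJECT_FREE/OBJECT_ACTIVE, used by CONFIG_DEBUG_SLAB_DOUBLE_FREE to detect double frees), and, with CONFIG_DEBUG_SLAB_AVM_LITE, by a word-aligned array of struct _slab_enh records holding the allocating/freeing caller and jiffies timestamps. The sketch below only reproduces that layout arithmetic (mirroring calculate_freelist_size() and slab_enh_array() in the hunks above); it is an illustrative userspace program, not kernel code, and its helper names and the choice of freelist_idx_t width are assumptions local to the sketch.

/*
 * Illustrative sketch (not part of the patch): per-slab management layout
 * with CONFIG_DEBUG_SLAB_DOUBLE_FREE + CONFIG_DEBUG_SLAB_AVM_LITE enabled.
 * struct _slab_enh is copied from the patch; freelist_idx_t is assumed to be
 * one byte here (slab.c picks the width based on object count).
 */
#include <stdio.h>
#include <stddef.h>

typedef unsigned char freelist_idx_t;   /* assumed 1-byte index for this sketch */

struct _slab_enh {                       /* per-object alloc/free record (from the patch) */
	unsigned long caller;
	unsigned long jiffies;
	unsigned long free_caller;
	unsigned long free_jiffies;
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Mirrors calculate_freelist_size(): index array + status bytes + aligned slab_enh array. */
static size_t freelist_bytes(unsigned int nr_objs)
{
	size_t sz = nr_objs * sizeof(freelist_idx_t);  /* free-object index array        */
	sz += nr_objs * sizeof(char);                  /* OBJECT_FREE/OBJECT_ACTIVE byte */
	sz = ALIGN_UP(sz, sizeof(unsigned long));      /* align the slab_enh records     */
	sz += nr_objs * sizeof(struct _slab_enh);      /* caller/jiffies bookkeeping     */
	return sz;
}

int main(void)
{
	unsigned int nr_objs = 32;
	size_t status_off = nr_objs * sizeof(freelist_idx_t);
	size_t enh_off = ALIGN_UP(status_off + nr_objs * sizeof(char), sizeof(unsigned long));

	printf("objects per slab   : %u\n", nr_objs);
	printf("status bytes at    : freelist + %zu\n", status_off);
	printf("slab_enh[] at      : freelist + %zu\n", enh_off);
	printf("management bytes   : %zu\n", freelist_bytes(nr_objs));
	return 0;
}

cache_alloc_debugcheck_after() marks the object's status byte active and records the allocating caller in its slab_enh slot; cache_free_debugcheck() checks that byte, reports a double free (including the previous free_caller when the AVM-lite data is present) or a corrupt slab, then stores the freeing caller and marks the slot free again.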