--- zzzz-none-000/linux-2.6.32.61/mm/slab.c	2013-06-10 09:43:48.000000000 +0000
+++ virian-300e-630/linux-2.6.32.61/mm/slab.c	2015-03-04 10:10:52.000000000 +0000
@@ -2915,7 +2915,29 @@
 }
 #else
 #define kfree_debugcheck(x) do { } while(0)
+
+#ifdef CONFIG_DEBUG_SLAB_DOUBLE_FREE
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+				   void *caller)
+{
+	struct page *page;
+	unsigned int objnr;
+	struct slab *slabp;
+
+	BUG_ON(virt_to_cache(objp) != cachep);
+	page = virt_to_head_page(objp);
+	slabp = page_get_slab(page);
+
+	objnr = obj_to_index(cachep, slabp, objp);
+
+	if (slab_bufctl(slabp)[objnr] != BUFCTL_ACTIVE)
+		slab_error(cachep, "double free detected");
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+	return objp;
+}
+#else
 #define cache_free_debugcheck(x,objp,z) (objp)
+#endif
 #define check_slabp(x,y) do { } while(0)
 #endif
 
@@ -3053,7 +3075,7 @@
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
+#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK)
 	{
 		struct slab *slabp;
 		unsigned objnr;
@@ -3075,8 +3097,24 @@
 	return objp;
 }
 #else
+#ifdef CONFIG_DEBUG_SLAB_DOUBLE_FREE
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+				gfp_t flags, void *objp, void *caller)
+{
+	struct slab *slabp;
+	unsigned objnr;
+
+	if (!objp)
+		return objp;
+	slabp = page_get_slab(virt_to_head_page(objp));
+	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+	slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+	return objp;
+}
+#else
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
+#endif
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
@@ -4065,7 +4103,7 @@
 		if (l3->free_touched)
 			l3->free_touched = 0;
 		else {
-			int freed;
+			int freed __maybe_unused;
 
 			freed = drain_freelist(searchp, l3, (l3->free_limit +
 				5 * searchp->num - 1) / (5 * searchp->num));
@@ -4309,7 +4347,7 @@
 	.release	= seq_release,
 };
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
+#if defined(CONFIG_DEBUG_SLAB_LEAK) || defined(CONFIG_NET_DEBUG_SKBUFF_LEAK)
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
@@ -4469,15 +4507,179 @@
 };
 #endif
 
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+
+#include <linux/skbuff.h>
+
+static void *skbuff_leaks_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&cache_chain_mutex);
+	return seq_list_start(&cache_chain, *pos);
+}
+
+static void skbuff_handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		struct sk_buff *skb;
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		skb = (struct sk_buff *)p;
+		if (!add_caller(n, (unsigned long)skb->last_user))
+			return;
+	}
+}
+
+static int skbuff_leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (strncmp(cachep->name, "skbuff_", 7) != 0)
+		return 0;
+
+	/* OK, we can do it */
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each_entry(slabp, &l3->slabs_full, list)
+			skbuff_handle_slab(n, cachep, slabp);
+		list_for_each_entry(slabp, &l3->slabs_partial, list)
+			skbuff_handle_slab(n, cachep, slabp);
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+
+	return 0;
+}
+
+static const struct seq_operations skbuffstats_op = {
+	.start	= skbuff_leaks_start,
+	.next	= s_next,
+	.stop	= s_stop,
+	.show	= skbuff_leaks_show,
+};
+
+static int skbuffstats_open(struct inode *inode, struct file *file)
+{
+	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	int ret = -ENOMEM;
+	if (n) {
+		ret = seq_open(file, &skbuffstats_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+			m->private = n;
+			n = NULL;
+		}
+		kfree(n);
+	}
+	return ret;
+}
+
+static const struct file_operations proc_skbuffstats_operations = {
+	.open		= skbuffstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+#endif /* CONFIG_NET_DEBUG_SKBUFF_LEAK */
+
 static int __init slab_proc_init(void)
 {
 	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
 #endif
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+	proc_create("skbuff_last_user", 0, NULL, &proc_skbuffstats_operations);
+#endif
 	return 0;
 }
 module_init(slab_proc_init);
+
+#define SKIP_SPACE(p)    while ((*p == ' ') || (*p == '\t')) p++
+#define SKIP_NONSPACE(p) while (*p && (*p != ' ') && (*p != '\t')) p++
+/*--------------------------------------------------------------------------------*\
+ * kernel-printk-show for slabinfo
+ * use only with enabled irqs!
+\*--------------------------------------------------------------------------------*/
+void show_slab(void) {
+	unsigned int active_objs;
+	int err = 0;
+	char *ptxt;
+	void *p;
+	loff_t pos;
+	char buf[512 + 1];
+	struct seq_file seq;
+
+	memset(&seq, 0, sizeof(seq));
+	seq.size = sizeof(buf) - 1;
+	seq.buf = buf;
+	pos = 0;
+
+	p = s_start(&seq, &pos);
+	seq.buf[seq.count] = 0;
+	printk(KERN_ERR "%s", seq.buf), seq.count = 0;
+	for (;;) {
+		if (!p || IS_ERR(p)) {
+			break;
+		}
+		err = s_show(&seq, p);
+		if (err < 0) {
+			break;
+		}
+		seq.buf[seq.count] = 0;
+		/*--- only if active_objs exist: ---*/
+		ptxt = seq.buf;
+		SKIP_NONSPACE(ptxt);
+		SKIP_SPACE(ptxt);
+		sscanf(ptxt, "%u", &active_objs);
+		if (active_objs) {
+			printk(KERN_CONT "%s", seq.buf);
+		}
+		seq.count = 0;
+		p = s_next(&seq, p, &pos);
+	}
+	s_stop(&seq, p);
+}
 #endif
 
 /**
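
The last hunk registers a new /proc/skbuff_last_user entry: skbuff_leaks_show() walks every cache whose name starts with "skbuff_" and prints one line per distinct skb->last_user value, formatted as "<cache>: <count> " followed by the resolved symbol. As a rough illustration only (not part of the patch), a userspace reader could look like the sketch below; it assumes a kernel built with CONFIG_NET_DEBUG_SKBUFF_LEAK so that the proc file actually exists.

/* Illustrative sketch, not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Created by slab_proc_init() when CONFIG_NET_DEBUG_SKBUFF_LEAK is set. */
	FILE *fp = fopen("/proc/skbuff_last_user", "r");
	char line[256];

	if (!fp) {
		perror("/proc/skbuff_last_user");
		return EXIT_FAILURE;
	}
	/* Each line: "<skbuff cache>: <count> <last_user symbol>" */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return EXIT_SUCCESS;
}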