--- zzzz-none-000/linux-2.4.17/mm/slab.c 2001-12-21 17:42:05.000000000 +0000
+++ sangam-fb-401/linux-2.4.17/mm/slab.c 2005-04-12 12:29:16.000000000 +0000
@@ -49,7 +49,9 @@
* constructors and destructors are called without any locking.
* Several members in kmem_cache_t and slab_t never change, they
* are accessed without any locking.
- * The per-cpu arrays are never accessed from the wrong cpu, no locking.
+ * The per-cpu arrays are never accessed from the wrong cpu, no locking;
+ * however, they are accessed with local interrupts disabled, so no
+ * preempt_disable is needed.
* The non-constant members are protected with a per-cache irq spinlock.
*
* Further notes from the original documentation:
@@ -342,8 +344,10 @@
{ 256, NULL, NULL},
{ 512, NULL, NULL},
{ 1024, NULL, NULL},
+ { 1660, NULL, NULL}, /*--- network buffer ---*/
{ 2048, NULL, NULL},
{ 4096, NULL, NULL},
+ { 4156, NULL, NULL}, /*--- WLAN ---*/
{ 8192, NULL, NULL},
{ 16384, NULL, NULL},
{ 32768, NULL, NULL},
@@ -1511,6 +1515,196 @@
return __kmem_cache_alloc(cachep, flags);
}
+/*------------------------------------------------------------------------------------------*\
+\*------------------------------------------------------------------------------------------*/
+#if defined (CONFIG_AVM_MEM_MONITORING)
+/*
+ * Simple memory usage tracker...
+ */
+
+#define HEADLISTSIZE 521
+
+static unsigned kmem_alloc_count = 0;
+static spinlock_t kmem_spinlock = SPIN_LOCK_UNLOCKED;
+static unsigned headcount = 0;
+static struct klhead head[HEADLISTSIZE] = { { NULL, 0, 0, 0, 0, NULL }, };
+static struct klhead meta = { "", 0, 0, 0, 0, NULL };
+
+int kmem_alloc_read_proc (
+ char * page,
+ char ** start,
+ off_t off,
+ int count,
+ int * eof,
+ void * data
+) {
+ int len = 0;
+ int ix = 0;
+ struct klhead * kl = &meta;
+ unsigned long flags;
+
+ local_irq_save (flags);
+ do {
+ if (kl->name != NULL) {
+ while (kl != NULL) {
+ len += sprintf (
+ page + len,
+ "%-32s K %8u (%8u) V %8u (%8u)\n",
+ kl->name,
+ kl->kalloc, kl->kmax,
+ kl->valloc, kl->vmax
+ );
+ if (len <= off) {
+ off -= len;
+ len = 0;
+ } else if ((len - off) > count) {
+ goto exit;
+ }
+ kl = kl->next;
+ }
+ }
+ kl = &head[ix++];
+ } while (ix != HEADLISTSIZE);
+ len += sprintf (page + len, "\n%-32s T %8u\n", "", kmem_alloc_count);
+exit:
+ local_irq_restore (flags);
+ *start = page + off;
+ len -= (*start - page);
+ if (len <= count) {
+ *eof = 1;
+ }
+ if (len > count) {
+ len = count;
+ }
+ if (len < 0) {
+ len = 0;
+ }
+ return len;
+}
+
+static struct klhead * findhead (const char * file)
+{
+ const char * fp = file;
+ unsigned sl = 1;
+ unsigned hx = 0;
+ int ff = 1;
+ struct klhead * hp;
+
+ do {
+ hx = (hx << 3) + *fp++;
+ sl++;
+ } while (*fp != (char) 0);
+ hx %= HEADLISTSIZE;
+
+ hp = &head[hx];
+ if (hp->name != NULL) {
+ while (0 != strcmp (hp->name, file)) {
+ if (hp->next == NULL) {
+ hp->next = kmalloc0 (sizeof (struct klhead), GFP_ATOMIC, NULL);
+ hp = hp->next;
+ goto init;
+ } else {
+ hp = hp->next;
+ }
+ }
+ ff = 0;
+ }
+init:
+ if (ff && (hp != NULL)) {
+ headcount++;
+ hp->kmax = 0;
+ hp->vmax = 0;
+ hp->kalloc = 0;
+ hp->valloc = 0;
+ hp->next = NULL;
+ if (NULL != (hp->name = kmalloc0 (sl, GFP_ATOMIC, NULL))) {
+ strncpy (hp->name, file, sl);
+ }
+ }
+ return hp;
+}
+
+void * kmem_alloc_notify (void * p, size_t size, const char * file, char attr)
+{
+ struct kmhead * kp;
+ struct klhead * kl;
+ unsigned long flags;
+
+ if (p != NULL) {
+ kp = (struct kmhead *) p;
+ p = kp + 1;
+ kp->size = size;
+ if (file == NULL) {
+ local_irq_save (flags);
+ meta.kalloc += size;
+ if (size > meta.kmax) meta.kmax = size;
+ kl = &meta;
+ local_irq_restore (flags);
+ } else {
+ local_irq_save (flags);
+ kmem_alloc_count += size;
+ if (headcount == 0) {
+ memset (head, 0, HEADLISTSIZE * sizeof (struct klhead));
+ }
+ if (NULL != (kl = findhead (file))) {
+ if (attr == 'K') {
+ kl->kalloc += size;
+ if (size > kl->kmax) kl->kmax = size;
+ } else {
+ kl->valloc += size;
+ if (size > kl->vmax) kl->vmax = size;
+ }
+ }
+ local_irq_restore (flags);
+ }
+ kp->head = kl;
+ }
+ return p;
+}
+
+void * kmem_free_notify (void * p, char attr)
+{
+ struct kmhead * kp = NULL;
+ struct klhead * kl;
+ unsigned long flags;
+
+ if (p != NULL) {
+ kp = ((struct kmhead *) p) - 1;
+ local_irq_save (flags);
+ kmem_alloc_count -= kp->size;
+ if (NULL != (kl = kp->head)) {
+ if (attr == 'K') {
+ kl->kalloc -= kp->size;
+ } else {
+ kl->valloc -= kp->size;
+ }
+ }
+ local_irq_restore (flags);
+ }
+ return kp;
+}
+
+void * kmalloc0 (size_t size, int flags, const char * file)
+{
+ cache_sizes_t * csizep = cache_sizes;
+ size_t size0 = size;
+ void * objp;
+
+ size += sizeof (struct kmhead);
+ for (; csizep->cs_size; csizep++) {
+ if (size > csizep->cs_size)
+ continue;
+ objp = __kmem_cache_alloc(
+ flags & GFP_DMA ? csizep->cs_dmacachep : csizep->cs_cachep,
+ flags
+ );
+		return kmem_alloc_notify (objp, size0, file, 'K');
+ }
+ return NULL;
+}
+
+#else /*--- #if defined (CONFIG_AVM_MEM_MONITORING) ---*/
+
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
@@ -1536,6 +1730,9 @@
{
cache_sizes_t *csizep = cache_sizes;
+ /*--- if(size > 256) ---*/
+ /*--- __printk("[kmalloc:0x%x]", size); ---*/
+
for (; csizep->cs_size; csizep++) {
if (size > csizep->cs_size)
continue;
@@ -1544,6 +1741,7 @@
}
return NULL;
}
+#endif /*--- #else ---*/ /*--- #if defined (CONFIG_AVM_MEM_MONITORING) ---*/
/**
* kmem_cache_free - Deallocate an object
@@ -1582,6 +1780,9 @@
if (!objp)
return;
local_irq_save(flags);
+#if defined (CONFIG_AVM_MEM_MONITORING)
+ objp = kmem_free_notify (objp, 'K');
+#endif /*--- #if defined (CONFIG_AVM_MEM_MONITORING) ---*/
CHECK_PAGE(virt_to_page(objp));
c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);