--- zzzz-none-000/linux-2.6.39.4/mm/vmalloc.c	2011-08-03 19:43:28.000000000 +0000
+++ puma6-arm-6490-729/linux-2.6.39.4/mm/vmalloc.c	2021-11-10 13:23:11.000000000 +0000
@@ -26,11 +26,31 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+#include <linux/llist.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
+struct vfree_deferred {
+	struct llist_head list;
+	struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *llnode = llist_del_all(&p->list);
+	while (llnode) {
+		void *p = llnode;
+		llnode = llist_next(llnode);
+		__vunmap(p, 1);
+	}
+}
+
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -378,12 +398,12 @@
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 		if (addr < vstart)
 			goto nocache;
-		if (addr + size - 1 < addr)
+		if (addr + size < addr)
 			goto overflow;
 
 	} else {
 		addr = ALIGN(vstart, align);
-		if (addr + size - 1 < addr)
+		if (addr + size < addr)
 			goto overflow;
 
 		n = vmap_area_root.rb_node;
@@ -572,6 +592,7 @@
 	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
 }
 
+
 /*
  * Purges all lazily-freed vmap areas.
  *
@@ -665,6 +686,7 @@
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
+
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
 		try_purge_vmap_area_lazy();
 }
@@ -1164,10 +1186,14 @@
 
 	for_each_possible_cpu(i) {
 		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
 
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, free_work);
 	}
 
 	/* Import existing vmlist entries. */
@@ -1456,7 +1482,7 @@
 	kfree(area);
 	return;
 }
- 
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1465,15 +1491,25 @@
  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  * NULL, no operation is performed.
  *
- * Must not be called in interrupt context.
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
+ *
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_interrupt());
+	BUG_ON(in_nmi());
 
 	kmemleak_free(addr);
 
-	__vunmap(addr, 1);
+	if (!addr)
+		return;
+	if (unlikely(in_interrupt())) {
+		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		llist_add((struct llist_node *)addr, &p->list);
+		schedule_work(&p->wq);
+	} else
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
 
@@ -1490,7 +1526,8 @@
 {
 	BUG_ON(in_interrupt());
 	might_sleep();
-	__vunmap(addr, 0);
+	if (addr)
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
 
@@ -2575,3 +2612,64 @@
 module_init(proc_vmalloc_init);
 #endif
 
+#if defined(CONFIG_AVM_ENHANCED)
+/**
+ */
+static struct vmap_area *__is_addr_in_vmap_area(unsigned long addr) {
+	struct rb_node *n = vmap_area_root.rb_node;
+
+	while (n) {
+		struct vmap_area *va;
+		if(!virt_addr_valid(n)) {
+			return NULL;
+		}
+		va = rb_entry(n, struct vmap_area, rb_node);
+		if(!virt_addr_valid(va)) {
+			return NULL;
+		}
+		if((addr >= va->va_start) && (addr < va->va_end)) {
+			return va;
+		}
+		if (addr < va->va_start)
+			n = n->rb_left;
+		else if (addr > va->va_start)
+			n = n->rb_right;
+	}
+	return NULL;
+}
+/**
+ * get vmalloc-area-infos if addr in range
+ * @return va_start (zero if not exist)
+ */
+unsigned long get_vmap_area(unsigned long addr,
+			    unsigned long *caller,
+			    unsigned long *size,
+			    unsigned long *vm_flags
+			    ) {
+	unsigned long flags;
+	unsigned long _size = 0, _caller = 0, _va_start = 0, _vm_flags = 0;
+	struct vmap_area *va;
+	struct vm_struct *vm;
+
+	if(!spin_trylock_irqsave(&vmap_area_lock, flags)) {
+		return 0;
+	}
+	va = __is_addr_in_vmap_area(addr);
+	if(va && (va->flags & VM_VM_AREA)) {
+		vm = (struct vm_struct *)va->private;
+		if(virt_addr_valid(vm)) {
+			_caller = (unsigned long)vm->caller;
+			_vm_flags = vm->flags;
+		}
+		_va_start = va->va_start;
+		_size = va->va_end - va->va_start;
+	}
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
+
+	if(caller) *caller = _caller;
+	if(size) *size = _size;
+	if(vm_flags) *vm_flags = _vm_flags;
+	return _va_start;
+}
+#endif /*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
+
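
With the hunks above applied, a vfree() issued while in_interrupt() is true no
longer unmaps immediately: the freed block itself is reused as an llist_node,
pushed onto the per-CPU vfree_deferred list, and free_work() performs the real
__vunmap() later from the workqueue. A minimal sketch (not part of the patch)
of a caller that relies on this behaviour; the buffer and tasklet names are
hypothetical:

#include <linux/vmalloc.h>
#include <linux/interrupt.h>

static void *rx_buf;	/* hypothetical: set up elsewhere with vmalloc() */

/*
 * Tasklet (softirq) handler: in_interrupt() is true here, so vfree()
 * now defers the unmap via the per-CPU llist instead of hitting the
 * old BUG_ON(in_interrupt()).
 */
static void rx_teardown_tasklet(unsigned long data)
{
	vfree(rx_buf);
	rx_buf = NULL;
}

static DECLARE_TASKLET(rx_teardown, rx_teardown_tasklet, 0);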
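
Likewise, a sketch of how the CONFIG_AVM_ENHANCED helper get_vmap_area() added
at the end of the file might be consumed from a debug or exception path; the
dump_vmalloc_info() wrapper, its message format and the extern declaration are
illustrative assumptions, not part of the AVM sources:

#include <linux/kernel.h>

/* prototype as introduced by the patch; the AVM tree presumably exports
 * it from one of its own headers, which this patch does not show */
extern unsigned long get_vmap_area(unsigned long addr, unsigned long *caller,
				   unsigned long *size, unsigned long *vm_flags);

/*
 * Report which vmalloc area (if any) contains a suspect address.
 * get_vmap_area() only trylocks vmap_area_lock, so it may be called
 * from atomic/exception context; it returns 0 when the address is not
 * inside a vmap area or the lock is currently contended.
 */
static void dump_vmalloc_info(unsigned long addr)
{
	unsigned long caller, size, vm_flags, start;

	start = get_vmap_area(addr, &caller, &size, &vm_flags);
	if (!start)
		return;
	pr_err("addr 0x%08lx in vmalloc area 0x%08lx..0x%08lx (vm_flags 0x%lx), caller %pS\n",
	       addr, start, start + size, vm_flags, (void *)caller);
}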