--- zzzz-none-000/linux-3.10.107/include/linux/slub_def.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/slub_def.h	2021-02-04 17:41:59.000000000 +0000
@@ -6,18 +6,12 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
-	FREE_FASTPATH,		/* Free to cpu slub */
+	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
 	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
@@ -62,6 +56,10 @@
 	unsigned long x;
 };
 
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_AVM_THIN_SYSFS)
+#define SLAB_SUPPORTS_SYSFS
+#endif
+
 /*
  * Slab cache management.
  */
@@ -87,12 +85,15 @@
 	int reserved;		/* Reserved bytes at the end of slabs */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SYSFS
+#ifdef SLAB_SUPPORTS_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 #ifdef CONFIG_MEMCG_KMEM
-	struct memcg_cache_params *memcg_params;
+	struct memcg_cache_params memcg_params;
 	int max_attr_size; /* for propagation, maximum size of a stored attr */
+#ifdef SLAB_SUPPORTS_SYSFS
+	struct kset *memcg_kset;
+#endif
 #endif
 
 #ifdef CONFIG_NUMA
@@ -104,108 +105,31 @@
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
-	return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#if defined(SLAB_SUPPORTS_SYSFS)
+void sysfs_slab_remove(struct kmem_cache *);
 #else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static inline void sysfs_slab_remove(struct kmem_cache *s)
 {
-	return kmalloc_order(size, flags, order);
 }
 #endif
 
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-					gfp_t gfpflags,
-					int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-			gfp_t gfpflags,
-			int node, size_t size)
+/**
+ * virt_to_obj - returns address of the beginning of object.
+ * @s: object's kmem_cache
+ * @slab_page: address of slab page
+ * @x: address within object memory range
+ *
+ * Returns address of the beginning of object
+ */
+static inline void *virt_to_obj(struct kmem_cache *s,
+				const void *slab_page,
+				const void *x)
 {
-	return kmem_cache_alloc_node(s, gfpflags, node);
+	return (void *)x - ((x - slab_page) % s->size);
 }
-#endif
 
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-				flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#endif
+void object_err(struct kmem_cache *s, struct page *page,
+		u8 *object, char *reason);
 
 #endif /* _LINUX_SLUB_DEF_H */
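
The virt_to_obj() helper introduced above relies on SLUB laying objects out back to back at a fixed stride of s->size bytes from the start of the slab page, so an interior pointer can be rounded down to its object's base address with a single modulo. Below is a minimal userspace sketch of that arithmetic: the fake_kmem_cache type, the 64-byte object size, and the slab buffer are hypothetical stand-ins for illustration, not part of the patch. The kernel version also does pointer arithmetic directly on const void * (a GCC extension); the sketch casts to char * and drops const to stay within standard C.

/*
 * Standalone model of the virt_to_obj() arithmetic added by this patch.
 * SLUB packs objects every s->size bytes from the start of a slab page,
 * so an interior pointer is rounded down to its object's base by
 * subtracting (x - slab_page) % s->size.
 */
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for struct kmem_cache; only ->size matters here. */
struct fake_kmem_cache {
	size_t size;	/* object stride within a slab, in bytes */
};

static void *virt_to_obj(const struct fake_kmem_cache *s,
			 void *slab_page, void *x)
{
	return (char *)x - (((char *)x - (char *)slab_page) % (ptrdiff_t)s->size);
}

int main(void)
{
	struct fake_kmem_cache cache = { .size = 64 };
	char slab[4096];			/* stands in for one slab page */
	void *inner = slab + 3 * 64 + 17;	/* 17 bytes into object #3 */
	void *base = virt_to_obj(&cache, slab, inner);

	/* Prints: object #3 begins at slab offset 192 */
	printf("object #%td begins at slab offset %td\n",
	       ((char *)base - slab) / (ptrdiff_t)cache.size,
	       (char *)base - slab);
	return 0;
}

This is the same rounding that lets SLUB debugging code (object_err() and friends) map an arbitrary faulting address back to the object it falls inside before printing a report.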