--- zzzz-none-000/linux-4.1.52/include/linux/slab.h	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/include/linux/slab.h	2022-03-02 11:37:13.000000000 +0000
@@ -25,8 +25,13 @@
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define SLAB_CACHE_ACP		0x00008000UL	/* Use GFP_ACP memory */
+#endif
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER_LITE	0x00020000UL	/* AVM: need for slab_allocator usage */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC_CORRUPTION	0x00080000UL	/* Panic if alloc/free consistency check fails */
 /*
  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
  *
@@ -247,7 +252,6 @@
 {
 	if (!size)
 		return 0;
-
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
@@ -294,12 +298,12 @@
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node __maybe_unused)
 {
 	return __kmalloc(size, flags);
 }
 
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node __maybe_unused)
 {
 	return kmem_cache_alloc(s, flags);
 }
@@ -316,7 +320,7 @@
 
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-					int node, size_t size)
+					int node __maybe_unused, size_t size)
 {
 	return kmem_cache_alloc_trace(s, gfpflags, size);
 }
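
The hunk at line 25 adds vendor-specific cache flags (SLAB_CACHE_ACP, SLAB_STORE_USER_LITE, SLAB_PANIC_CORRUPTION) alongside the stock SLAB_* flags. The following is a minimal sketch, not part of the patch, of how a hypothetical driver might pass the new flags to kmem_cache_create(); the cache name "foo_obj", the struct foo_obj layout, and the init/exit functions are made-up illustrations, while the flag names and the CONFIG_BCM_* guard come from the diff above.

/* Sketch only: illustrative use of the vendor flags introduced by this patch. */
#include <linux/slab.h>
#include <linux/module.h>

struct foo_obj {			/* hypothetical object managed by the cache */
	int id;
	char payload[64];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	unsigned long flags = SLAB_HWCACHE_ALIGN;

#ifdef SLAB_PANIC_CORRUPTION
	/* Vendor flag from this patch: panic when the allocator's
	 * alloc/free consistency check fails for this cache. */
	flags |= SLAB_PANIC_CORRUPTION;
#endif
#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
	/* Back the cache with GFP_ACP memory on supported Broadcom targets. */
	flags |= SLAB_CACHE_ACP;
#endif

	foo_cache = kmem_cache_create("foo_obj", sizeof(struct foo_obj),
				      0, flags, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The other hunks only annotate the unused node parameter of the !CONFIG_NUMA / !CONFIG_TRACING fallbacks with __maybe_unused to silence compiler warnings; they do not change behavior.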