--- zzzz-none-000/linux-4.1.38/include/linux/slab.h	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/include/linux/slab.h	2020-11-25 10:06:48.000000000 +0000
@@ -25,6 +25,9 @@
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define SLAB_CACHE_ACP		0x00008000UL	/* Use GFP_ACP memory */
+#endif
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
@@ -247,7 +250,6 @@
 {
 	if (!size)
 		return 0;
-
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
@@ -294,12 +296,12 @@
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node __maybe_unused)
 {
 	return __kmalloc(size, flags);
 }
 
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node __maybe_unused)
 {
 	return kmem_cache_alloc(s, flags);
 }
@@ -316,7 +318,7 @@
 
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-				int node, size_t size)
+				int node __maybe_unused, size_t size)
 {
 	return kmem_cache_alloc_trace(s, gfpflags, size);
 }
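
Note on the first hunk: it only defines the new SLAB_CACHE_ACP flag. The sketch below is a minimal, hypothetical illustration of how a driver in this tree might request ACP-backed slab memory, assuming the BCM963xx slab implementation routes SLAB_CACHE_ACP to the ACP zone the same way SLAB_CACHE_DMA is routed to GFP_DMA. The struct, cache name, and init function are illustrative and not taken from the patch.

/*
 * Hypothetical usage sketch -- not part of the patch above.
 * Assumes the BCM963xx slab allocator honours SLAB_CACHE_ACP
 * analogously to SLAB_CACHE_DMA.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct acp_buf {			/* illustrative object type */
	u32 hdr;
	u8  payload[256];
};

static struct kmem_cache *acp_buf_cache;

static int __init acp_buf_cache_init(void)
{
	unsigned long flags = SLAB_HWCACHE_ALIGN;

#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
	/* Ask for ACP-zone backing, mirroring how SLAB_CACHE_DMA is used. */
	flags |= SLAB_CACHE_ACP;
#endif
	acp_buf_cache = kmem_cache_create("acp_buf", sizeof(struct acp_buf),
					  0, flags, NULL);
	return acp_buf_cache ? 0 : -ENOMEM;
}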