--- zzzz-none-000/linux-3.10.107/mm/slab.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/mm/slab.h	2021-02-04 17:41:59.000000000 +0000
@@ -4,6 +4,41 @@
  * Internal slab definitions
  */
 
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+
+#endif /* CONFIG_SLOB */
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#endif
+
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#endif
+
+#include <linux/memcontrol.h>
+
 /*
  * State of the slab allocator.
  *
@@ -15,7 +50,6 @@ enum slab_state {
 	DOWN,			/* No slab functionality yet */
 	PARTIAL,		/* SLUB: kmem_cache_node available */
-	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
 	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 	UP,			/* Slab caches usable but not all extras yet */
 	FULL			/* Everything is working */
 };
@@ -37,6 +71,7 @@
 
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
+void setup_kmalloc_cache_index_table(void);
 void create_kmalloc_caches(unsigned long);
 
 /* Find the kmalloc slab corresponding for a certain size */
@@ -52,16 +87,29 @@
 extern void create_boot_cache(struct kmem_cache *, const char *name,
 			size_t size, unsigned long flags);
 
-struct mem_cgroup;
-#ifdef CONFIG_SLUB
+int slab_unmergeable(struct kmem_cache *s);
+struct kmem_cache *find_mergeable(size_t size, size_t align,
+	unsigned long flags, const char *name, void (*ctor)(void *));
+#ifndef CONFIG_SLOB
 struct kmem_cache *
-__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
-		   size_t align, unsigned long flags, void (*ctor)(void *));
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+		   unsigned long flags, void (*ctor)(void *));
+
+unsigned long kmem_cache_flags(unsigned long object_size,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *));
 #else
 static inline struct kmem_cache *
-__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
-		   size_t align, unsigned long flags, void (*ctor)(void *))
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+		   unsigned long flags, void (*ctor)(void *))
 { return NULL; }
+
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *))
+{
+	return flags;
+}
 #endif
 
 
@@ -72,8 +120,11 @@
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 #elif defined(CONFIG_SLUB_DEBUG)
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | \
+			  SLAB_STORE_USER | SLAB_STORE_USER_LITE | \
 			  SLAB_TRACE | SLAB_DEBUG_FREE)
+#elif defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+#define SLAB_DEBUG_FLAGS (SLAB_POISON | SLAB_STORE_USER_AND_TIME)
 #else
 #define SLAB_DEBUG_FLAGS (0)
 #endif
@@ -91,6 +142,8 @@
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+int __kmem_cache_shrink(struct kmem_cache *, bool);
+void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
 struct file;
@@ -113,39 +166,33 @@
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-#ifdef CONFIG_MEMCG_KMEM
-static inline bool is_root_cache(struct kmem_cache *s)
-{
-	return !s->memcg_params || s->memcg_params->is_root_cache;
-}
-
-static inline bool cache_match_memcg(struct kmem_cache *cachep,
-				     struct mem_cgroup *memcg)
-{
-	return (is_root_cache(cachep) && !memcg) ||
-		(cachep->memcg_params->memcg == memcg);
-}
+/*
+ * Generic implementation of bulk operations
+ * These are useful for situations in which the allocator cannot
+ * perform optimizations. In that case segments of the objecct listed
+ * may be allocated or freed using these operations.
+ */
+void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * Iterate over all memcg caches of the given root cache. The caller must hold
+ * slab_mutex.
+ */
+#define for_each_memcg_cache(iter, root) \
+	list_for_each_entry(iter, &(root)->memcg_params.list, \
+			    memcg_params.list)
 
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
+static inline bool is_root_cache(struct kmem_cache *s)
 {
-	if (is_root_cache(s))
-		return;
-
-	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
-		mem_cgroup_destroy_cache(s);
+	return s->memcg_params.is_root_cache;
 }
 
 static inline bool slab_equal_or_root(struct kmem_cache *s,
-					struct kmem_cache *p)
+				      struct kmem_cache *p)
 {
-	return (p == s) ||
-		(s->memcg_params && (p == s->memcg_params->root_cache));
+	return p == s || p == s->memcg_params.root_cache;
 }
 
 /*
@@ -156,41 +203,64 @@
 static inline const char *cache_name(struct kmem_cache *s)
 {
 	if (!is_root_cache(s))
-		return s->memcg_params->root_cache->name;
+		s = s->memcg_params.root_cache;
 	return s->name;
 }
 
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+/*
+ * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
+ * That said the caller must assure the memcg's cache won't go away by either
+ * taking a css reference to the owner cgroup, or holding the slab_mutex.
+ */
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
-	if (!s->memcg_params)
-		return NULL;
-	return s->memcg_params->memcg_caches[idx];
+	struct kmem_cache *cachep;
+	struct memcg_cache_array *arr;
+
+	rcu_read_lock();
+	arr = rcu_dereference(s->memcg_params.memcg_caches);
+
+	/*
+	 * Make sure we will access the up-to-date value. The code updating
+	 * memcg_caches issues a write barrier to match this (see
+	 * memcg_create_kmem_cache()).
+	 */
+	cachep = lockless_dereference(arr->entries[idx]);
+	rcu_read_unlock();
+
+	return cachep;
 }
 
 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
 	if (is_root_cache(s))
 		return s;
-	return s->memcg_params->root_cache;
-}
-#else
-static inline bool is_root_cache(struct kmem_cache *s)
-{
-	return true;
+	return s->memcg_params.root_cache;
 }
 
-static inline bool cache_match_memcg(struct kmem_cache *cachep,
-				     struct mem_cgroup *memcg)
+static __always_inline int memcg_charge_slab(struct page *page,
+					     gfp_t gfp, int order,
+					     struct kmem_cache *s)
 {
-	return true;
+	if (!memcg_kmem_enabled())
+		return 0;
+	if (is_root_cache(s))
+		return 0;
+	return __memcg_kmem_charge_memcg(page, gfp, order,
+					 s->memcg_params.memcg);
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
+extern void slab_init_memcg_params(struct kmem_cache *);
+
+#else /* !CONFIG_MEMCG_KMEM */
 
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
+#define for_each_memcg_cache(iter, root) \
+	for ((void)(iter), (void)(root); 0; )
+
+static inline bool is_root_cache(struct kmem_cache *s)
 {
+	return true;
 }
 
 static inline bool slab_equal_or_root(struct kmem_cache *s,
@@ -204,7 +274,8 @@
 	return s->name;
 }
 
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	return NULL;
 }
@@ -213,7 +284,17 @@
 {
 	return s;
 }
-#endif
+
+static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
+				    struct kmem_cache *s)
+{
+	return 0;
+}
+
+static inline void slab_init_memcg_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
@@ -236,13 +317,12 @@
 		return cachep;
 
 	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
-	       __FUNCTION__, cachep->name, s->name);
+	       __func__, s->name, cachep->name);
 	WARN_ON_ONCE(1);
 	return s;
 }
-#endif
-
 
+#ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.
  */
@@ -257,7 +337,7 @@
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */
 	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
+	struct alien_cache **alien;	/* on other nodes */
 	unsigned long next_reap;	/* updated without locking */
 	int free_touched;		/* updated without locking */
 #endif
@@ -273,3 +353,46 @@
 #endif
 
 };
+
+static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+{
+	return s->node[node];
+}
+
+/*
+ * Iterator over all nodes. The body will be executed for each node that has
+ * a kmem_cache_node structure allocated (which is true for all online nodes)
+ */
+#define for_each_kmem_cache_node(__s, __node, __n) \
+	for (__node = 0; __node < nr_node_ids; __node++) \
+		 if ((__n = get_node(__s, __node)))
+
+#endif
+
+void *slab_start(struct seq_file *m, loff_t *pos);
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
+int memcg_slab_show(struct seq_file *m, void *p);
+
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST) || defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+extern unsigned int slab_track_time;
+
+struct _slab_avm_alloc_entry {
+	unsigned long caller;
+	unsigned int count;
+	unsigned long long sum_time;
+};
+
+struct _slab_avm_topalloclist {
+	struct _slab_avm_alloc_entry entry[128];
+	unsigned int entries;
+	unsigned int ignored;		/*--- not enough entries ---*/
+	unsigned long sum_count;	/*--- all caller-count + ignored ---*/
+};
+
+void cache_avm_topalloc_list(struct _slab_avm_topalloclist *ptoplist,
+			     struct kmem_cache *cachep,
+			     unsigned long tresh_jiffiesdiff);
+#endif/*--- #if defined(CONFIG_SLUB_AVM_ALLOC_LIST) ---*/
+
+#endif /* MM_SLAB_H */