--- zzzz-none-000/linux-4.9.218/mm/slab.h	2020-04-02 15:20:41.000000000 +0000
+++ seale-7590ac-750/linux-4.9.218/mm/slab.h	2022-11-30 09:46:20.000000000 +0000
@@ -125,7 +125,8 @@
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 #elif defined(CONFIG_SLUB_DEBUG)
-#define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+#define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | \
+			  SLAB_STORE_USER | SLAB_STORE_USER_LITE | \
 			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
 #else
 #define SLAB_DEBUG_FLAGS	(0)
@@ -376,7 +377,8 @@
 	 * back there or track user information then we can
 	 * only use the space before that information.
 	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+	if (s->flags & (SLAB_DESTROY_BY_RCU |
+			SLAB_STORE_USER | SLAB_STORE_USER_LITE))
 		return s->inuse;
 	/*
 	 * Else we can use all the padding etc for the allocation
@@ -489,4 +491,26 @@
 static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+extern unsigned int slab_track_time;
+
+struct _slab_avm_alloc_entry {
+	unsigned long caller;
+	unsigned int count;
+	unsigned long long sum_time;
+};
+
+struct _slab_avm_topalloclist {
+	struct _slab_avm_alloc_entry entry[128];
+	struct _slab_avm_alloc_entry *last_entry;
+	unsigned int entries;
+	unsigned int ignored;    /* not enough entries */
+	unsigned long sum_count; /* all caller-count + ignored */
+	bool force_watchdog_touch;
+};
+
+void cache_avm_topalloc_list(struct _slab_avm_topalloclist *ptoplist,
+			     struct kmem_cache *cachep);
+#endif/*--- #if defined(CONFIG_SLUB_AVM_ALLOC_LIST) ---*/
+
 #endif /* MM_SLAB_H */