#ifndef MM_SLAB_H
#define MM_SLAB_H
#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
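/*
 * Internal slab definitions
 */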
#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
#define system_has_freelist_aba() …
#define try_cmpxchg_freelist …
# endif
#define this_cpu_try_cmpxchg_freelist …
typedef u128 freelist_full_t;
#else
# ifdef system_has_cmpxchg64
#define system_has_freelist_aba() …
#define try_cmpxchg_freelist …
# endif
#define this_cpu_try_cmpxchg_freelist …
typedef u64 freelist_full_t;
#endif
#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif
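/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */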
typedef union { … } freelist_aba_t;
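/* Reuses the bits in struct page */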
struct slab { … };
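/* Keep the struct slab fields at the offsets of the struct page fields they reuse. */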
#define SLAB_MATCH …
SLAB_MATCH …;
SLAB_MATCH …;
SLAB_MATCH …;
#ifdef CONFIG_MEMCG
SLAB_MATCH …;
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(…);
#if defined(system_has_freelist_aba)
static_assert(…);
#endif
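/* Casts between the struct slab overlay and the folio/page it is built on. */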
#define folio_slab(folio) …
#define slab_folio(s) …
#define page_slab(p) …
#define slab_page(s) …
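/*
 * If network-based swap is enabled, slab must keep track of whether memory
 * was allocated from pfmemalloc reserves.
 */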
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{ … }
static inline void slab_set_pfmemalloc(struct slab *slab)
{ … }
static inline void slab_clear_pfmemalloc(struct slab *slab)
{ … }
static inline void __slab_clear_pfmemalloc(struct slab *slab)
{ … }
static inline void *slab_address(const struct slab *slab)
{ … }
static inline int slab_nid(const struct slab *slab)
{ … }
static inline pg_data_t *slab_pgdat(const struct slab *slab)
{ … }
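/* Returns NULL if the address is not backed by a slab page. */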
static inline struct slab *virt_to_slab(const void *addr)
{ … }
static inline int slab_order(const struct slab *slab)
{ … }
static inline size_t slab_size(const struct slab *slab)
{ … }
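/*
 * Accessors for the per-CPU partial slab list; stubbed out when
 * CONFIG_SLUB_CPU_PARTIAL is disabled.
 */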
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial …
#define slub_set_percpu_partial …
#define slub_percpu_partial_read_once …
#else
#define slub_percpu_partial(c) …
#define slub_set_percpu_partial(c, p) …
#define slub_percpu_partial_read_once(c) …
#endif
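/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */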
struct kmem_cache_order_objects { … };
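/*
 * Slab cache management.
 */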
struct kmem_cache { … };
#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { … }
static inline void sysfs_slab_release(struct kmem_cache *s) { … }
#endif
void *fixup_red_left(struct kmem_cache *s, void *p);
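/* Find the start of the object that contains, or is nearest to, the address x. */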
static inline void *nearest_obj(struct kmem_cache *cache,
const struct slab *slab, void *x)
{ … }
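/* Determine object index from a given position */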
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
void *addr, void *obj)
{ … }
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct slab *slab, void *obj)
{ … }
static inline int objs_per_slab(const struct kmem_cache *cache,
const struct slab *slab)
{ … }
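/*
 * State of the slab allocator during bootup. Allocators use this to
 * gradually bootstrap themselves, since the structures that manage slab
 * caches are themselves allocated from slab caches.
 */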
enum slab_state { … };
extern enum slab_state slab_state;
extern struct mutex slab_mutex;
extern struct list_head slab_caches;
extern struct kmem_cache *kmem_cache;
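/* A table of kmalloc cache names and sizes */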
extern const struct kmalloc_info_struct { … } kmalloc_info[];
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);
extern u8 kmalloc_size_index[24];
static inline unsigned int size_index_elem(unsigned int bytes)
{ … }
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{ … }
gfp_t kmalloc_fix_flags(gfp_t flags);
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
unsigned int size, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *));
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{ … }
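/* Legal slab flag masks for the current configuration, used to validate kmem_cache_create() flags. */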
#define SLAB_CORE_FLAGS …
#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS …
#else
#define SLAB_DEBUG_FLAGS …
#endif
#define SLAB_CACHE_FLAGS …
#define CACHE_CREATE_MASK …
#define SLAB_FLAGS_PERMITTED …
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
struct file;
struct slabinfo { … };
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{ … }
static inline bool __slub_debug_enabled(void)
{ … }
#endif
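/*
 * Returns true if any of the specified slab debugging flags is enabled for
 * the cache. Use only for flags parsed by setup_slub_debug(), as it also
 * checks the slub_debug_enabled static key first.
 */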
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{ … }
#ifdef CONFIG_SLAB_OBJ_EXT
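/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */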
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{ … }
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab);
#else
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
return NULL;
}
#endif
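/* vmstat item (reclaimable vs. unreclaimable slab memory) this cache accounts to */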
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{ … }
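/* Memory cgroup hooks that charge and uncharge slab objects. */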
#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects, struct slabobj_ext *obj_exts);
#endif
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
{ … }
#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{ … }
#endif
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
unsigned int count, gfp_t gfp)
{ … }
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { … }
#endif
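/*
 * Whether objects need to be zeroed on allocation and wiped on free,
 * depending on the init_on_alloc/init_on_free knobs, __GFP_ZERO and
 * per-cache flags.
 */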
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{ … }
static inline bool slab_want_init_on_free(struct kmem_cache *c)
{ … }
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { … }
#endif
#ifdef CONFIG_PRINTK
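/* Object details filled in by __kmem_obj_info() for kmem_dump_obj(). */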
#define KS_ADDRS_COUNT …
struct kmem_obj_info { … };
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif
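/* Hardened usercopy check that an n-byte copy stays within a single object. */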
void __check_heap_object(const void *ptr, unsigned long n,
const struct slab *slab, bool to_user);
#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif
#endif