#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>
#include "internal.h"
#include "slab.h"
#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
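/*
 * State shared by the allocator core: slab_state tracks how far bootstrap
 * has progressed, slab_caches is the list of all caches and slab_mutex
 * protects it.
 */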
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
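/*
 * Destroyed caches that still need an RCU grace period before their backing
 * memory can be freed are queued here and released from a workqueue.
 */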
static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);
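/*
 * Merging of compatible caches can be disabled with "slab_nomerge" or forced
 * with "slab_merge" on the kernel command line.
 */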
#define SLAB_NEVER_MERGE …
#define SLAB_MERGE_SAME …
static bool slab_nomerge = !IS_ENABLED(…);
static int __init setup_slab_nomerge(char *str)
{ … }
static int __init setup_slab_merge(char *str)
{ … }
__setup_param(…);
__setup_param(…);
__setup(…);
__setup(…);
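/* Return the size of the objects allocated from this cache. */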
unsigned int kmem_cache_size(struct kmem_cache *s)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{ … }
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif
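/*
 * Work out the final object alignment from the cache flags, the
 * caller-requested alignment and the object size.
 */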
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{ … }
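/*
 * Cache merging: slab_unmergeable() reports caches that must never be merged,
 * find_mergeable() looks for an existing compatible cache to reuse.
 */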
int slab_unmergeable(struct kmem_cache *s)
{ … }
struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{ … }
static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{ … }
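/*
 * Create a cache with a region (useroffset/usersize) that is allowed to be
 * copied to/from userspace under the usercopy hardening checks.
 */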
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		unsigned int size, unsigned int align,
		slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize,
		void (*ctor)(void *))
{ … }
EXPORT_SYMBOL(…);
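/* Like kmem_cache_create_usercopy(), but with no whitelisted usercopy region. */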
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{ … }
EXPORT_SYMBOL(…);
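/*
 * kmem_buckets_cache backs the kmem_buckets arrays themselves;
 * kmem_buckets_create() builds a dedicated set of kmalloc-style size-class
 * caches so selected allocation sites can be isolated from the shared
 * kmalloc buckets.
 */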
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{ … }
EXPORT_SYMBOL(…);
#ifdef SLAB_SUPPORTS_SYSFS
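/*
 * Once sysfs is fully set up the cache memory is freed from the sysfs
 * release path; earlier during boot it is released directly.
 */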
static void kmem_cache_release(struct kmem_cache *s)
{
	if (slab_state >= FULL) {
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
	} else {
		slab_kmem_cache_release(s);
	}
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{ … }
#endif
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{ … }
static int shutdown_cache(struct kmem_cache *s)
{ … }
void slab_kmem_cache_release(struct kmem_cache *s)
{ … }
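/*
 * Destroy a cache. All objects must already have been freed and no further
 * allocations from it may be issued.
 */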
void kmem_cache_destroy(struct kmem_cache *s)
{ … }
EXPORT_SYMBOL(…);
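/* Release unused (empty) slabs in the cache back to the page allocator. */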
int kmem_cache_shrink(struct kmem_cache *cachep)
{ … }
EXPORT_SYMBOL(…);
bool slab_is_available(void)
{ … }
#ifdef CONFIG_PRINTK
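/*
 * kmem_dump_obj() prints what is known about a slab object (cache name,
 * offset, allocation origin) for debugging, and returns false if the pointer
 * is not a slab object.
 */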
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{ … }
bool kmem_dump_obj(void *object)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
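/*
 * Boot-time cache creation: there is no way to recover from failure this
 * early, so create_boot_cache() panics on error.
 */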
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{ … }
static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						       unsigned int size,
						       slab_flags_t flags)
{ … }
kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init = …;
EXPORT_SYMBOL(…);
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif
u8 kmalloc_size_index[24] __ro_after_init = …;
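/*
 * Round a request up to the size kmalloc() would actually allocate, so
 * callers can make use of the whole allocation.
 */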
size_t kmalloc_size_roundup(size_t size)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz) …
#else
#define KMALLOC_DMA_NAME …
#endif
#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz) …
#else
#define KMALLOC_CGROUP_NAME …
#endif
#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME …
#else
#define KMALLOC_RCL_NAME(sz) …
#endif
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT …
#define KMALLOC_RANDOM_NAME …
#define KMA_RAND_1 …
#define KMA_RAND_2 …
#define KMA_RAND_3 …
#define KMA_RAND_4 …
#define KMA_RAND_5 …
#define KMA_RAND_6 …
#define KMA_RAND_7 …
#define KMA_RAND_8 …
#define KMA_RAND_9 …
#define KMA_RAND_10 …
#define KMA_RAND_11 …
#define KMA_RAND_12 …
#define KMA_RAND_13 …
#define KMA_RAND_14 …
#define KMA_RAND_15 …
#else
#define KMALLOC_RANDOM_NAME(N, sz) …
#endif
#define INIT_KMALLOC_INFO(__size, __short_size) …
const struct kmalloc_info_struct kmalloc_info[] __initconst = …;
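/*
 * Fill kmalloc_size_index[] so that small requests can be mapped to the
 * right kmalloc cache index.
 */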
void __init setup_kmalloc_cache_index_table(void)
{ … }
static unsigned int __kmalloc_minalign(void)
{ … }
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{ … }
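/* Create the full set of kmalloc caches for all types and sizes at boot. */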
void __init create_kmalloc_caches(void)
{ … }
size_t __ksize(const void *object)
{ … }
gfp_t kmalloc_fix_flags(gfp_t flags)
{ … }
#ifdef CONFIG_SLAB_FREELIST_RANDOM
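/*
 * Freelist randomization: precompute a random permutation of object indices
 * (Fisher-Yates shuffle) that is used to seed slab freelists.
 */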
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif
#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS …
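/*
 * /proc/slabinfo is implemented as a seq_file that walks slab_caches with
 * slab_mutex held.
 */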
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
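/*
 * Best-effort dump of unreclaimable slab usage: slab_mutex is only trylocked
 * so this never blocks when called from a constrained context.
 */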
void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_open = slabinfo_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);
#endif
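/*
 * krealloc(): reuse the existing allocation when it is already large enough,
 * otherwise allocate a new buffer, copy the data and free the old one.
 */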
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{ … }
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
{ … }
EXPORT_SYMBOL(…);
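/*
 * Clear the object before freeing it, for buffers that held sensitive data
 * such as keys or passwords.
 */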
void kfree_sensitive(const void *p)
{ … }
EXPORT_SYMBOL(…);
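/*
 * Return the full usable size of a kmalloc() allocation, which may be larger
 * than what was requested.
 */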
size_t ksize(const void *objp)
{ … }
EXPORT_SYMBOL(…);
EXPORT_TRACEPOINT_SYMBOL(…);
EXPORT_TRACEPOINT_SYMBOL(…);
EXPORT_TRACEPOINT_SYMBOL(…);
EXPORT_TRACEPOINT_SYMBOL(…);