// linux/lib/stackdepot.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <[email protected]>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */

/* Prefix all pr_*() messages from this file with "stackdepot: ". */
#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

/* Hard cap on the number of stack record pools. */
#define DEPOT_POOLS_CAP 8192
/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)

/* Set via the "stack_depot_disable" early boot parameter. */
static bool stack_depot_disabled;
/* Whether early init was requested (or forced by the Kconfig option). */
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
/* Whether stack_depot_early_init() has already run. */
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of stored stack records. */
static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack records. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset =;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
/* The lock must be held when performing pool or freelist modifications. */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Statistics counters for debugfs. */
enum depot_counter_id {};
static long counters[DEPOT_COUNTER_COUNT];
static const char *const counter_names[] =;
static_assert();

static int __init disable_stack_depot(char *str)
{}
early_param();

/* Requests that stack_depot_early_init() allocate the hash table. */
void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Initialize list_head's within the hash table. */
static void init_stack_table(unsigned long entries)
{}

/*
 * Allocates a hash table via memblock. Can only be used during early boot.
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); verify
 * against the tree this file was taken from.
 */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print disabled message even if early init has not been requested:
	 * stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash().
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct list_head),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}
	if (!entries) {
		/*
		 * Obtain the number of entries that was calculated by
		 * alloc_large_system_hash().
		 */
		entries = stack_hash_mask + 1;
	}
	init_stack_table(entries);

	return 0;
}

/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{}
EXPORT_SYMBOL_GPL();

/*
 * Initializes new stack pool, and updates the list of pools.
 */
static bool depot_init_pool(void **prealloc)
{}

/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{}

/*
 * Try to initialize a new stack record of the given size from the current
 * pool, a cached pool, or the current pre-allocation. Returns NULL if no
 * space can be found. Must be called with pool_lock held.
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); the
 * handle field layout (pool_index_plus_1) should be verified against
 * <linux/stackdepot.h> in this tree.
 */
static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
	struct stack_record *stack;
	void *current_pool;
	u32 pool_index;

	lockdep_assert_held(&pool_lock);

	/* Not enough room left in the current pool: switch to a new one. */
	if (pool_offset + size > DEPOT_POOL_SIZE) {
		if (!depot_init_pool(prealloc))
			return NULL;
	}

	if (WARN_ON_ONCE(pools_num < 1))
		return NULL;
	pool_index = pools_num - 1;
	current_pool = stack_pools[pool_index];
	if (WARN_ON_ONCE(!current_pool))
		return NULL;

	stack = current_pool + pool_offset;

	/* Pre-initialize handle once. */
	stack->handle.pool_index_plus_1 = pool_index + 1;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.extra = 0;
	INIT_LIST_HEAD(&stack->hash_list);

	pool_offset += size;

	return stack;
}

/* Try to find next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
{}

static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
{}

/*
 * Allocates a new stack record in a stack depot pool and copies the trace
 * into it. Must be called with pool_lock held.
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); verify
 * against the tree this file was taken from.
 */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
	struct stack_record *stack = NULL;
	size_t record_size;

	lockdep_assert_held(&pool_lock);

	/* This should already be checked by public API entry points. */
	if (WARN_ON_ONCE(!nr_entries))
		return NULL;

	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
		nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;

	if (flags & STACK_DEPOT_FLAG_GET) {
		/*
		 * Evictable entries have to allocate the max. size so they may
		 * safely be re-used by differently sized allocations.
		 */
		record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
		stack = depot_pop_free();
	} else {
		record_size = depot_stack_record_size(stack, nr_entries);
	}

	if (!stack) {
		stack = depot_pop_free_pool(prealloc, record_size);
		if (!stack)
			return NULL;
	}

	/* Save the stack trace. */
	stack->hash = hash;
	stack->size = nr_entries;
	/* stack->handle is already filled in by depot_pop_free_pool(). */
	memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));

	if (flags & STACK_DEPOT_FLAG_GET) {
		refcount_set(&stack->count, 1);
		counters[DEPOT_COUNTER_REFD_ALLOCS]++;
		counters[DEPOT_COUNTER_REFD_INUSE]++;
	} else {
		/* Warn on attempts to switch to refcounting this entry. */
		refcount_set(&stack->count, REFCOUNT_SATURATED);
		counters[DEPOT_COUNTER_PERSIST_COUNT]++;
		counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
	}

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, record_size);

	return stack;
}

/*
 * Resolves a handle back to its stack record, or NULL for a corrupt handle.
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); the
 * handle field layout (pool_index_plus_1) should be verified against
 * <linux/stackdepot.h> in this tree.
 */
static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
	const int pools_num_cached = READ_ONCE(pools_num);
	union handle_parts parts = { .handle = handle };
	void *pool;
	/* The pool_index is stored offset by 1; see DEPOT_MAX_POOLS comment. */
	u32 pool_index = parts.pool_index_plus_1 - 1;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	lockdep_assert_not_held(&pool_lock);

	if (pool_index >= pools_num_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     pool_index, pools_num_cached, handle);
		return NULL;
	}

	pool = stack_pools[pool_index];
	if (WARN_ON(!pool))
		return NULL;

	stack = pool + offset;
	if (WARN_ON(!refcount_read(&stack->count)))
		return NULL;

	return stack;
}

/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
{}

/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{}

/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 * Returns 0 when the two arrays of n words are equal, 1 otherwise.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/*
 * Finds a stack in a bucket of the hash table. With STACK_DEPOT_FLAG_GET, a
 * reference is also taken on the returned record.
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); verify
 * against the tree this file was taken from.
 */
static inline struct stack_record *find_stack(struct list_head *bucket,
					      unsigned long *entries, int size,
					      u32 hash, depot_flags_t flags)
{
	struct stack_record *stack, *ret = NULL;

	/*
	 * Stack depot may be used from instrumentation that instruments RCU or
	 * tracing itself; use variant that does not call into RCU and cannot be
	 * instrumented.
	 */
	rcu_read_lock_sched_notrace();

	list_for_each_entry_rcu(stack, bucket, hash_list) {
		if (stack->hash != hash || stack->size != size)
			continue;

		/*
		 * This may race with depot_free_stack() accessing the freelist
		 * management state unioned with @entries. The refcount is zero
		 * in that case and the below refcount_inc_not_zero() will fail.
		 */
		if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
			continue;

		/*
		 * Try to increment refcount. If this succeeds, the stack record
		 * is valid and has not yet been freed.
		 */
		if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
			continue;

		ret = stack;
		break;
	}

	rcu_read_unlock_sched_notrace();

	return ret;
}

/*
 * Saves a stack trace into stack depot and returns its handle (0 on failure
 * or when stack depot is disabled). With STACK_DEPOT_FLAG_CAN_ALLOC, a new
 * pool page may be allocated using alloc_flags; with STACK_DEPOT_FLAG_GET, a
 * reference is taken that must be dropped via stack_depot_put().
 *
 * NOTE(review): reconstructed from upstream stack depot (v6.9-era); verify
 * against the tree this file was taken from.
 */
depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
					    unsigned int nr_entries,
					    gfp_t alloc_flags,
					    depot_flags_t depot_flags)
{
	struct list_head *bucket;
	struct stack_record *found = NULL;
	depot_stack_handle_t handle = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
	unsigned long flags;
	u32 hash;

	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
		return 0;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/* Fast path: look the stack trace up without locking. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (found)
		goto exit;

	/*
	 * Allocate memory for a new pool if required now:
	 * we won't be able to do that under the lock.
	 */
	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	printk_deferred_enter();

	/* Try to find again, to avoid concurrently inserting duplicates. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);

		if (new) {
			/*
			 * Make the stack record visible to concurrent
			 * find_stack() calls via the RCU hash list.
			 */
			list_add_rcu(&new->hash_list, bucket);
			found = new;
		}
	}

	if (prealloc) {
		/*
		 * Either stack depot already contains this stack trace, or
		 * depot_alloc_stack() did not consume the preallocated memory.
		 * Try to keep the preallocated memory for future.
		 */
		depot_keep_new_pool(&prealloc);
	}

	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(stack_depot_save_flags);

/* Saves a stack trace, allowing pool allocation; see stack_depot_save_flags(). */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return stack_depot_save_flags(entries, nr_entries, alloc_flags,
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

/* Internal helper: returns the raw stack record for a handle (NULL for 0). */
struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
{
	if (!handle)
		return NULL;

	return depot_fetch_stack(handle);
}

/*
 * Fetches the stored trace for a handle. Sets *entries to the frame array and
 * returns the number of frames (0 and *entries == NULL on failure).
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;

	stack = depot_fetch_stack(handle);
	/*
	 * Should never be NULL, otherwise this is a use-after-put (or just a
	 * corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
		return 0;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/*
 * Drops a reference taken via STACK_DEPOT_FLAG_GET; frees the record onto the
 * freelist when the last reference is dropped.
 */
void stack_depot_put(depot_stack_handle_t handle)
{
	struct stack_record *stack;

	if (!handle || stack_depot_disabled)
		return;

	stack = depot_fetch_stack(handle);
	/*
	 * Should always be able to find the stack record, otherwise this is a
	 * use-after-put (or just a corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
		return;

	if (refcount_dec_and_test(&stack->count))
		depot_free_stack(stack);
}
EXPORT_SYMBOL_GPL(stack_depot_put);

/* Prints the trace referenced by a handle to the kernel log. */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/*
 * Formats the trace referenced by a handle into buf (at most size bytes),
 * indenting each frame by spaces. Returns the number of bytes written, or 0
 * if the handle cannot be resolved.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
		       int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

/* Stores caller-defined extra bits in the unused bits of a handle. */
depot_stack_handle_t __must_check stack_depot_set_extra_bits(
			depot_stack_handle_t handle, unsigned int extra_bits)
{
	union handle_parts parts = { .handle = handle };

	/* Don't set extra bits on empty handles. */
	if (!handle)
		return 0;

	parts.extra = extra_bits;
	return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);

/* Retrieves the extra bits previously stored with stack_depot_set_extra_bits(). */
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);

static int stats_show(struct seq_file *seq, void *v)
{}
DEFINE_SHOW_ATTRIBUTE();

static int depot_debugfs_init(void)
{}
late_initcall(depot_debugfs_init);