/* linux/lib/debugobjects.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

/* Prefix all pr_*() output from this file with "ODEBUG: " */
#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

/* Hash table geometry: objects are hashed by chunk (see get_bucket()) */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Static boot pool size and refill thresholds for the dynamic pool */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

/* Address chunking used for the hash: one bucket per PAGE_SIZE chunk */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE-1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {};

/* Per-CPU cache of free debug objects (see struct debug_percpu_free above) */
static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

/* Hash table of buckets holding the currently tracked objects */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Boot-time pool; linked into obj_pool by debug_objects_early_init() */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* NOTE(review): presumably serializes obj_pool/obj_to_free and the counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Global free list of debug objects */
static HLIST_HEAD(obj_pool);
/* Objects queued for deferred freeing (see free_obj_work()) */
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free =;
static int			obj_pool_free =;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

/* Statistics and tunables; __data_racy: read/updated without serialization */
static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static int __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int __data_racy			debug_objects_pool_size __read_mostly
					= ODEBUG_POOL_SIZE;
static int __data_racy			debug_objects_pool_min_level __read_mostly
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

/* Delayed work used to free surplus objects (rate limited, see comment above) */
static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

/* Boot parameter handler: "debug_objects" force-enables object debugging */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter handler: "no_debug_objects" disables object debugging */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param();
early_param();

static const char *obj_states[ODEBUG_STATE_MAX] =;

/*
 * NOTE(review): body elided in this extract — upstream fill_pool() refills
 * the global obj_pool from kmem_cache allocations. Restore from upstream
 * lib/debugobjects.c before building.
 */
static void fill_pool(void)
{}

/*
 * Lookup an object in the hash bucket.
 */
/* NOTE(review): body elided in this extract — presumably walks bucket b for
 * an object tracking @addr; restore from upstream lib/debugobjects.c. */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{}

/*
 * Allocate a new object from the hlist
 */
/* NOTE(review): body elided in this extract — pops the first object off
 * @list per the comment above; restore from upstream. */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{}

/* NOTE(review): body elided in this extract — allocates a tracker object for
 * @addr/@descr and inserts it into bucket @b; restore from upstream. */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
/* NOTE(review): body elided in this extract — frees queued objects from
 * obj_to_free per the comment above; restore from upstream. */
static void free_obj_work(struct work_struct *work)
{}

/* NOTE(review): body elided in this extract — returns @obj to a free pool
 * (percpu or global); restore from upstream. */
static void __free_object(struct debug_obj *obj)
{}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
/* NOTE(review): body elided in this extract — per the comment above, returns
 * @obj to the pool and may schedule debug_obj_work; restore from upstream. */
static void free_object(struct debug_obj *obj)
{}

#ifdef CONFIG_HOTPLUG_CPU
/* NOTE(review): body elided in this extract — CPU hotplug callback,
 * presumably drains the dying CPU's percpu_obj_pool; restore from upstream. */
static int object_cpu_offline(unsigned int cpu)
{}
#endif

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 */
/* NOTE(review): body elided in this extract — OOM recovery path per the
 * comment above; restore from upstream. */
static void debug_objects_oom(void)
{}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{}

/* NOTE(review): body elided in this extract — emits the (rate limited)
 * ODEBUG warning for @obj with @msg; restore from upstream. */
static void debug_print_object(struct debug_obj *obj, char *msg)
{}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 *
 * Invokes the descriptor's fixup callback (if any) and counts successful
 * repairs in debug_objects_fixups. Returns true when the callback fixed
 * the object.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

/* NOTE(review): body elided in this extract — presumably warns when the
 * on-stack-ness of @addr contradicts @onstack; restore from upstream. */
static void debug_object_is_on_stack(void *addr, int onstack)
{}

/* NOTE(review): body elided in this extract — lookup with optional
 * allocation for static objects; restore from upstream. */
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{}

/* NOTE(review): body elided in this extract — presumably calls fill_pool()
 * when the pool runs low; restore from upstream. */
static void debug_objects_fill_pool(void)
{}

/* NOTE(review): body elided in this extract — common worker for
 * debug_object_init{,_on_stack}(); restore from upstream. */
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL();

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL();

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
/* NOTE(review): body elided in this extract; restore from upstream. */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{}
#endif

#ifdef CONFIG_DEBUG_FS

/* NOTE(review): body elided in this extract — prints the odebug counters
 * (with percpu-pool adjustments, see comment at obj_pool_free); restore
 * from upstream. */
static int debug_stats_show(struct seq_file *m, void *v)
{}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

/* Expose the statistics as <debugfs>/debug_objects/stats */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{}

static __initconst const struct debug_obj_descr descr_type_test =;

static __initdata struct self_test obj =;

static void __init debug_objects_selftest(void)
{}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the poll list. After this call
 * the object tracker is fully operational.
 */
/* NOTE(review): body elided in this extract — per the comment above, it
 * initializes obj_hash and links obj_static_pool into obj_pool; restore
 * from upstream. */
void __init debug_objects_early_init(void)
{}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
/* NOTE(review): body elided in this extract — swaps the __initdata static
 * objects for kmem_cache-backed ones; restore from upstream. */
static int __init debug_objects_replace_static_objects(void)
{}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
/* NOTE(review): body elided in this extract — per the comment above, sets
 * up obj_cache with SLAB_DEBUG_OBJECTS; restore from upstream. */
void __init debug_objects_mem_init(void)
{}