// SPDX-License-Identifier: GPL-2.0-only /* * mm/kmemleak.c * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <[email protected]> * * For more information on the algorithm and kmemleak usage, please see * Documentation/dev-tools/kmemleak.rst. * * Notes on locking * ---------------- * * The following locks and mutexes are used by kmemleak: * * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as * del_state modifications and accesses to the object trees * (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The * object_list is the main list holding the metadata (struct * kmemleak_object) for the allocated memory blocks. The object trees are * red black trees used to look-up metadata based on a pointer to the * corresponding memory block. The kmemleak_object structures are added to * the object_list and the object tree root in the create_object() function * called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in * delete_object() called from the kmemleak_free{,_phys,_percpu}() callback * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object. * Accesses to the metadata (e.g. count) are protected by this lock. Note * that some members of this structure may be protected by other means * (atomic or kmemleak_lock). This lock is also held when scanning the * corresponding memory block to avoid the kernel freeing it via the * kmemleak_free() callback. This is less heavyweight than holding a global * lock like kmemleak_lock during scanning. * - scan_mutex (mutex): ensures that only one thread may scan the memory for * unreferenced objects at a time. The gray_list contains the objects which * are already referenced or marked as false positives and need to be * scanned. This list is only modified during a scanning episode when the * scan_mutex is held. At the end of a scan, the gray_list is always empty. 
* Note that the kmemleak_object.use_count is incremented when an object is * added to the gray_list and therefore cannot be freed. This mutex also * prevents multiple users of the "kmemleak" debugfs file together with * modifications to the memory scanning parameters including the scan_thread * pointer * * Locks and mutexes are acquired/nested in the following order: * * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING) * * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex * regions. * * The kmemleak_object structures have a use_count incremented or decremented * using the get_object()/put_object() functions. When the use_count becomes * 0, this count can no longer be incremented and put_object() schedules the * kmemleak_object freeing via an RCU callback. All calls to the get_object() * function must be protected by rcu_read_lock() to avoid accessing a freed * structure. */ #define pr_fmt(fmt) … #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/kthread.h> #include <linux/rbtree.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #include <linux/cache.h> #include <linux/percpu.h> #include <linux/memblock.h> #include <linux/pfn.h> #include <linux/mmzone.h> #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/err.h> #include <linux/uaccess.h> #include <linux/string.h> #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <asm/sections.h> #include <asm/processor.h> #include <linux/atomic.h> #include 
<linux/kasan.h> #include <linux/kfence.h> #include <linux/kmemleak.h> #include <linux/memory_hotplug.h> /* * Kmemleak configuration and common defines. */ #define MAX_TRACE … #define MSECS_MIN_AGE … #define SECS_FIRST_SCAN … #define SECS_SCAN_WAIT … #define MAX_SCAN_SIZE … #define BYTES_PER_POINTER … /* scanning area inside a memory block */ struct kmemleak_scan_area { … }; #define KMEMLEAK_GREY … #define KMEMLEAK_BLACK … /* * Structure holding the metadata for each allocated memory block. * Modifications to such objects should be made while holding the * object->lock. Insertions or deletions from object_list, gray_list or * rb_node are already protected by the corresponding locks or mutex (see * the notes on locking above). These objects are reference-counted * (use_count) and freed using the RCU mechanism. */ struct kmemleak_object { … }; /* flag representing the memory block allocation status */ #define OBJECT_ALLOCATED … /* flag set after the first reporting of an unreference object */ #define OBJECT_REPORTED … /* flag set to not scan the object */ #define OBJECT_NO_SCAN … /* flag set to fully scan the object when scan_area allocation failed */ #define OBJECT_FULL_SCAN … /* flag set for object allocated with physical address */ #define OBJECT_PHYS … /* flag set for per-CPU pointers */ #define OBJECT_PERCPU … /* set when __remove_object() called */ #define DELSTATE_REMOVED … /* set to temporarily prevent deletion from object_list */ #define DELSTATE_NO_DELETE … #define HEX_PREFIX … /* number of bytes to print per line; must be 16 or 32 */ #define HEX_ROW_SIZE … /* number of bytes to print at a time (1, 2, 4, 8) */ #define HEX_GROUP_SIZE … /* include ASCII after the hex output */ #define HEX_ASCII … /* max number of lines to be printed */ #define HEX_MAX_LINES … /* the list of all allocated objects */ static LIST_HEAD(object_list); /* the list of gray-colored objects (see color_gray comment below) */ static LIST_HEAD(gray_list); /* memory pool allocation */ 
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = …;
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = …;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = …;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = …;
/*
 * Protects the access to object_list and to the three object search trees
 * (object_tree_root, object_phys_tree_root, object_percpu_tree_root).
 */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = …;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = …;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = …;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = …;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...) …

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...) …

#define warn_or_seq_printf(seq, fmt, ...) …

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{ … }

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{ … }

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{ … }

static bool color_gray(const struct kmemleak_object *object)
{ … }

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{ … }

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{ … }

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{ … }

static struct rb_root *object_tree(unsigned long objflags)
{ … }

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{ … }

/* Look-up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{ … }

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{ … }

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{ … }

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{ … }

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{ … }

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{ … }

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{ … }

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{ … }

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{ … }

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{ … }

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{ … }

static noinline depot_stack_handle_t set_track_prepare(void)
{ … }

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{ … }

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{ … }

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, unsigned int objflags)
{ … }

/* Create a kmemleak object which was allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{ … }

/* Create a kmemleak object which was allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{ … }

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{ … }

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{ … }

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{ … }

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{ … }

static void __paint_it(struct kmemleak_object *object, int color)
{ … }

static void paint_it(struct kmemleak_object *object, int color)
{ … }

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{ … }

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{ … }

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{ … }

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{ … }

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{ … }

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{ … }

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size,
			    gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{ … }
EXPORT_SYMBOL(…);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{ … }
EXPORT_SYMBOL(…);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{ … }

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{ … }

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{ … }

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{ … }

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{ … }
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{ … }

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{ … }

/*
 * Conditionally call resched() in an object iteration loop while making sure
 * that the given object won't go away without RCU read lock by performing a
 * get_object() if necessary.
 */
static void kmemleak_cond_resched(struct kmemleak_object *object)
{ … }

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{ … }

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{ … }

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{ … }

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{ … }

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{ … }

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{ … }

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{ … }

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{ … }

static const struct seq_operations kmemleak_seq_ops = …;

static int kmemleak_open(struct inode *inode, struct file *file)
{ … }

static int dump_str_object_info(const char *str)
{ … }

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{ … }

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the tasks stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{ … }

static const struct file_operations kmemleak_fops = …;

static void __kmemleak_do_cleanup(void)
{ … }

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no previous scan thread (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{ … }

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{ … }

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{ … }
early_param(…);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{ … }

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{ … }
late_initcall(kmemleak_late_init);