/* linux/kernel/module/main.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <[email protected]>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

/*
 * Module address-range bookkeeping (see "Bounds of module memory" below).
 * addr_min starts at the maximum address (-1UL) so the first insertion
 * always narrows it; protected by module_mutex.
 */
struct mod_tree_root mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

struct symsearch {};

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
{}

static void mod_update_bounds(struct module *mod)
{}

/* Block module loading/unloading? */
int modules_disabled;
core_param();

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();

int unregister_module_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
/* Drop the caller's reference on @mod, then terminate the kthread with @code. */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{}

/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{}

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{}

#ifndef CONFIG_MODVERSIONS
/* No symbol CRCs without modversions: every lookup yields no version. */
#define symversion(base, idx) NULL
#else
/* CRC table may be absent for a section; guard before indexing. */
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{}

int cmp_name(const void *name, const void *sym)
{}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    struct find_symbol_arg *fsa)
{}

/*
 * Find an exported symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex.
 */
bool find_symbol(struct find_symbol_arg *fsa)
{}

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
struct module *find_module_all(const char *name, size_t len,
			       bool even_unformed)
{}

struct module *find_module(const char *name)
{}

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{}

static void percpu_modfree(struct module *mod)
{}

static unsigned int find_pcpusec(struct load_info *info)
{}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{}

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{}

#else /* ... !CONFIG_SMP */

/* UP build: percpu_modalloc() below rejects any percpu section, so a module
 * never has a percpu base to hand out.
 */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
/* UP build: percpu_modalloc() allocated nothing, so there is nothing to free. */
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
/* UP build: nothing to copy; a non-zero size means a caller violated the
 * invariant established by find_pcpusec()/percpu_modalloc().
 */
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
/* UP build: modules own no static percpu area, so no address can be in one. */
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

/* UP build: as above; @can_addr is never written since there is no match. */
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static struct {} last_unloaded_module;

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{}

/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
{}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{}
#else
static inline int try_force_unload(unsigned int flags)
{
	/*
	 * Without CONFIG_MODULE_FORCE_UNLOAD, forcing is never permitted.
	 * NOTE(review): 0 here appears to mean "did not force" -- confirm
	 * against the caller (try_stop_module/try_release_module_ref).
	 */
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{}

static int try_stop_module(struct module *mod, int flags, int *forced)
{}

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod:	the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{}
EXPORT_SYMBOL();

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{}

void __symbol_put(const char *symbol)
{}
EXPORT_SYMBOL();

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{}
EXPORT_SYMBOL_GPL();

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{}

static struct module_attribute modinfo_refcnt =;

void __module_get(struct module *module)
{}
EXPORT_SYMBOL();

bool try_module_get(struct module *module)
{}
EXPORT_SYMBOL();

void module_put(struct module *module)
{}
EXPORT_SYMBOL();

#else /* !CONFIG_MODULE_UNLOAD */
/* Without CONFIG_MODULE_UNLOAD no unload state (module_use links, refcounts)
 * was ever set up, so there is nothing to tear down.
 */
static inline void module_unload_free(struct module *mod)
{
}

/* Without unload support there is no usage bookkeeping to record; just take
 * a strong reference on 'b' (0 means success, see strong_try_module_get()).
 */
static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

/* No per-module unload state to initialize when unloading is compiled out. */
static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

size_t module_flags_taint(unsigned long taints, char *buf)
{}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{}

static struct module_attribute modinfo_initstate =;

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{}

struct module_attribute module_uevent =;

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{}

static struct module_attribute modinfo_coresize =;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
/* sysfs show(): total size of the module's core data memory regions. */
static ssize_t show_datasize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	/* Sum only the core_data class of regions (code/init excluded). */
	for_class_mod_mem_type(type, core_data)
		size += mk->mod->mem[type].size;
	/* sysfs_emit() enforces the PAGE_SIZE bound required of show(). */
	return sysfs_emit(buffer, "%u\n", size);
}

static struct module_attribute modinfo_datasize =
	__ATTR(datasize, 0444, show_datasize, NULL);
#endif

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{}

static struct module_attribute modinfo_initsize =;

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{}

static struct module_attribute modinfo_taint =;

struct module_attribute *modinfo_attrs[] =;

size_t modinfo_attrs_count =;

static const char vermagic[] =;

int try_to_force_load(struct module *mod, const char *reason)
{}

/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
{}

static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev)
{}

static char *get_modinfo(const struct load_info *info, const char *tag)
{}

static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{}

static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
{}

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{}

void __weak module_arch_cleanup(struct module *mod)
{}

void __weak module_arch_freeing_init(struct module *mod)
{}

static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
{}

static void module_memory_free(struct module *mod, enum mod_mem_type type,
			       bool unload_codetags)
{}

static void free_mod_mem(struct module *mod, bool unload_codetags)
{}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{}

void *__symbol_get(const char *symbol)
{}
EXPORT_SYMBOL_GPL();

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_exported_symbols(struct module *mod)
{}

static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
{}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{}

static int apply_relocations(struct module *mod, const struct load_info *info)
{}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{}

long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
				Elf_Shdr *sechdr, unsigned int section)
{}

bool module_init_layout_section(const char *sname)
{}

static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
{}

/*
 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 * might -- code, read-only data, read-write data, small data.  Tally
 * sizes, and place the offsets into sh_entsize fields: high bit means it
 * belongs in init.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{}

static void module_license_taint_check(struct module *mod, const char *license)
{}

static void setup_modinfo(struct module *mod, struct load_info *info)
{}

static void free_modinfo(struct module *mod)
{}

bool __weak module_init_section(const char *name)
{}

bool __weak module_exit_section(const char *name)
{}

static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
{}

/*
 * Check userspace passed ELF module against our expectations, and cache
 * useful variables for further processing as we go.
 *
 * This does basic validity checks against section offsets and sizes, the
 * section name string table, and the indices used for it (sh_name).
 *
 * As a last step, since we're already checking the ELF sections we cache
 * useful variables which will be used later for our convenience:
 *
 * 	o pointers to section headers
 * 	o cache the modinfo symbol section
 * 	o cache the string symbol section
 * 	o cache the module section
 *
 * As a last step we set info->mod to the temporary copy of the module in
 * info->hdr. The final one will be allocated in move_module(). Any
 * modifications we make to our copy of the module will be carried over
 * to the final minted module.
 */
static int elf_validity_cache_copy(struct load_info *info, int flags)
{}

/* Copy userspace module images in bounded chunks so huge vmalloc'd copies
 * don't do one enormous unchecked copy_from_user().
 */
#define COPY_CHUNK_SIZE (16*4096)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{}

static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{}

static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
{}

/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				  struct load_info *info)
{}

static void free_copy(struct load_info *info, int flags)
{}

static int rewrite_section_headers(struct load_info *info, int flags)
{}

/*
 * These calls taint the kernel depending certain module circumstances */
static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
{}

static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{}

static int find_module_sections(struct module *mod, struct load_info *info)
{}

static int move_module(struct module *mod, struct load_info *info)
{}

static int check_export_symbol_versions(struct module *mod)
{}

static void flush_module_icache(const struct module *mod)
{}

bool __weak module_elf_check_arch(Elf_Ehdr *hdr)
{}

int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{}

/* module_blacklist is a comma-separated list of module names */
static char *module_blacklist;
/* Return true iff @module_name exactly matches one comma-separated entry. */
static bool blacklisted(const char *module_name)
{
	const char *p;
	size_t len;

	if (!module_blacklist)
		return false;

	for (p = module_blacklist; *p; p += len) {
		/* Length of the current entry, up to the next ',' or NUL. */
		len = strcspn(p, ",");
		if (strlen(module_name) == len && !memcmp(module_name, p, len))
			return true;
		/* Step over the separator as well (not past a terminating NUL). */
		if (p[len] == ',')
			len++;
	}
	return false;
}
core_param();

static struct module *layout_and_allocate(struct load_info *info, int flags)
{}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{}

static int post_relocation(struct module *mod, const struct load_info *info)
{}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{}

/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {};

static void do_free_init(struct work_struct *w)
{}

void flush_module_init_free_work(void)
{}

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/* Default value for module->async_probe_requested */
static bool async_probe;
module_param(async_probe, bool, 0644);

/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{}

static int may_init_module(void)
{}

/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{}

/* Must be called with module_mutex held */
static int module_patient_check_exists(const char *name,
				       enum fail_dup_mod_reason reason)
{}

/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{}

static int complete_formation(struct module *mod, struct load_info *info)
{}

static int prepare_coming_module(struct module *mod)
{}

static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{}

/* Module within temporary copy, this doesn't do any allocation  */
static int early_mod_check(struct load_info *info, int flags)
{}

/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{}

SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{}

struct idempotent {};

#define IDEM_HASH_BITS 8
static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
static DEFINE_SPINLOCK(idem_lock);

static bool idempotent(struct idempotent *u, const void *cookie)
{}

/*
 * We were the first one with 'cookie' on the list, and we ended
 * up completing the operation. We now need to walk the list,
 * remove everybody - which includes ourselves - fill in the return
 * value, and then complete the operation.
 */
static int idempotent_complete(struct idempotent *u, int ret)
{}

/*
 * Wait for the idempotent worker.
 *
 * If we get interrupted, we need to remove ourselves from the
 * the idempotent list, and the completion may still come in.
 *
 * The 'idem_lock' protects against the race, and 'idem.ret' was
 * initialized to -EINTR and is thus always the right return
 * value even if the idempotent work then completes between
 * the wait_for_completion and the cleanup.
 */
static int idempotent_wait_for_completion(struct idempotent *u)
{}

static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
{}

static int idempotent_init_module(struct file *f, const char __user * uargs, int flags)
{}

SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{}

/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
char *module_flags(struct module *mod, char *buf, bool show_state)
{}

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{}

/**
 * is_module_address() - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{}

/**
 * __module_address() - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{}

/**
 * is_module_text_address() - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{}

/**
 * __module_text_address() - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{}

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{}

#ifdef CONFIG_MODULE_DEBUGFS
struct dentry *mod_debugfs_root;

static int module_debugfs_init(void)
{}
module_init();
#endif