linux/kernel/kprobes.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <[email protected]> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <[email protected]> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <[email protected]>, Jim Keniston
 *		<[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif

static int kprobes_initialized;
/*
 * The 'kprobe_table' can be accessed by either:
 * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held,
 * or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;

/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{}

/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' objects recording
 * where kprobes cannot probe.
 */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{}

enum kprobe_slot_state {};

void __weak *alloc_insn_page(void)
{}

static void free_insn_page(void *page)
{}

struct kprobe_insn_cache kprobe_insn_slots = {};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * @c: Pointer to the kprobe instruction cache.
 *
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{}

/* Return true if all garbage is collected, otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{}

/*
 * Check whether the given address is on a page of kprobe instruction
 * slots. This is used to check whether an address found on a stack is
 * in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{}

int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
{}

#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{}

void __weak free_optinsn_page(void *page)
{}

/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{}

static inline void reset_kprobe_instance(void)
{}
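
/*
 * For illustration only: in mainline these two helpers are one-liners that
 * track the currently handled kprobe in the per-CPU 'kprobe_instance' slot,
 * using the non-atomic __this_cpu_write() precisely because preemption is
 * already disabled here (the bodies above are elided in this listing):
 *
 *	__this_cpu_write(kprobe_instance, kp);		(set)
 *	__this_cpu_write(kprobe_instance, NULL);	(reset)
 */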

/*
 * This routine is called either:
 *	- under the 'kprobe_mutex' - during kprobe_[un]register().
 *				OR
 *	- with preemption disabled - from architecture specific code.
 */
struct kprobe *get_kprobe(void *addr)
{}
NOKPROBE_SYMBOL(get_kprobe);
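
/*
 * A sketch of the lookup pattern get_kprobe() implements in mainline (its
 * body is elided above): hash the address into 'kprobe_table' and walk the
 * bucket under RCU, matching the access rules documented at the table's
 * definition.
 *
 *	struct hlist_head *head;
 *	struct kprobe *p;
 *
 *	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 *	hlist_for_each_entry_rcu(p, head, hlist,
 *				 lockdep_is_held(&kprobe_mutex)) {
 *		if (p->addr == addr)
 *			return p;
 *	}
 *	return NULL;
 */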

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
{}

/* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
{}

/* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{}

#ifdef CONFIG_OPTPROBES
/* NOTE: This is protected by 'kprobe_mutex'. */
static bool kprobes_allow_optimization;

/*
 * Call every 'kprobe::pre_handler' on the list, ignoring their return
 * values. This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(opt_pre_handler);
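
/*
 * A sketch of the walk opt_pre_handler() performs in mainline (body elided
 * above): every live pre_handler on 'p->list' runs with the per-CPU
 * instance set around the call, and its return value is deliberately
 * ignored, as the comment above notes.
 *
 *	struct kprobe *kp;
 *
 *	list_for_each_entry_rcu(kp, &p->list, list) {
 *		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 *			set_kprobe_instance(kp);
 *			kp->pre_handler(kp, regs);
 *		}
 *		reset_kprobe_instance();
 *	}
 */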

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{}

/* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{}

/* Return true if the kprobe is disarmed. Note: p must be on hash list */
bool kprobe_disarmed(struct kprobe *p)
{}

/* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
{}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including 'addr' (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{}

/* Optimization staging list, protected by 'kprobe_mutex' */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * 'optimizing_list'.
 */
static void do_optimize_kprobes(void)
{}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
{}

/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{}
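
/*
 * In mainline this is a single statement (elided above); deferring the
 * optimizer with delayed work is what batches bursts of (un)registrations
 * into one pass:
 *
 *	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 */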

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
{}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{}

/* Shortcut for direct unoptimization */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{}

/* Cancel unoptimization so an unused kprobe can be reused */
static int reuse_unused_kprobe(struct kprobe *ap)
{}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{}

/* Allocate new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: 'p' must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{}

static void optimize_all_kprobes(void)
{}

#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos)
{}

static struct ctl_table kprobe_sysctls[] = {};

static void __init kprobe_sysctls_init(void)
{}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
{}

/* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If optimized kprobes are NOT supported, the aggregator kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
bool kprobe_ftrace_disabled;

static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{}
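
/*
 * A simplified sketch of the arming logic (body elided above; mainline adds
 * error reporting and rollback): put the probe address into the ftrace_ops
 * filter, and register the ops with ftrace when the first probe is armed.
 *
 *	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
 *	if (ret)
 *		return ret;
 *	if (*cnt == 0)
 *		ret = register_ftrace_function(ops);
 *	(*cnt)++;
 *	return ret;
 */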

static int arm_kprobe_ftrace(struct kprobe *p)
{}

static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{}

static int disarm_kprobe_ftrace(struct kprobe *p)
{}

void kprobe_ftrace_kill(void)
{}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}

static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}
#endif

static int prepare_kprobe(struct kprobe *p)
{}

static int arm_kprobe(struct kprobe *kp)
{}

static int disarm_kprobe(struct kprobe *kp, bool reopt)
{}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{}
NOKPROBE_SYMBOL(aggr_post_handler);

/* Walks the list and increments 'nmissed' if 'p' has child probes. */
void kprobes_inc_nmissed_count(struct kprobe *p)
{}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

static struct kprobe kprobe_busy = {};

void kprobe_busy_begin(void)
{}

void kprobe_busy_end(void)
{}

/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{}

/*
 * Fill in the required fields of the aggregator kprobe. Replace the
 * earlier kprobe in the hlist with the aggregator kprobe.
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{}

/*
 * This registers the second or subsequent kprobe at the same address.
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{}

static bool __within_kprobe_blacklist(unsigned long addr)
{}

bool within_kprobe_blacklist(unsigned long addr)
{}

/*
 * arch_adjust_kprobe_addr - adjust the address
 * @addr: symbol base address
 * @offset: offset within the symbol
 * @on_func_entry: was this @addr+@offset on the function entry
 *
 * Typically returns @addr + @offset, except for special cases where the
 * function might be prefixed by a CFI landing pad, in that case any offset
 * inside the landing pad is mapped to the first 'real' instruction of the
 * symbol.
 *
 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
 * instruction at +0.
 */
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
						unsigned long offset,
						bool *on_func_entry)
{}
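
/*
 * The weak default (elided above) has no landing pad to skip, so in
 * mainline it reduces to the trivial mapping:
 *
 *	*on_func_entry = !offset;
 *	return (kprobe_opcode_t *)(addr + offset);
 */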

/*
 * If 'symbol_name' is specified, look it up and add the 'offset'
 * to it. This way, we can specify a relative address to a symbol.
 * This returns an encoded error if the symbol lookup fails or an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
{}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{}

/*
 * Check that 'p' is valid and return the aggregator kprobe
 * at the same address.
 */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{}

/*
 * Warn and return an error if the kprobe is being re-registered, since
 * that indicates a software bug.
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
{}

static int check_ftrace_location(struct kprobe *p)
{}

static bool is_cfi_preamble_symbol(unsigned long addr)
{}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{}

int register_kprobe(struct kprobe *p)
{}
EXPORT_SYMBOL_GPL(register_kprobe);
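
/*
 * For reference, a minimal user of this API, modeled on
 * samples/kprobes/kprobe_example.c. The probed symbol and the names
 * 'my_kp'/'handler_pre' below are illustrative, not part of this file.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("%s hit, addr = %px\n", p->symbol_name, p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= handler_pre,
 *	};
 *
 * Call register_kprobe(&my_kp) from module init (checking the return
 * value) and unregister_kprobe(&my_kp) from module exit.
 */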

/* Check if all probes on the 'ap' are disabled. */
static bool aggr_kprobe_disabled(struct kprobe *ap)
{}

static struct kprobe *__disable_kprobe(struct kprobe *p)
{}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{}

static void __unregister_kprobe_bottom(struct kprobe *p)
{}

int register_kprobes(struct kprobe **kps, int num)
{}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{}
EXPORT_SYMBOL_GPL(unregister_kprobes);

int __weak kprobe_exceptions_notify(struct notifier_block *self,
					unsigned long val, void *data)
{}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {};

#ifdef CONFIG_KRETPROBES

#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)

/* callbacks for objpool of kretprobe instances */
static int kretprobe_init_inst(void *nod, void *context)
{
	struct kretprobe_instance *ri = nod;

	ri->rph = context;
	return 0;
}
static int kretprobe_fini_pool(struct objpool_head *head, void *context)
{
	kfree(context);
	return 0;
}

static void free_rp_inst_rcu(struct rcu_head *head)
{
	struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
	struct kretprobe_holder *rph = ri->rph;

	objpool_drop(ri, &rph->pool);
}
NOKPROBE_SYMBOL(free_rp_inst_rcu);

static void recycle_rp_inst(struct kretprobe_instance *ri)
{
	struct kretprobe *rp = get_kretprobe(ri);

	if (likely(rp))
		objpool_push(ri, &rp->rph->pool);
	else
		call_rcu(&ri->rcu, free_rp_inst_rcu);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

/*
 * This function is called from delayed_put_task_struct() when a task is
 * dead and being cleaned up, to recycle any kretprobe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct llist_node *node;

	/* Early boot, not yet initialized. */
	if (unlikely(!kprobes_initialized))
		return;

	kprobe_busy_begin();

	node = __llist_del_all(&tk->kretprobe_instances);
	while (node) {
		ri = container_of(node, struct kretprobe_instance, llist);
		node = node->next;

		recycle_rp_inst(ri);
	}

	kprobe_busy_end();
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_holder *rph = rp->rph;

	if (!rph)
		return;
	rp->rph = NULL;
	objpool_fini(&rph->pool);
}

/* This assumes the 'tsk' is the current task or a task that is not running. */
static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
						  struct llist_node **cur)
{
	struct kretprobe_instance *ri = NULL;
	struct llist_node *node = *cur;

	if (!node)
		node = tsk->kretprobe_instances.first;
	else
		node = node->next;

	while (node) {
		ri = container_of(node, struct kretprobe_instance, llist);
		if (ri->ret_addr != kretprobe_trampoline_addr()) {
			*cur = node;
			return ri->ret_addr;
		}
		node = node->next;
	}
	return NULL;
}
NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);

/**
 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
 * @tsk: Target task
 * @fp: A frame pointer
 * @cur: a storage of the loop cursor llist_node pointer for next call
 *
 * Find the correct return address modified by a kretprobe on @tsk and
 * return it as an unsigned long. If the return address is found, this
 * returns that address value; otherwise it returns 0.
 * The @tsk must be 'current' or a task which is not running. @fp is a hint
 * to get the correct return address - it is compared with the
 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
 * kretprobe return addresses on @tsk. The '*@cur' should be NULL at the
 * first call, but '@cur' itself must NOT be NULL.
 */
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	struct kretprobe_instance *ri;
	kprobe_opcode_t *ret;

	if (WARN_ON_ONCE(!cur))
		return 0;

	do {
		ret = __kretprobe_find_ret_addr(tsk, cur);
		if (!ret)
			break;
		ri = container_of(*cur, struct kretprobe_instance, llist);
	} while (ri->fp != fp);

	return (unsigned long)ret;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
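
/*
 * Typical use from a stack unwinder, per the kernel-doc above (sketch only;
 * 'fp' stands for whatever frame pointer the unwinder recovered):
 *
 *	struct llist_node *kr_cur = NULL;
 *	unsigned long ret_addr;
 *
 *	ret_addr = kretprobe_find_ret_addr(current, fp, &kr_cur);
 *
 * A non-zero 'ret_addr' is the original return address that the kretprobe
 * trampoline replaced for this frame; 'kr_cur' carries the cursor into the
 * next call while walking further up the stack.
 */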

void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
					kprobe_opcode_t *correct_ret_addr)
{
	/*
	 * Do nothing by default. Please fill this to update the fake return
	 * address on the stack with the correct one on each arch if possible.
	 */
}

unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
					     void *frame_pointer)
{
	struct kretprobe_instance *ri = NULL;
	struct llist_node *first, *node = NULL;
	kprobe_opcode_t *correct_ret_addr;
	struct kretprobe *rp;

	/* Find correct address and all nodes for this frame. */
	correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
	if (!correct_ret_addr) {
		pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
		BUG_ON(1);
	}

	/*
	 * Set the return address as the instruction pointer, because if the
	 * user handler calls stack_trace_save_regs() with this 'regs',
	 * the stack trace will start from the instruction pointer.
	 */
	instruction_pointer_set(regs, (unsigned long)correct_ret_addr);

	/* Run the user handler of the nodes. */
	first = current->kretprobe_instances.first;
	while (first) {
		ri = container_of(first, struct kretprobe_instance, llist);

		if (WARN_ON_ONCE(ri->fp != frame_pointer))
			break;

		rp = get_kretprobe(ri);
		if (rp && rp->handler) {
			struct kprobe *prev = kprobe_running();

			__this_cpu_write(current_kprobe, &rp->kp);
			ri->ret_addr = correct_ret_addr;
			rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, prev);
		}
		if (first == node)
			break;

		first = first->next;
	}

	arch_kretprobe_fixup_return(regs, correct_ret_addr);

	/* Unlink all nodes for this frame. */
	first = current->kretprobe_instances.first;
	current->kretprobe_instances.first = node->next;
	node->next = NULL;

	/* Recycle free instances. */
	while (first) {
		ri = container_of(first, struct kretprobe_instance, llist);
		first = first->next;

		recycle_rp_inst(ri);
	}

	return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(__kretprobe_trampoline_handler);

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	struct kretprobe_holder *rph = rp->rph;
	struct kretprobe_instance *ri;

	ri = objpool_pop(&rph->pool);
	if (!ri) {
		rp->nmissed++;
		return 0;
	}

	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
		objpool_push(ri, &rph->pool);
		return 0;
	}

	arch_prepare_kretprobe(ri, regs);

	__llist_add(&ri->llist, &current->kretprobe_instances);

	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
#else /* CONFIG_KRETPROBE_ON_RETHOOK */
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
				      unsigned long ret_addr,
				      struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kretprobe_rethook_handler);

#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */

/**
 * kprobe_on_func_entry() -- check whether given address is function entry
 * @addr: Target address
 * @sym:  Target symbol name
 * @offset: The offset from the symbol or the address
 *
 * This checks whether the given @addr+@offset or @sym+@offset is on the
 * function entry address or not.
 * This returns 0 if it is the function entry, or -EINVAL if it is not.
 * It also returns -ENOENT if the symbol or address lookup fails.
 * The caller must pass either @addr or @sym (the other must be NULL);
 * otherwise this returns -EINVAL.
 */
int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{}

int register_kretprobe(struct kretprobe *rp)
{}
EXPORT_SYMBOL_GPL(register_kretprobe);
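
/*
 * For reference, a minimal user of this API, modeled on
 * samples/kprobes/kretprobe_example.c. The probed symbol and the names
 * 'my_rp'/'ret_handler' below are illustrative, not part of this file.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("return value: %lx\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 * Call register_kretprobe(&my_rp) from module init and
 * unregister_kretprobe(&my_rp) from module exit.
 */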

int register_kretprobes(struct kretprobe **rps, int num)
{}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{}
EXPORT_SYMBOL_GPL(enable_kprobe);

/* Callers must NOT use this on the usual path; it is only for critical cases. */
void dump_kprobe(struct kprobe *kp)
{}
NOKPROBE_SYMBOL(dump_kprobe);

int kprobe_add_ksym_blacklist(unsigned long entry)
{}

/* Add all symbols in given area into kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{}

int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
				   char *type, char *sym)
{}

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym)
{}

int __init __weak arch_populate_kprobe_blacklist(void)
{}

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{}

#ifdef CONFIG_MODULES
/* Remove all symbols in given area from kprobe blacklist */
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{}

static void kprobe_remove_ksym_blacklist(unsigned long entry)
{}

static void add_module_kprobe_blacklist(struct module *mod)
{}

static void remove_module_kprobe_blacklist(struct module *mod)
{}

/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{}

static struct notifier_block kprobe_module_nb = {};

static int kprobe_register_module_notifier(void)
{}
#else
static int kprobe_register_module_notifier(void)
{
	return 0;
}
#endif /* CONFIG_MODULES */

void kprobe_free_init_mem(void)
{}

static int __init init_kprobes(void)
{}
early_initcall(init_kprobes);

#if defined(CONFIG_OPTPROBES)
static int __init init_optprobes(void)
{}
subsys_initcall(init_optprobes);
#endif

#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{}

static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{}

static const struct seq_operations kprobes_sops = {};

DEFINE_SEQ_ATTRIBUTE(kprobes);

/* kprobes/blacklist -- shows which functions can not be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{}

static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
{}

static const struct seq_operations kprobe_blacklist_sops = {};
DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);

static int arm_all_kprobes(void)
{}

static int disarm_all_kprobes(void)
{}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility once it
 * becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{}
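
/*
 * A sketch of the hand-rolled read side (body elided above; this matches
 * the mainline pattern): report '0' or '1' depending on whether all
 * kprobes are currently disarmed.
 *
 *	char buf[3];
 *
 *	buf[0] = kprobes_all_disarmed ? '0' : '1';
 *	buf[1] = '\n';
 *	buf[2] = 0x00;
 *	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 */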

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{}

static const struct file_operations fops_kp = {};

static int __init debugfs_kprobe_init(void)
{}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */