linux/arch/x86/kernel/kprobes/core.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
 *		Probes initial implementation ( includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <[email protected]>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <[email protected]>, Jim Keniston
 *		<[email protected]> and Prasanna S Panchamukhi
 *		<[email protected]> added function-return probes.
 * 2005-May	Rusty Lynch <[email protected]>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <[email protected]> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <[email protected]> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <[email protected]>, Arjan van de Ven
 *		<[email protected]> and Jim Keniston <[email protected]>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>
#include <linux/cfi.h>
#include <linux/execmem.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
	 * Groups, and some special opcodes can not boost.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{}

/* Write a relative jump at @dest, to be executed at @from, jumping to @to. */
void synthesize_reljump(void *dest, void *from, void *to)
{}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Write a relative call at @dest, to be executed at @from, calling @to. */
void synthesize_relcall(void *dest, void *from, void *to)
{}
NOKPROBE_SYMBOL(synthesize_relcall);
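
/*
 * Illustrative sketch (not the kernel's internal layout): a "relative"
 * jmp/call on x86 is one opcode byte (0xe9 for jmp rel32, 0xe8 for call
 * rel32) followed by a signed 32-bit displacement, measured from the end
 * of the 5-byte instruction at its execution address @from.
 */
struct example_relative_insn {
	u8  op;		/* 0xe9 (jmp rel32) or 0xe8 (call rel32) */
	s32 raddr;	/* to - (from + 5) */
} __packed;

static void __maybe_unused
example_synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct example_relative_insn *insn = dest;

	insn->raddr = (s32)((long)to - ((long)from + 5));
	insn->op = op;
}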

/*
 * Returns true if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
bool can_boost(struct insn *insn, void *addr)
{}
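
/*
 * A minimal usage sketch: decode the bytes at @addr with the kernel's
 * x86 instruction decoder and ask can_boost() about the result. The
 * helper name is hypothetical; insn_decode_kernel() is the decoder
 * entry point from <asm/insn.h>.
 */
static bool __maybe_unused example_addr_is_boostable(void *addr)
{
	struct insn insn;

	if (insn_decode_kernel(&insn, addr) < 0)
		return false;

	return can_boost(&insn, addr);
}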

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex or disable preemption to prevent
 * the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or the access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{}

/* Check if insn is INT or UD */
static inline bool is_exception_insn(struct insn *insn)
{}
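
/*
 * Sketch of how INT/UD instructions can be recognized from the opcode
 * bytes alone (example_ helper is hypothetical; the kernel's check may
 * be structured differently):
 */
static bool __maybe_unused example_is_exception_insn(struct insn *insn)
{
	/* UD0 (0x0f 0xff), UD1 (0x0f 0xb9), UD2 (0x0f 0x0b) use the 0x0f escape */
	if (insn->opcode.bytes[0] == 0x0f)
		return insn->opcode.bytes[1] == 0xff ||
		       insn->opcode.bytes[1] == 0xb9 ||
		       insn->opcode.bytes[1] == 0x0b;

	/* INT3 (0xcc), INT n (0xcd), INTO (0xce), INT1 (0xf1) */
	return insn->opcode.bytes[0] == 0xcc ||
	       insn->opcode.bytes[0] == 0xcd ||
	       insn->opcode.bytes[0] == 0xce ||
	       insn->opcode.bytes[0] == 0xf1;
}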

/*
 * Check that paddr sits on an instruction boundary and that the
 * instruction there can be probed.
 */
static bool can_probe(unsigned long paddr)
{}

/* If x86 supports IBT (ENDBR), the ENDBR at a function entry must be skipped. */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{}
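
/*
 * Sketch of the ENDBR adjustment, assuming an IBT-enabled kernel: ENDBR64
 * encodes as f3 0f 1e fa (0xfa1e0ff3 read as a little-endian u32), so a
 * probe aimed at a function entry that starts with ENDBR must land 4
 * bytes later. The helper below is hypothetical and only illustrates the
 * arithmetic.
 */
static unsigned long __maybe_unused example_skip_endbr(unsigned long addr)
{
	u32 insn;

	if (get_kernel_nofault(insn, (u32 *)addr))
		return addr;

	return insn == 0xfa1e0ff3 ? addr + 4 : addr;	/* ENDBR64 */
}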

/*
 * Copy an instruction, recovering any modification made by kprobes, and
 * adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted based on @real,
 * not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{}
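
/*
 * Sketch of the %rip-relative fixup described above: a RIP-relative
 * operand encodes "target - next_ip", so when the instruction moves from
 * @src to @real the displacement must grow by (src - real). Hypothetical
 * helper; insn_rip_relative() and the displacement field come from
 * <asm/insn.h>.
 */
static void __maybe_unused
example_fixup_riprel(struct insn *insn, u8 *src, u8 *real, s32 *disp)
{
	if (insn_rip_relative(insn))
		*disp = (s32)((long)src + insn->displacement.value -
			      (long)real);
}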

/* Prepare a reljump or an int3 right after the copied instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{}

/* Allocate an instruction page and switch it to ROX (read-only, executable) */
void *alloc_insn_page(void)
{}
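
/*
 * A minimal sketch of an ROX instruction-page allocation, assuming the
 * execmem API from <linux/execmem.h>: allocate from the kprobes range,
 * then flip the page to read-only+executable, so any later writes have
 * to go through text_poke().
 */
static void * __maybe_unused example_alloc_rox_page(void)
{
	void *page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);

	if (page)
		set_memory_rox((unsigned long)page, 1);

	return page;
}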

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_call);
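
/*
 * Sketch of the emulation idea, using the int3_emulate_*() helpers from
 * <asm/text-patching.h>: at the trap, regs->ip points just past the
 * int3, so the address of the next "real" instruction is regs->ip -
 * INT3_INSN_SIZE plus the probed instruction's length. The insn_size and
 * rel32 parameters are hypothetical stand-ins for fields the real code
 * caches in p->ainsn.
 */
static void __maybe_unused
example_emulate_rel_call(struct pt_regs *regs, int insn_size, s32 rel32)
{
	unsigned long next_ip = regs->ip - INT3_INSN_SIZE + insn_size;

	int3_emulate_push(regs, next_ip);		/* push return address */
	int3_emulate_jmp(regs, next_ip + rel32);	/* jump to the callee */
}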

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);
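
/*
 * Sketch of the indirect-branch target lookup: addrmode_regoffs maps a
 * register number to that register's offset inside struct pt_regs, so
 * the branch target can be loaded from the saved register file. The @reg
 * argument is hypothetical; the real code determines the register index
 * at prepare time from the instruction encoding.
 */
static unsigned long __maybe_unused
example_indirect_target(struct pt_regs *regs, int reg)
{
	return *(unsigned long *)((unsigned long)regs + addrmode_regoffs[reg]);
}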

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{}

static int arch_copy_kprobe(struct kprobe *p)
{}

int arch_prepare_kprobe(struct kprobe *p)
{}

void arch_arm_kprobe(struct kprobe *p)
{}

void arch_disarm_kprobe(struct kprobe *p)
{}
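
/*
 * Sketch of arming, assuming the text_poke() API from
 * <asm/text-patching.h>: arming a kprobe overwrites the first byte of
 * the probed instruction with int3 (0xcc); disarming writes the saved
 * original byte (p->opcode) back the same way.
 */
static void __maybe_unused example_arm(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();	/* make all CPUs see the breakpoint */
}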

void arch_remove_kprobe(struct kprobe *p)
{}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn. We don't use the trap flag, but place an
 * "int3" right after the copied instruction instead.
 * Unlike trap-based single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp, call,
 * and conditional jmp, nor instructions that change the IF flag,
 * because interrupts must stay disabled around the single-step.
 * Such instructions are emulated in software; all others are
 * single-stepped using "int3".
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that execution resumes at the correct address.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{}
NOKPROBE_SYMBOL(resume_singlestep);
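
/*
 * Sketch of the ip fixup described above: after the second int3,
 * regs->ip points into the copy buffer, so shifting it by
 * (p->addr - p->ainsn.insn) translates it back into the original text.
 */
static void __maybe_unused
example_resume_fixup(struct kprobe *p, struct pt_regs *regs)
{
	regs->ip += (unsigned long)p->addr - (unsigned long)p->ainsn.insn;
}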

/*
 * We have reentered kprobe_handler() because another probe was hit while
 * within the handler. We save the original kprobes variables and just
 * single-step the instruction of the new probe without calling any user
 * handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{}

/*
 * Interrupts are disabled on entry, as int3 uses an interrupt gate, and
 * they remain disabled throughout this function.
 */
 */
int kprobe_int3_handler(struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(kprobe_int3_handler);
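
/*
 * Sketch of the probe lookup at int3 time: the trap leaves regs->ip one
 * byte past the int3, so the probed address is regs->ip - INT3_INSN_SIZE,
 * which get_kprobe() resolves to the registered struct kprobe (or NULL).
 */
static struct kprobe * __maybe_unused
example_lookup_probe(struct pt_regs *regs)
{
	return get_kprobe((void *)(regs->ip - INT3_INSN_SIZE));
}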

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{}

int __init arch_init_kprobes(void)
{}

int arch_trampoline_kprobe(struct kprobe *p)
{}