// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

/*
 * Recover the original instruction bytes at addr into buf.
 * If a jump-optimized kprobe covers addr, the live text holds the JMP.d32
 * displacement rather than the original bytes, so rebuild them from
 * op->optinsn.copied_insn.
 */
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or under unoptimizing */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}

	return addr;
found:
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		/* The first byte is the opcode saved under the INT3 */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 in the template copy with CLAC (0f 01 ca) */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movq $val, %rdi */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
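/*
 * For example, on x86-64, synthesize_set_arg1(buf, 0xffffffffc0001000)
 * emits (the address value is illustrative only):
 *
 *	48 bf 00 10 00 c0 ff ff ff ff	movabs $0xffffffffc0001000, %rdi
 */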

/*
 * The detour template, copied into each optinsn slot. It saves the
 * registers much like the INT3 path does, calls optimized_callback()
 * with the kprobe and the synthesized pt_regs, restores, and then falls
 * through into the copied original instruction(s).
 */
asm (
		".pushsection .rodata\n"
		"optprobe_template_func:\n"
		".global optprobe_template_entry\n"
		"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
		/* We don't bother saving the ss register */
		"	pushq %rsp\n"
		"	pushfq\n"
		".global optprobe_template_clac\n"
		"optprobe_template_clac:\n"
		ASM_NOP3
		SAVE_REGS_STRING
		"	movq %rsp, %rsi\n"
		".global optprobe_template_val\n"
		"optprobe_template_val:\n"
		ASM_NOP5
		ASM_NOP5
		".global optprobe_template_call\n"
		"optprobe_template_call:\n"
		ASM_NOP5
		/* Move flags into the regs->sp slot */
		"	movq 18*8(%rsp), %rdx\n"
		"	movq %rdx, 19*8(%rsp)\n"
		RESTORE_REGS_STRING
		/* Skip the flags entry */
		"	addq $8, %rsp\n"
		"	popfq\n"
#else /* CONFIG_X86_32 */
		"	pushl %esp\n"
		"	pushfl\n"
		".global optprobe_template_clac\n"
		"optprobe_template_clac:\n"
		ASM_NOP3
		SAVE_REGS_STRING
		"	movl %esp, %edx\n"
		".global optprobe_template_val\n"
		"optprobe_template_val:\n"
		ASM_NOP5
		".global optprobe_template_call\n"
		"optprobe_template_call:\n"
		ASM_NOP5
		/* Move flags into the regs->sp slot */
		"	movl 14*4(%esp), %edx\n"
		"	movl %edx, 15*4(%esp)\n"
		RESTORE_REGS_STRING
		/* Skip the flags entry */
		"	addl $4, %esp\n"
		"	popfl\n"
#endif
		".global optprobe_template_end\n"
		"optprobe_template_end:\n"
		".popsection\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
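/*
 * Rough layout of an optinsn (detour) slot built from the template above:
 *
 *	entry:		save stack/flags and build a pt_regs frame
 *	TMPL_CLAC_IDX:	NOP3, patched to CLAC on SMAP-capable CPUs
 *	TMPL_MOVE_IDX:	NOPs, patched to "mov $op, %rdi/%eax"
 *	TMPL_CALL_IDX:	NOP5, patched to "call optimized_callback"
 *			restore regs/flags
 *	TMPL_END_IDX:	copy of the probed instruction(s), followed by a
 *			JMP.d32 back to the instruction after the probed area
 */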

/* Optimized kprobe callback function: called from the detour buffer (optinsn) */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

		/* Fill in the entries the template did not save */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	/* Copy at least enough instructions to cover a JMP.d32 */
	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}

	/* Check whether the address range is reserved by other subsystems */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) ||	/* JMP /4 or /5 */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80)	/* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70)	/* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/*
		 * An unknown breakpoint here could be a padding INT3
		 * between functions. Stop decoding.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check no instruction jumps into the displaced bytes */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	/* Fail if another active kprobe lives inside the displaced bytes */
	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return ((unsigned long)op->kp.addr <= (unsigned long)addr &&
		(unsigned long)op->kp.addr + op->optinsn.size >
		(unsigned long)addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;

	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy and relocate the target instructions.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify the address gap is within the 2GB range, because this
	 * uses a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy the arch-dependent instance from the template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set the returning jmp instruction at the tail of the buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/* The slot is read-only, so install the buffer via text_poke() */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);
	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that no task can still be executing
 * in the 4 bytes after the INT3, so they can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up the bytes which the jump displacement will replace */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}
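/*
 * For a probe at address A, the patching above turns
 *
 *	A: cc <4 original bytes>	(INT3 of the regular kprobe)
 * into
 *	A: e9 <rel32 to optinsn slot>	(JMP.d32 into the detour buffer)
 *
 * with the 4 displaced bytes saved in op->optinsn.copied_insn beforehand
 * so that unoptimizing can restore them.
 */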

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	/* Punch the INT3 over the JMP opcode first, then mend the tail */
	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE,
			     new, JMP32_INSN_SIZE);
}
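/*
 * The two-stage poke above performs, for a probe at address A:
 *
 *	before:	A: e9 <rel32>			(JMP.d32 to the detour buffer)
 *	step 1:	A: cc <rel32 tail intact>	(INT3 punched over the JMP opcode)
 *	step 2:	A: cc <4 original bytes>	(displacement bytes restored)
 *
 * Each step is followed by text_poke_sync(), so another CPU always sees
 * either the valid JMP or a trapping INT3, never a half-written instruction.
 */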

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
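/*
 * Note: nothing calls into this file directly; the kprobes core promotes a
 * registered probe once its site passes can_optimize() (see
 * optimize_kprobe() in kernel/kprobes.c). As a rough sketch, a module doing
 *
 *	static struct kprobe kp = { .symbol_name = "do_sys_openat2" };
 *	...
 *	ret = register_kprobe(&kp);
 *
 * will, after the delayed optimizer has run, show up flagged OPTIMIZED in
 * /sys/kernel/debug/kprobes/list (the symbol name is illustrative only).
 */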