#define pr_fmt(fmt) …
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>
#define KPROBE_HASH_BITS …
#define KPROBE_TABLE_SIZE …
#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init …
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
unsigned int __unused)
{ … }
static LIST_HEAD(kprobe_blacklist);
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
struct kprobe_insn_page { … };
#define KPROBE_INSN_PAGE_SIZE(slots) …
static int slots_per_page(struct kprobe_insn_cache *c)
{ … }
enum kprobe_slot_state { … };
void __weak *alloc_insn_page(void)
{ … }
static void free_insn_page(void *page)
{ … }
struct kprobe_insn_cache kprobe_insn_slots = …;
static int collect_garbage_slots(struct kprobe_insn_cache *c);
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{ … }
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{ … }
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{ … }
void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty)
{ … }
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{ … }
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
unsigned long *value, char *type, char *sym)
{ … }
#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{ … }
void __weak free_optinsn_page(void *page)
{ … }
struct kprobe_insn_cache kprobe_optinsn_slots = …;
#endif
#endif
static inline void set_kprobe_instance(struct kprobe *kp)
{ … }
static inline void reset_kprobe_instance(void)
{ … }
struct kprobe *get_kprobe(void *addr)
{ … }
NOKPROBE_SYMBOL(get_kprobe);
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
static inline bool kprobe_aggrprobe(struct kprobe *p)
{ … }
static inline bool kprobe_unused(struct kprobe *p)
{ … }
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{ … }
#ifdef CONFIG_OPTPROBES
static bool kprobes_allow_optimization;
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{ … }
NOKPROBE_SYMBOL(opt_pre_handler);
static void free_aggr_kprobe(struct kprobe *p)
{ … }
static inline int kprobe_optready(struct kprobe *p)
{ … }
bool kprobe_disarmed(struct kprobe *p)
{ … }
static bool kprobe_queued(struct kprobe *p)
{ … }
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{ … }
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);
static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY …
static void do_optimize_kprobes(void)
{ … }
static void do_unoptimize_kprobes(void)
{ … }
static void do_free_cleaned_kprobes(void)
{ … }
static void kick_kprobe_optimizer(void)
{ … }
static void kprobe_optimizer(struct work_struct *work)
{ … }
void wait_for_kprobe_optimizer(void)
{ … }
bool optprobe_queued_unopt(struct optimized_kprobe *op)
{ … }
static void optimize_kprobe(struct kprobe *p)
{ … }
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{ … }
static void unoptimize_kprobe(struct kprobe *p, bool force)
{ … }
static int reuse_unused_kprobe(struct kprobe *ap)
{ … }
static void kill_optimized_kprobe(struct kprobe *p)
{ … }
static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{ … }
static void prepare_optimized_kprobe(struct kprobe *p)
{ … }
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{ … }
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
static void try_to_optimize_kprobe(struct kprobe *p)
{ … }
static void optimize_all_kprobes(void)
{ … }
#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{ … }
static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
int write, void *buffer,
size_t *length, loff_t *ppos)
{ … }
static struct ctl_table kprobe_sysctls[] = …;
static void __init kprobe_sysctls_init(void)
{ … }
#endif
static void __arm_kprobe(struct kprobe *p)
{ … }
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{ … }
#else
#define optimize_kprobe …
#define unoptimize_kprobe …
#define kill_optimized_kprobe …
#define prepare_optimized_kprobe …
#define try_to_optimize_kprobe …
#define __arm_kprobe …
#define __disarm_kprobe …
#define kprobe_disarmed …
#define wait_for_kprobe_optimizer …
static int reuse_unused_kprobe(struct kprobe *ap)
{
/*
 * Without CONFIG_OPTPROBES an aggregator kprobe is freed as soon as it
 * becomes unused, so there is never an "unused" probe to reuse.
 * Reaching this path indicates a logic error elsewhere.
 */
WARN_ON_ONCE(1);
return -EINVAL;
}
static void free_aggr_kprobe(struct kprobe *p)
{
/*
 * Without CONFIG_OPTPROBES an aggregator is a plain kprobe: tear down
 * its arch-specific state, then release the allocation from
 * alloc_aggr_kprobe().
 */
arch_remove_kprobe(p);
kfree(p);
}
/*
 * Allocate an aggregator probe for @p.  Without CONFIG_OPTPROBES the
 * aggregator is just a zero-initialized plain kprobe; the caller fills
 * it in via init_aggr_kprobe().  Returns NULL on allocation failure.
 */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(*p), GFP_KERNEL);
}
#endif
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = …;
static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = …;
static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
bool kprobe_ftrace_disabled;
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int *cnt)
{ … }
static int arm_kprobe_ftrace(struct kprobe *p)
{ … }
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int *cnt)
{ … }
static int disarm_kprobe_ftrace(struct kprobe *p)
{ … }
void kprobe_ftrace_kill(void)
{ … }
#else
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
/* CONFIG_KPROBES_ON_FTRACE is off: ftrace-based arming is unavailable. */
return -ENODEV;
}
static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
/* CONFIG_KPROBES_ON_FTRACE is off: nothing was ftrace-armed to disarm. */
return -ENODEV;
}
#endif
static int prepare_kprobe(struct kprobe *p)
{ … }
static int arm_kprobe(struct kprobe *kp)
{ … }
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{ … }
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{ … }
NOKPROBE_SYMBOL(aggr_pre_handler);
static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{ … }
NOKPROBE_SYMBOL(aggr_post_handler);
void kprobes_inc_nmissed_count(struct kprobe *p)
{ … }
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
static struct kprobe kprobe_busy = …;
void kprobe_busy_begin(void)
{ … }
void kprobe_busy_end(void)
{ … }
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{ … }
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{ … }
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{ … }
bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{ … }
static bool __within_kprobe_blacklist(unsigned long addr)
{ … }
bool within_kprobe_blacklist(unsigned long addr)
{ … }
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
unsigned long offset,
bool *on_func_entry)
{ … }
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
unsigned long offset, bool *on_func_entry)
{ … }
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{ … }
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{ … }
static inline int warn_kprobe_rereg(struct kprobe *p)
{ … }
static int check_ftrace_location(struct kprobe *p)
{ … }
static bool is_cfi_preamble_symbol(unsigned long addr)
{ … }
static int check_kprobe_address_safe(struct kprobe *p,
struct module **probed_mod)
{ … }
int register_kprobe(struct kprobe *p)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool aggr_kprobe_disabled(struct kprobe *ap)
{ … }
static struct kprobe *__disable_kprobe(struct kprobe *p)
{ … }
static int __unregister_kprobe_top(struct kprobe *p)
{ … }
static void __unregister_kprobe_bottom(struct kprobe *p)
{ … }
int register_kprobes(struct kprobe **kps, int num)
{ … }
EXPORT_SYMBOL_GPL(…);
void unregister_kprobe(struct kprobe *p)
{ … }
EXPORT_SYMBOL_GPL(…);
void unregister_kprobes(struct kprobe **kps, int num)
{ … }
EXPORT_SYMBOL_GPL(…);
int __weak kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{ … }
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
static struct notifier_block kprobe_exceptions_nb = …;
#ifdef CONFIG_KRETPROBES
#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
/*
 * objpool per-object init callback: stamp each kretprobe_instance with
 * its owning kretprobe_holder (@context) so a popped instance can find
 * its way back to the right pool.  Always succeeds.
 */
static int kretprobe_init_inst(void *nod, void *context)
{
	struct kretprobe_instance *inst = nod;

	inst->rph = context;
	return 0;
}
static int kretprobe_fini_pool(struct objpool_head *head, void *context)
{
/*
 * objpool teardown callback.  @context is the kretprobe_holder that was
 * handed to the pool at init time (see kretprobe_init_inst()); free it
 * now that no instance can reference it anymore.
 */
kfree(context);
return 0;
}
/*
 * RCU callback: return an instance to its pool after a grace period.
 * Used when the owning kretprobe is already gone and the instance must
 * not be recycled while lockless walkers may still be looking at it.
 */
static void free_rp_inst_rcu(struct rcu_head *head)
{
	struct kretprobe_instance *ri;

	ri = container_of(head, struct kretprobe_instance, rcu);
	objpool_drop(ri, &ri->rph->pool);
}
NOKPROBE_SYMBOL(free_rp_inst_rcu);
/*
 * Return a finished instance to its owner's pool.  If the owning
 * kretprobe has already been unregistered, defer the release through
 * RCU so concurrent lockless readers stay safe.
 */
static void recycle_rp_inst(struct kretprobe_instance *ri)
{
	struct kretprobe *rp = get_kretprobe(ri);

	if (unlikely(!rp)) {
		call_rcu(&ri->rcu, free_rp_inst_rcu);
		return;
	}
	objpool_push(ri, &rp->rph->pool);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
/*
 * Called when task @tk exits: recycle every kretprobe instance still
 * pending on its return-address stack so none leak with the task.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct llist_node *node;

	/* Early boot: kretprobe instances cannot exist yet. */
	if (unlikely(!kprobes_initialized))
		return;

	kprobe_busy_begin();
	/* Detach the whole per-task list, then recycle each entry. */
	for (node = __llist_del_all(&tk->kretprobe_instances); node; ) {
		struct kretprobe_instance *ri;

		ri = container_of(node, struct kretprobe_instance, llist);
		node = node->next;
		recycle_rp_inst(ri);
	}
	kprobe_busy_end();
}
NOKPROBE_SYMBOL(kprobe_flush_task);
/*
 * Release @rp's instance pool (and, via the pool's fini callback, its
 * holder).  Safe to call more than once: rp->rph is cleared first so a
 * second call is a no-op.
 */
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_holder *rph = rp->rph;

	if (rph) {
		rp->rph = NULL;
		objpool_fini(&rph->pool);
	}
}
/*
 * Walk @tsk's kretprobe instance stack looking for the next entry whose
 * saved return address is a real caller address (not the trampoline,
 * which marks a nested kretprobe hit).
 *
 * @cur is a cursor: NULL means start from the top of the stack;
 * otherwise resume after the previously-found node.  On success it is
 * updated to point at the matching node and the saved return address is
 * returned; NULL is returned when the stack is exhausted.
 */
static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
						  struct llist_node **cur)
{
	struct llist_node *node;

	node = *cur ? (*cur)->next : tsk->kretprobe_instances.first;

	for (; node; node = node->next) {
		struct kretprobe_instance *ri;

		ri = container_of(node, struct kretprobe_instance, llist);
		if (ri->ret_addr != kretprobe_trampoline_addr()) {
			*cur = node;
			return ri->ret_addr;
		}
	}
	return NULL;
}
NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
/*
 * Public wrapper around __kretprobe_find_ret_addr() that additionally
 * matches on the saved frame pointer @fp, skipping entries belonging to
 * other (nested) frames.  Returns the real return address for @fp's
 * frame, or 0 when none is found (or @cur is invalid).
 */
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	kprobe_opcode_t *ret;

	if (WARN_ON_ONCE(!cur))
		return 0;

	for (;;) {
		struct kretprobe_instance *ri;

		ret = __kretprobe_find_ret_addr(tsk, cur);
		if (!ret)
			break;
		ri = container_of(*cur, struct kretprobe_instance, llist);
		if (ri->fp == fp)
			break;
	}
	return (unsigned long)ret;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
/*
 * Weak default: the generic trampoline handler already fixes up the
 * instruction pointer; architectures that keep extra copies of the
 * return address (e.g. on the stack) override this to patch them too.
 */
void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
kprobe_opcode_t *correct_ret_addr)
{
}
/*
 * Core kretprobe trampoline handler.  Runs when a probed function
 * returns into the trampoline: finds the real return address saved for
 * the current frame, invokes the return handlers of all instances
 * belonging to this frame, then unlinks and recycles those instances.
 * Returns the real return address so the arch trampoline can resume.
 *
 * NOTE: the instance list is only modified by the current task here, so
 * the non-atomic list surgery below is safe; the statement order is
 * load-bearing and must not be rearranged.
 */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
void *frame_pointer)
{
struct kretprobe_instance *ri = NULL;
struct llist_node *first, *node = NULL;
kprobe_opcode_t *correct_ret_addr;
struct kretprobe *rp;
/* Locate the first real (non-trampoline) return address on the stack. */
correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
if (!correct_ret_addr) {
pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
BUG_ON(1);
}
/* Make pt_regs reflect a normal return to the real caller. */
instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
/*
 * Run the return handlers of every instance up to and including
 * 'node' — i.e. all instances pushed for this trampoline hit.
 */
first = current->kretprobe_instances.first;
while (first) {
ri = container_of(first, struct kretprobe_instance, llist);
/* fp mismatch means the instance stack is corrupted; bail out. */
if (WARN_ON_ONCE(ri->fp != frame_pointer))
break;
rp = get_kretprobe(ri);
if (rp && rp->handler) {
struct kprobe *prev = kprobe_running();
/* Publish this kretprobe as the running probe for the handler. */
__this_cpu_write(current_kprobe, &rp->kp);
/* Expose the real return address to the handler. */
ri->ret_addr = correct_ret_addr;
rp->handler(ri, regs);
__this_cpu_write(current_kprobe, prev);
}
if (first == node)
break;
first = first->next;
}
/* Let the arch patch any extra copies of the return address. */
arch_kretprobe_fixup_return(regs, correct_ret_addr);
/* Detach the consumed instances from the task, then recycle them. */
first = current->kretprobe_instances.first;
current->kretprobe_instances.first = node->next;
node->next = NULL;
while (first) {
ri = container_of(first, struct kretprobe_instance, llist);
first = first->next;
recycle_rp_inst(ri);
}
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe *rp = container_of(p, struct kretprobe, kp);
struct kretprobe_holder *rph = rp->rph;
struct kretprobe_instance *ri;
ri = objpool_pop(&rph->pool);
if (!ri) {
rp->nmissed++;
return 0;
}
if (rp->entry_handler && rp->entry_handler(ri, regs)) {
objpool_push(ri, &rph->pool);
return 0;
}
arch_prepare_kretprobe(ri, regs);
__llist_add(&ri->llist, ¤t->kretprobe_instances);
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
#else
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{ … }
NOKPROBE_SYMBOL(pre_handler_kretprobe);
static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
unsigned long ret_addr,
struct pt_regs *regs)
{ … }
NOKPROBE_SYMBOL(kretprobe_rethook_handler);
#endif
int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{ … }
int register_kretprobe(struct kretprobe *rp)
{ … }
EXPORT_SYMBOL_GPL(…);
int register_kretprobes(struct kretprobe **rps, int num)
{ … }
EXPORT_SYMBOL_GPL(…);
void unregister_kretprobe(struct kretprobe *rp)
{ … }
EXPORT_SYMBOL_GPL(…);
void unregister_kretprobes(struct kretprobe **rps, int num)
{ … }
EXPORT_SYMBOL_GPL(…);
#else
int register_kretprobe(struct kretprobe *rp)
{
/* CONFIG_KRETPROBES is off: return probing is not supported. */
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
int register_kretprobes(struct kretprobe **rps, int num)
{
/* CONFIG_KRETPROBES is off: return probing is not supported. */
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
void unregister_kretprobe(struct kretprobe *rp)
{
/* CONFIG_KRETPROBES is off: nothing can have been registered. */
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
void unregister_kretprobes(struct kretprobe **rps, int num)
{
/* CONFIG_KRETPROBES is off: nothing can have been registered. */
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
/* CONFIG_KRETPROBES is off: no-op stub so aggregate probes still link. */
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
#endif
static void kill_kprobe(struct kprobe *p)
{ … }
int disable_kprobe(struct kprobe *kp)
{ … }
EXPORT_SYMBOL_GPL(…);
int enable_kprobe(struct kprobe *kp)
{ … }
EXPORT_SYMBOL_GPL(…);
void dump_kprobe(struct kprobe *kp)
{ … }
NOKPROBE_SYMBOL(dump_kprobe);
int kprobe_add_ksym_blacklist(unsigned long entry)
{ … }
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{ … }
int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym)
{ … }
int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{ … }
int __init __weak arch_populate_kprobe_blacklist(void)
{ … }
static int __init populate_kprobe_blacklist(unsigned long *start,
unsigned long *end)
{ … }
#ifdef CONFIG_MODULES
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{ … }
static void kprobe_remove_ksym_blacklist(unsigned long entry)
{ … }
static void add_module_kprobe_blacklist(struct module *mod)
{ … }
static void remove_module_kprobe_blacklist(struct module *mod)
{ … }
static int kprobes_module_callback(struct notifier_block *nb,
unsigned long val, void *data)
{ … }
static struct notifier_block kprobe_module_nb = …;
static int kprobe_register_module_notifier(void)
{ … }
#else
static int kprobe_register_module_notifier(void)
{
/* CONFIG_MODULES is off: no notifier to register; report success. */
return 0;
}
#endif
void kprobe_free_init_mem(void)
{ … }
static int __init init_kprobes(void)
{ … }
early_initcall(init_kprobes);
#if defined(CONFIG_OPTPROBES)
static int __init init_optprobes(void)
{ … }
subsys_initcall(init_optprobes);
#endif
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)
{ … }
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{ … }
static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{ … }
static void kprobe_seq_stop(struct seq_file *f, void *v)
{ … }
static int show_kprobe_addr(struct seq_file *pi, void *v)
{ … }
static const struct seq_operations kprobes_sops = …;
DEFINE_SEQ_ATTRIBUTE(…);
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{ … }
static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{ … }
static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{ … }
static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
{ … }
static const struct seq_operations kprobe_blacklist_sops = …;
DEFINE_SEQ_ATTRIBUTE(…);
static int arm_all_kprobes(void)
{ … }
static int disarm_all_kprobes(void)
{ … }
static ssize_t read_enabled_file_bool(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{ … }
static ssize_t write_enabled_file_bool(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{ … }
static const struct file_operations fops_kp = …;
static int __init debugfs_kprobe_init(void)
{ … }
late_initcall(debugfs_kprobe_init);
#endif