// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);
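
/* Lookup keys combine the attach target's identity into one u64: the
 * target prog id (or BTF object id, with bit 31 set) in the upper half
 * and the target btf_id in the lower half; see
 * bpf_trampoline_compute_key() in <linux/bpf_verifier.h>. Sketch quoted
 * from memory, kept out of the build:
 */
#if 0
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}
#endif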

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
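/* When ftrace manages the attach point, the trampoline is installed as an
 * ftrace "direct call". The ops_func callback below lets ftrace ask for the
 * trampoline to be re-generated with BPF_TRAMP_F_SHARE_IPMODIFY when an
 * IPMODIFY user such as livepatch attaches to the same function.
 */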
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{}
#endif

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + size;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		kfree(tr);
		tr = NULL;
		goto out;
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
			 bool lock_direct_mutex)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	}
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr) {
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	}

	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	}

	return ret;
}

static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{}

static void bpf_tramp_image_free(struct bpf_tramp_image *im)
{
	bpf_image_ksym_del(&im->ksym);
	arch_free_bpf_trampoline(im->image, im->size);
	bpf_jit_uncharge_modmem(im->size);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_tramp_image_free(im);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{}

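/* Freeing an image must out-wait every task that might still be executing
 * its code. The "step" comments above describe that chain: step 1 is a
 * tasks-RCU callback issued once the image is unreachable; fexit/fmod_ret
 * images then drain the percpu_ref held across the call to the original
 * function (step 2); the final step queues __bpf_tramp_image_put_deferred()
 * on a workqueue, which does the actual bpf_tramp_image_free().
 */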
static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{}

static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(size);
	if (err)
		goto out_free_im;
	im->size = size;

	err = -ENOMEM;
	im->image = image = arch_alloc_bpf_trampoline(size);
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
	bpf_image_ksym_add(image, size, ksym);
	return im;

out_free_image:
	arch_free_bpf_trampoline(im->image, im->size);
out_uncharge:
	bpf_jit_uncharge_modmem(size);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{}
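
/* The body above is elided; in outline (simplified sketch, with error
 * handling, flag computation and the ftrace/IPMODIFY retry logic omitted,
 * kept out of the build) an update does:
 */
#if 0
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_image *im;
	int size, err, total;
	bool ip_arg;
	u32 flags;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	size = arch_bpf_trampoline_size(&tr->func.model, flags, tlinks,
					tr->func.addr);
	im = bpf_tramp_image_alloc(tr->key, size);
	err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
					  &tr->func.model, flags, tlinks,
					  tr->func.addr);
	err = arch_protect_bpf_trampoline(im->image, size);
	err = tr->cur_image ?
	      modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex) :
	      register_fentry(tr, im->image);
	tr->cur_image = im;
#endif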

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
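/* BPF_LSM_CGROUP programs attach through a "shim": a small stub prog linked
 * into the LSM hook's trampoline whose only job is to dispatch to the
 * cgroup's programs for the current task. The helpers below allocate, find,
 * and release those shim links.
 */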
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* All progs are detached; the current image, if any, is freed
	 * through the RCU callback chain above.
	 */
	hlist_del(&tr->hlist);
	if (tr->fops) {
		ftrace_free_filter(tr->fops);
		kfree(tr->fops);
	}
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*. And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		u64 dur = sched_clock() - start;
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, dur);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
					  struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}
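
/* For orientation: the machine code emitted by arch_prepare_bpf_trampoline()
 * behaves roughly like this hand-written fragment for a single fentry prog
 * (illustrative sketch only, kept out of the build):
 */
#if 0
	struct bpf_tramp_run_ctx run_ctx = {};
	u64 start;

	start = __bpf_prog_enter_recur(prog, &run_ctx);
	if (start)	/* 0 means: recursed, skip this prog */
		prog->bpf_func(args, prog->insnsi);
	__bpf_prog_exit_recur(prog, start, &run_ctx);
#endif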

static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					       struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					       struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
					      struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
					      struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	migrate_enable();
	rcu_read_unlock_trace();
}

static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
				    struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
				    struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	migrate_enable();
	rcu_read_unlock();
}

void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
{
	bool sleepable = prog->sleepable;

	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_enter_sleepable_recur :
			__bpf_prog_enter_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_enter_lsm_cgroup;

	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
}

bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
{
	bool sleepable = prog->sleepable;

	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_exit_sleepable_recur :
			__bpf_prog_exit_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_exit_lsm_cgroup;

	return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
}

int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *func_addr)
{
	return -ENOTSUPP;
}

void * __weak arch_alloc_bpf_trampoline(unsigned int size)
{
	void *image;

	if (WARN_ON_ONCE(size > PAGE_SIZE))
		return NULL;
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (image)
		set_vm_flush_reset_perms(image);
	return image;
}

void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
{
	WARN_ON_ONCE(size > PAGE_SIZE);
	/* bpf_jit_free_exec() doesn't need "size", but
	 * bpf_prog_pack_free() needs it.
	 */
	bpf_jit_free_exec(image);
}

int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	WARN_ON_ONCE(size > PAGE_SIZE);
	return set_memory_rox((long)image, 1);
}

int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
				    struct bpf_tramp_links *tlinks, void *func_addr)
{
	return -ENOTSUPP;
}
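
/* Architectures with trampoline support (e.g. x86 in
 * arch/x86/net/bpf_jit_comp.c) override the __weak hooks above; the
 * defaults only make the feature degrade gracefully to -ENOTSUPP.
 */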

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);