// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{}
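
/*
 * Usage sketch (editor's illustration, not kernel code): a kprobe event
 * handler consuming the return value described above would do roughly
 *
 *	if (!trace_call_bpf(call, regs))
 *		return;		// 0: event filtered out, drop it
 *	// nonzero: go on to store the kprobe event into the ring buffer
 */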

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{}

static const struct bpf_func_proto bpf_override_return_proto =;
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{}

const struct bpf_func_proto bpf_probe_read_user_proto =;

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{}

const struct bpf_func_proto bpf_probe_read_user_str_proto =;

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{}

const struct bpf_func_proto bpf_probe_read_kernel_proto =;

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto =;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{}

static const struct bpf_func_proto bpf_probe_read_compat_proto =;

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto =;
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{}

static const struct bpf_func_proto bpf_probe_write_user_proto =;

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{}

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{}
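
/*
 * BPF-side usage sketch (editor's illustration): the helper accepts at most
 * MAX_TRACE_PRINTK_VARARGS (3) arguments after the format string, e.g.
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 *
 * Programs that need more arguments use bpf_trace_vprintk() below.
 */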

static const struct bpf_func_proto bpf_trace_printk_proto =;

static void __set_printk_clr_event(void)
{}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{}

static const struct bpf_func_proto bpf_trace_vprintk_proto =;

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto =;

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{}

static const struct bpf_func_proto bpf_seq_write_proto =;

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{}

static const struct bpf_func_proto bpf_seq_printf_btf_proto =;

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{}

static const struct bpf_func_proto bpf_perf_event_read_proto =;

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{}

static const struct bpf_func_proto bpf_perf_event_read_value_proto =;

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{}
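
/*
 * Sketch of the nesting discipline used by the helper above (editor's
 * illustration, close to but not verbatim from the implementation): a
 * per-CPU counter picks one of the per-context sample slots, so task, irq,
 * and nmi invocations on the same CPU never share a slot:
 *
 *	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
 *	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 *
 *	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 *		err = -EBUSY;
 *		goto out;
 *	}
 *	sd = &sds->sds[nest_level - 1];
 *	...
 * out:
 *	this_cpu_dec(bpf_trace_nest_level);
 */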

static const struct bpf_func_proto bpf_perf_event_output_proto =;

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{}

BPF_CALL_0(bpf_get_current_task)
{}

const struct bpf_func_proto bpf_get_current_task_proto =;

BPF_CALL_0(bpf_get_current_task_btf)
{}

const struct bpf_func_proto bpf_get_current_task_btf_proto =;

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, task_struct)

const struct bpf_func_proto bpf_task_pt_regs_proto =;

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto =;

struct send_signal_irq_work {};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{}

BPF_CALL_1(bpf_send_signal, u32, sig)
{}

static const struct bpf_func_proto bpf_send_signal_proto =;

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{}

static const struct bpf_func_proto bpf_send_signal_thread_proto =;

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto =;

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{}

const struct bpf_func_proto bpf_snprintf_btf_proto =;

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing =;

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe =;

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi =;

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti =;

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi =;

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti =;

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace =;

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe =;

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing =;

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto =;

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{}

static const struct bpf_func_proto bpf_get_func_arg_proto =;

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{}

static const struct bpf_func_proto bpf_get_func_ret_proto =;

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto =;

#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{}
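
/*
 * BPF-side usage sketch (editor's illustration; assumes a sleepable program
 * type, since the lookup may block):
 *
 *	struct bpf_key *bkey;
 *
 *	bkey = bpf_lookup_user_key(serial, KEY_LOOKUP_CREATE);
 *	if (!bkey)
 *		return 0;
 *	// ...use the key via a key-specific kfunc...
 *	bpf_key_put(bkey);
 */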

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{}
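
/*
 * Usage sketch (editor's illustration): pairing with bpf_key_put() is still
 * required so that the bpf_key wrapper itself is freed:
 *
 *	struct bpf_key *tk = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	if (!tk)
 *		return 0;
 *	// ...pass tk to verify_pkcs7_signature()-backed kfuncs...
 *	bpf_key_put(tk);
 */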

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_p: data to verify
 * @sig_p: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
			       struct bpf_dynptr *sig_p,
			       struct bpf_key *trusted_keyring)
{}
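
/*
 * Usage sketch (editor's illustration; dynptr setup over the data and
 * signature buffers is elided):
 *
 *	struct bpf_key *tk;
 *	int err;
 *
 *	tk = bpf_lookup_system_key(VERIFY_USE_PLATFORM_KEYRING);
 *	if (!tk)
 *		return 0;
 *	err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, tk);
 *	bpf_key_put(tk);
 */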
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set =;

static int __init bpf_key_sig_kfuncs_init(void)
{}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */

/* filesystem kfuncs */
__bpf_kfunc_start_defs();

/**
 * bpf_get_file_xattr - get xattr of a file
 * @file: file to get xattr from
 * @name__str: name of the xattr
 * @value_p: output buffer of the xattr value
 *
 * Get xattr *name__str* of *file* and store the output in *value_p*.
 *
 * For security reasons, only *name__str* with prefix "user." is allowed.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
				   struct bpf_dynptr *value_p)
{}
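
/*
 * BPF-side usage sketch (editor's illustration; "user.checksum" is a made-up
 * attribute name, and the dynptr is assumed to wrap a program-owned buffer):
 *
 *	char buf[64];
 *	struct bpf_dynptr value;
 *
 *	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value);
 *	if (bpf_get_file_xattr(file, "user.checksum", &value) < 0)
 *		return 0;
 */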

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(fs_kfunc_set_ids)

static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{}

static const struct btf_kfunc_id_set bpf_fs_kfunc_set =;

static int __init bpf_fs_kfuncs_init(void)
{}

late_initcall(bpf_fs_kfuncs_init);

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

static bool is_kprobe_multi(const struct bpf_prog *prog)
{}

static inline bool is_kprobe_session(const struct bpf_prog *prog)
{}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{}
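
/*
 * Illustration of what this check permits (editor's sketch of a BPF-side
 * program using libbpf's PT_REGS_IP() accessor, not kernel code):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		unsigned long ip = PT_REGS_IP(ctx); // aligned, in-bounds read
 *		return 0;
 *	}
 */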

const struct bpf_verifier_ops kprobe_verifier_ops =;

const struct bpf_prog_ops kprobe_prog_ops =;

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp =;

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{}

static const struct bpf_func_proto bpf_get_stackid_proto_tp =;

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{}

static const struct bpf_func_proto bpf_get_stack_proto_tp =;

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{}

const struct bpf_verifier_ops tracepoint_verifier_ops =;

const struct bpf_prog_ops tracepoint_prog_ops =;

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto =;

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{}

static const struct bpf_func_proto bpf_read_branch_records_proto =;

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{}

static void put_bpf_raw_tp_regs(void)
{}
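
/*
 * Sketch of the get/put pattern declared above (editor's illustration,
 * close to but not verbatim from the implementation):
 *
 *	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
 *	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
 *
 *	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
 *		this_cpu_dec(bpf_raw_tp_nest_level);
 *		return ERR_PTR(-EBUSY);
 *	}
 *	return &tp_regs->regs[nest_level - 1];
 */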

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp =;

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp =;

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp =;

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops =;

const struct bpf_prog_ops raw_tracepoint_prog_ops =;

const struct bpf_verifier_ops tracing_verifier_ops =;

const struct bpf_prog_ops tracing_prog_ops =;

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops =;

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops =;

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{}

const struct bpf_verifier_ops perf_event_verifier_ops =;

const struct bpf_prog_ops perf_event_prog_ops =;

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{}

void perf_event_detach_bpf_prog(struct perf_event *event)
{}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{}

static __always_inline
void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
{}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(link, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
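
/*
 * For reference (editor's note): BPF_TRACE_DEFN_x(2) expands to roughly
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */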

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed)
{}

static int __init send_signal_irq_work_init(void)
{}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{}

static struct notifier_block bpf_module_nb =;

static int __init bpf_event_init(void)
{}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

struct bpf_session_run_ctx {};

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {};

struct bpf_kprobe_multi_run_ctx {};

struct user_syms {};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{}

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{}

static void free_user_syms(struct user_syms *us)
{}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{}

static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
						struct bpf_link_info *info)
{}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops =;

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{}

static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{}

static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs,
			   bool is_return, void *data)
{}

static int
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  unsigned long ret_ip, struct pt_regs *regs,
			  void *data)
{}

static void
kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
			       unsigned long ret_ip, struct pt_regs *regs,
			       void *data)
{}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{}

struct multi_symbols_sort {};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{}

struct modules_array {};

static int add_module(struct modules_array *arr, struct module *mod)
{}

static bool has_module(struct modules_array *arr, struct module *mod)
{}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{}

static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
{}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{}
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif

#ifdef CONFIG_UPROBES
struct bpf_uprobe_multi_link;

struct bpf_uprobe {};

struct bpf_uprobe_multi_link {};

struct bpf_uprobe_multi_run_ctx {};

static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
				  u32 cnt)
{}

static void bpf_uprobe_multi_link_release(struct bpf_link *link)
{}

static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
{}

static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
						struct bpf_link_info *info)
{}

static const struct bpf_link_ops bpf_uprobe_multi_link_lops =;

static int uprobe_prog_run(struct bpf_uprobe *uprobe,
			   unsigned long entry_ip,
			   struct pt_regs *regs)
{}

static bool
uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
			 struct mm_struct *mm)
{}

static int
uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
{}

static int
uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
{}

static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{}

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{}

int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{}
#else /* !CONFIG_UPROBES */
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif /* CONFIG_UPROBES */

__bpf_kfunc_start_defs();

__bpf_kfunc bool bpf_session_is_return(void)
{}

__bpf_kfunc __u64 *bpf_session_cookie(void)
{}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_session_is_return)
BTF_ID_FLAGS(func, bpf_session_cookie)
BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)

static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
{}

static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set =;

static int __init bpf_kprobe_multi_kfuncs_init(void)
{}

late_initcall(bpf_kprobe_multi_kfuncs_init);