#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/overflow.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <linux/execmem.h>
#include <asm/barrier.h>
#include <asm/unaligned.h>
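/*
 * Shorthand used throughout the interpreter below: BPF_R0..BPF_R10 name the
 * slots of the in-kernel register file, DST/SRC/FP/AX/ARG1/CTX resolve to the
 * registers selected by the current instruction, and OFF/IMM are its signed
 * offset and immediate fields.
 */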
#define BPF_R0 …
#define BPF_R1 …
#define BPF_R2 …
#define BPF_R3 …
#define BPF_R4 …
#define BPF_R5 …
#define BPF_R6 …
#define BPF_R7 …
#define BPF_R8 …
#define BPF_R9 …
#define BPF_R10 …
#define DST …
#define SRC …
#define FP …
#define AX …
#define ARG1 …
#define CTX …
#define OFF …
#define IMM …
struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;
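/*
 * Services classic-BPF loads at negative offsets (SKF_LL_OFF/SKF_NET_OFF),
 * i.e. reads relative to the link-layer or network header rather than
 * skb->data; kept non-static so JIT-generated code can fall back to it.
 */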
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{ … }
enum page_size_enum { … };
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{ … }
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{ … }
EXPORT_SYMBOL_GPL(…);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{ … }
void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{ … }
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off)
{ … }
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{ … }
void __bpf_prog_free(struct bpf_prog *fp)
{ … }
int bpf_prog_calc_tag(struct bpf_prog *fp)
{ … }
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, s32 curr, const bool probe_pass)
{ … }
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, s32 curr, const bool probe_pass)
{ … }
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
s32 end_new, const bool probe_pass)
{ … }
static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{ … }
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len)
{ … }
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{ … }
static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{ … }
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{ … }
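/*
 * JIT state and knobs: the variables below back the net.core.bpf_jit_enable,
 * bpf_jit_kallsyms, bpf_jit_harden and bpf_jit_limit sysctls.
 */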
#ifdef CONFIG_BPF_JIT
int bpf_jit_enable __read_mostly = … IS_BUILTIN(…);
int bpf_jit_kallsyms __read_mostly = … IS_BUILTIN(…);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
long bpf_jit_limit_max __read_mostly;
static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{ … }
static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{ … }
static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{ … }
static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
struct latch_tree_node *b)
{ … }
static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{ … }
static const struct latch_tree_ops bpf_tree_ops = …;
static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
void bpf_ksym_add(struct bpf_ksym *ksym)
{ … }
static void __bpf_ksym_del(struct bpf_ksym *ksym)
{ … }
void bpf_ksym_del(struct bpf_ksym *ksym)
{ … }
static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{ … }
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{ … }
void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{ … }
static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{ … }
int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{ … }
bool is_bpf_text_address(unsigned long addr)
{ … }
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{ … }
const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{ … }
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{ … }
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke)
{ … }
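/*
 * bpf_prog_pack: instead of giving every JITed program its own page, pack
 * many images into a shared read-only+executable region (PMD-sized where the
 * architecture provides it) and hand out BPF_PROG_CHUNK_SIZE-granular chunks
 * from it, which cuts iTLB pressure on systems with many programs.
 */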
#define BPF_PROG_CHUNK_SHIFT …
#define BPF_PROG_CHUNK_SIZE …
#define BPF_PROG_CHUNK_MASK …
struct bpf_prog_pack { … };
void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{ … }
#define BPF_PROG_SIZE_TO_NBITS(size) …
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);
#ifdef PMD_SIZE
#define BPF_PROG_PACK_SIZE …
#else
#define BPF_PROG_PACK_SIZE …
#endif
#define BPF_PROG_CHUNK_COUNT …
static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{ … }
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{ … }
void bpf_prog_pack_free(void *ptr, u32 size)
{ … }
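/*
 * Accounting for JITed image memory: allocations are charged against
 * bpf_jit_limit (tracked in bpf_jit_current) so that unprivileged users
 * cannot exhaust the executable-memory pool.
 */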
static atomic_long_t bpf_jit_current;
u64 __weak bpf_jit_alloc_exec_limit(void)
{ … }
static int __init bpf_jit_charge_init(void)
{ … }
pure_initcall(bpf_jit_charge_init);
int bpf_jit_charge_modmem(u32 size)
{ … }
void bpf_jit_uncharge_modmem(u32 size)
{ … }
void *__weak bpf_jit_alloc_exec(unsigned long size)
{ … }
void __weak bpf_jit_free_exec(void *addr)
{ … }
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{ … }
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{ … }
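/*
 * Pack-based image allocation uses a pair of headers: the JIT emits into a
 * writable copy (rw_header/rw_image) and bpf_jit_binary_pack_finalize() then
 * copies it into the read-only+executable pack mapping (ro_header) via
 * bpf_arch_text_copy().
 */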
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
struct bpf_binary_header **rw_header,
u8 **rw_image,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{ … }
int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header)
{ … }
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header)
{ … }
struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{ … }
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{ … }
void __weak bpf_jit_free(struct bpf_prog *fp)
{ … }
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed)
{ … }
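/*
 * Constant blinding (bpf_jit_harden): immediates in the original program are
 * rewritten so that attacker-chosen constants never appear verbatim in the
 * JITed image. In effect an insn like BPF_ALU64_IMM(BPF_ADD, dst, imm)
 * becomes, with a fresh random rnd:
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ imm)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *   BPF_ALU64_REG(BPF_ADD, dst,        BPF_REG_AX)
 */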
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff,
bool emit_zext)
{ … }
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
gfp_t gfp_extra_flags)
{ … }
static void bpf_prog_clone_free(struct bpf_prog *fp)
{ … }
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{ … }
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{ … }
#endif
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{ … }
EXPORT_SYMBOL_GPL(…);
#define BPF_INSN_MAP(INSN_2, INSN_3) …
bool bpf_opcode_in_insntable(u8 code)
{ … }
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
#define BPF_INSN_2_LBL …
#define BPF_INSN_3_LBL …
static const void * const jumptable[256] __annotate_jump_table = {
[0 ... 255] = &&default_label,
BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
[BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
u32 tail_call_cnt = 0;
#define CONT …
#define CONT_JMP …
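	/*
	 * Threaded interpreter: jumptable[] maps every opcode to a
	 * computed-goto label, with unknown opcodes falling through to
	 * default_label. CONT advances to the next instruction and
	 * re-dispatches; CONT_JMP is the equivalent used from the branch
	 * handlers.
	 */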
select_insn:
goto *jumptable[insn->code];
#define SHT …
#define ALU …
ALU(ADD, +)
ALU(SUB, -)
ALU(AND, &)
ALU(OR, |)
ALU(XOR, ^)
ALU(MUL, *)
SHT(LSH, <<)
SHT(RSH, >>)
#undef SHT
#undef ALU
ALU_NEG:
DST = (u32) -DST;
CONT;
ALU64_NEG:
DST = -DST;
CONT;
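	/*
	 * MOV with a non-zero off field is BPF_MOVSX (cpu v4): off of
	 * 8/16/32 sign-extends the low 8/16/32 bits of the source, off of 0
	 * is the plain MOV.
	 */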
ALU_MOV_X:
switch (OFF) {
case 0:
DST = (u32) SRC;
break;
case 8:
DST = (u32)(s8) SRC;
break;
case 16:
DST = (u32)(s16) SRC;
break;
}
CONT;
ALU_MOV_K:
DST = (u32) IMM;
CONT;
ALU64_MOV_X:
switch (OFF) {
case 0:
DST = SRC;
break;
case 8:
DST = (s8) SRC;
break;
case 16:
DST = (s16) SRC;
break;
case 32:
DST = (s32) SRC;
break;
}
CONT;
ALU64_MOV_K:
DST = IMM;
CONT;
LD_IMM_DW:
DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
insn++;
CONT;
ALU_ARSH_X:
DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
CONT;
ALU_ARSH_K:
DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= (SRC & 63);
CONT;
ALU64_ARSH_K:
(*(s64 *) &DST) >>= IMM;
CONT;
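	/*
	 * For DIV and MOD the off field selects the flavour: 0 is the
	 * unsigned operation, 1 the signed BPF_SDIV/BPF_SMOD (cpu v4). The
	 * 32-bit signed cases are emulated with an unsigned do_div() on
	 * absolute values plus a sign fix-up afterwards.
	 */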
ALU64_MOD_X:
switch (OFF) {
case 0:
div64_u64_rem(DST, SRC, &AX);
DST = AX;
break;
case 1:
AX = div64_s64(DST, SRC);
DST = DST - AX * SRC;
break;
}
CONT;
ALU_MOD_X:
switch (OFF) {
case 0:
AX = (u32) DST;
DST = do_div(AX, (u32) SRC);
break;
case 1:
AX = abs((s32)DST);
AX = do_div(AX, abs((s32)SRC));
if ((s32)DST < 0)
DST = (u32)-AX;
else
DST = (u32)AX;
break;
}
CONT;
ALU64_MOD_K:
switch (OFF) {
case 0:
div64_u64_rem(DST, IMM, &AX);
DST = AX;
break;
case 1:
AX = div64_s64(DST, IMM);
DST = DST - AX * IMM;
break;
}
CONT;
ALU_MOD_K:
switch (OFF) {
case 0:
AX = (u32) DST;
DST = do_div(AX, (u32) IMM);
break;
case 1:
AX = abs((s32)DST);
AX = do_div(AX, abs((s32)IMM));
if ((s32)DST < 0)
DST = (u32)-AX;
else
DST = (u32)AX;
break;
}
CONT;
ALU64_DIV_X:
switch (OFF) {
case 0:
DST = div64_u64(DST, SRC);
break;
case 1:
DST = div64_s64(DST, SRC);
break;
}
CONT;
ALU_DIV_X:
switch (OFF) {
case 0:
AX = (u32) DST;
do_div(AX, (u32) SRC);
DST = (u32) AX;
break;
case 1:
AX = abs((s32)DST);
do_div(AX, abs((s32)SRC));
if (((s32)DST < 0) == ((s32)SRC < 0))
DST = (u32)AX;
else
DST = (u32)-AX;
break;
}
CONT;
ALU64_DIV_K:
switch (OFF) {
case 0:
DST = div64_u64(DST, IMM);
break;
case 1:
DST = div64_s64(DST, IMM);
break;
}
CONT;
ALU_DIV_K:
switch (OFF) {
case 0:
AX = (u32) DST;
do_div(AX, (u32) IMM);
DST = (u32) AX;
break;
case 1:
AX = abs((s32)DST);
do_div(AX, abs((s32)IMM));
if (((s32)DST < 0) == ((s32)IMM < 0))
DST = (u32)AX;
else
DST = (u32)-AX;
break;
}
CONT;
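	/*
	 * BPF_END converts between host order and the requested endianness
	 * at 16/32/64 bits; the BPF_ALU64 form (ALU64_END_TO_LE below) is
	 * the unconditional byte swap that ignores host endianness.
	 */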
ALU_END_TO_BE:
switch (IMM) {
case 16:
DST = (__force u16) cpu_to_be16(DST);
break;
case 32:
DST = (__force u32) cpu_to_be32(DST);
break;
case 64:
DST = (__force u64) cpu_to_be64(DST);
break;
}
CONT;
ALU_END_TO_LE:
switch (IMM) {
case 16:
DST = (__force u16) cpu_to_le16(DST);
break;
case 32:
DST = (__force u32) cpu_to_le32(DST);
break;
case 64:
DST = (__force u64) cpu_to_le64(DST);
break;
}
CONT;
ALU64_END_TO_LE:
switch (IMM) {
case 16:
DST = (__force u16) __swab16(DST);
break;
case 32:
DST = (__force u32) __swab32(DST);
break;
case 64:
DST = (__force u64) __swab64(DST);
break;
}
CONT;
JMP_CALL:
BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
BPF_R4, BPF_R5);
CONT;
JMP_CALL_ARGS:
BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
BPF_R3, BPF_R4,
BPF_R5,
insn + insn->off + 1);
CONT;
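	/*
	 * Tail call: R2 holds the prog-array map, R3 the index. On success
	 * control transfers to the target program without returning here,
	 * and the chain is capped at MAX_TAIL_CALL_CNT to bound stack usage
	 * and run time.
	 */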
JMP_TAIL_CALL: {
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_prog *prog;
u32 index = BPF_R3;
if (unlikely(index >= array->map.max_entries))
goto out;
if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
goto out;
tail_call_cnt++;
prog = READ_ONCE(array->ptrs[index]);
if (!prog)
goto out;
insn = prog->insnsi;
goto select_insn;
out:
CONT;
}
JMP_JA:
insn += insn->off;
CONT;
JMP32_JA:
insn += insn->imm;
CONT;
JMP_EXIT:
return BPF_R0;
#define COND_JMP …
COND_JMP(u, JEQ, ==)
COND_JMP(u, JNE, !=)
COND_JMP(u, JGT, >)
COND_JMP(u, JLT, <)
COND_JMP(u, JGE, >=)
COND_JMP(u, JLE, <=)
COND_JMP(u, JSET, &)
COND_JMP(s, JSGT, >)
COND_JMP(s, JSLT, <)
COND_JMP(s, JSGE, >=)
COND_JMP(s, JSLE, <=)
#undef COND_JMP
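	/*
	 * BPF_NOSPEC is the speculation barrier the verifier inserts to
	 * mitigate speculative store bypass (Spectre v4); in the interpreter
	 * it is simply barrier_nospec().
	 */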
ST_NOSPEC:
barrier_nospec();
CONT;
#define LDST …
LDST(B, u8)
LDST(H, u16)
LDST(W, u32)
LDST(DW, u64)
#undef LDST
#define LDSX …
LDSX(B, s8)
LDSX(H, s16)
LDSX(W, s32)
#undef LDSX
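	/*
	 * Atomic instructions encode the operation in the imm field. The
	 * BPF_FETCH variants return the previous memory value in the source
	 * register, while BPF_CMPXCHG compares against and returns the old
	 * value in R0.
	 */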
#define ATOMIC_ALU_OP …
STX_ATOMIC_DW:
STX_ATOMIC_W:
switch (IMM) {
ATOMIC_ALU_OP(BPF_ADD, add)
ATOMIC_ALU_OP(BPF_AND, and)
ATOMIC_ALU_OP(BPF_OR, or)
ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP
case BPF_XCHG:
if (BPF_SIZE(insn->code) == BPF_W)
SRC = (u32) atomic_xchg(
(atomic_t *)(unsigned long) (DST + insn->off),
(u32) SRC);
else
SRC = (u64) atomic64_xchg(
(atomic64_t *)(unsigned long) (DST + insn->off),
(u64) SRC);
break;
case BPF_CMPXCHG:
if (BPF_SIZE(insn->code) == BPF_W)
BPF_R0 = (u32) atomic_cmpxchg(
(atomic_t *)(unsigned long) (DST + insn->off),
(u32) BPF_R0, (u32) SRC);
else
BPF_R0 = (u64) atomic64_cmpxchg(
(atomic64_t *)(unsigned long) (DST + insn->off),
(u64) BPF_R0, (u64) SRC);
break;
default:
goto default_label;
}
CONT;
default_label:
pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
insn->code, insn->imm);
BUG_ON(1);
return 0;
}
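/*
 * The interpreter is stamped out once per 32-byte stack-size bucket
 * (32..512); bpf_prog_select_func() later picks the variant matching a
 * program's stack depth so no more interpreter stack is touched than needed.
 */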
#define PROG_NAME …
#define DEFINE_BPF_PROG_RUN …
#define PROG_NAME_ARGS …
#define DEFINE_BPF_PROG_RUN_ARGS …
#define EVAL1 …
#define EVAL2 …
#define EVAL3 …
#define EVAL4 …
#define EVAL5 …
#define EVAL6 …
EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
#define PROG_NAME_LIST …
static unsigned int (*interpreters[])(const void *ctx,
const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST …
static __maybe_unused
u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#ifdef CONFIG_BPF_SYSCALL
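/*
 * Used when a program with subprograms falls back to the interpreter: the
 * pseudo-call is rewritten to BPF_JMP | BPF_CALL_ARGS, with the immediate
 * pointing at the interpreter variant sized for the callee's stack depth.
 */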
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
stack_depth = max_t(u32, stack_depth, 1);
insn->off = (s16) insn->imm;
insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
__bpf_call_base_args;
insn->code = BPF_JMP | BPF_CALL_ARGS;
}
#endif
#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
const struct bpf_insn *insn)
{ … }
#endif
bool bpf_prog_map_compatible(struct bpf_map *map,
const struct bpf_prog *fp)
{ … }
static int bpf_check_tail_call(const struct bpf_prog *fp)
{ … }
static void bpf_prog_select_func(struct bpf_prog *fp)
{ … }
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{ … }
EXPORT_SYMBOL_GPL(…);
static unsigned int __bpf_prog_ret1(const void *ctx,
const struct bpf_insn *insn)
{ … }
static struct bpf_prog_dummy { … } dummy_bpf_prog = …;
struct bpf_empty_prog_array bpf_empty_prog_array = …;
EXPORT_SYMBOL(…);
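/*
 * bpf_prog_array is the RCU-managed array of programs used by multi-attach
 * points (cgroups, perf, etc.); the helpers below copy, update and query it
 * without blocking readers.
 */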
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{ … }
void bpf_prog_array_free(struct bpf_prog_array *progs)
{ … }
static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
{ … }
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
{ … }
int bpf_prog_array_length(struct bpf_prog_array *array)
{ … }
bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{ … }
static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
u32 *prog_ids,
u32 request_cnt)
{ … }
int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
__u32 __user *prog_ids, u32 cnt)
{ … }
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
struct bpf_prog *old_prog)
{ … }
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{ … }
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
struct bpf_prog *prog)
{ … }
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
u64 bpf_cookie,
struct bpf_prog_array **new_array)
{ … }
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{ … }
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
struct bpf_map **used_maps, u32 len)
{ … }
static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{ … }
void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
{ … }
static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{ … }
static void bpf_prog_free_deferred(struct work_struct *work)
{ … }
void bpf_prog_free(struct bpf_prog *fp)
{ … }
EXPORT_SYMBOL_GPL(…);
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
void bpf_user_rnd_init_once(void)
{ … }
BPF_CALL_0(bpf_user_rnd_u32)
{ … }
BPF_CALL_0(bpf_get_raw_cpu_id)
{ … }
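/*
 * Weak definitions of the helper protos and of bpf_event_output() below let
 * this core file link even when the subsystems that actually provide them
 * are not built in; the real definitions override these.
 */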
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{ … }
const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{ … }
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{ … }
EXPORT_SYMBOL_GPL(…);
const struct bpf_func_proto bpf_tail_call_proto = …;
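/*
 * Arch hooks: everything below is a __weak default that an architecture JIT
 * may override. The bpf_jit_supports_*() predicates report JIT capabilities
 * that the verifier consults when deciding how to rewrite programs.
 */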
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{ … }
void __weak bpf_jit_compile(struct bpf_prog *prog)
{ … }
bool __weak bpf_helper_changes_pkt_data(void *func)
{ … }
bool __weak bpf_jit_needs_zext(void)
{ … }
bool __weak bpf_jit_inlines_helper_call(s32 imm)
{ … }
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{ … }
bool __weak bpf_jit_supports_percpu_insn(void)
{ … }
bool __weak bpf_jit_supports_kfunc_call(void)
{ … }
bool __weak bpf_jit_supports_far_kfunc_call(void)
{ … }
bool __weak bpf_jit_supports_arena(void)
{ … }
bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
{ … }
u64 __weak bpf_arch_uaddress_limit(void)
{ … }
bool __weak bpf_jit_supports_ptr_xchg(void)
{ … }
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
int len)
{ … }
int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2)
{ … }
void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{ … }
int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{ … }
bool __weak bpf_jit_supports_exceptions(void)
{ … }
void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{ … }
__weak const struct bpf_map_ops arena_map_ops;
__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{ … }
__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{ … }
#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{ … }
late_initcall(bpf_global_ma_init);
#endif
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(…);
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);