// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <[email protected]>
 *	Alexei Starovoitov <[email protected]>
 *	Daniel Borkmann <[email protected]>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/overflow.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <linux/execmem.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0
#define BPF_R1
#define BPF_R2
#define BPF_R3
#define BPF_R4
#define BPF_R5
#define BPF_R6
#define BPF_R7
#define BPF_R8
#define BPF_R9
#define BPF_R10

/* Named registers */
#define DST
#define SRC
#define FP
#define AX
#define ARG1
#define CTX
#define OFF
#define IMM

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{}

/* Tell BPF programs that include vmlinux.h what the kernel's PAGE_SIZE is. */
enum page_size_enum {};
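
/* Illustrative sketch only (the in-tree definition is elided above): the
 * enum exists purely so that the numeric value of PAGE_SIZE gets emitted
 * into BTF and therefore shows up in vmlinux.h. The names below are
 * hypothetical.
 */
#if 0
enum example_page_size_enum {
	__EXAMPLE_PAGE_SIZE = PAGE_SIZE,
};
#endif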

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{}
EXPORT_SYMBOL_GPL();

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{}

/* The JIT engine is responsible for providing an array
 * that maps insn_off to jited_off (insn_to_jit_off).
 *
 * The index into this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte offset to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{}
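
/* Illustrative sketch of the mapping described above; this is not the
 * in-tree body. It assumes linfo/jited_linfo were allocated by
 * bpf_prog_alloc_jited_linfo() and that linfo[i].insn_off is strictly
 * increasing (the verifier guarantees this). Field names follow
 * struct bpf_prog_aux; the function name is hypothetical.
 */
#if 0
static void example_fill_jited_linfo(struct bpf_prog *prog,
				     const u32 *insn_to_jit_off)
{
	u32 linfo_idx = prog->aux->linfo_idx;
	const struct bpf_line_info *linfo = &prog->aux->linfo[linfo_idx];
	void **jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	u32 insn_start = linfo[0].insn_off;
	u32 insn_end = insn_start + prog->len;
	u32 nr_linfo = prog->aux->nr_linfo - linfo_idx;
	u32 i;

	/* The first entry always points at the start of the jited image. */
	jited_linfo[0] = prog->bpf_func;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
#endif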

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{}

void __bpf_prog_free(struct bpf_prog *fp)
{}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN();
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN();
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{}

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{}

static const struct latch_tree_ops bpf_tree_ops =;

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{}

void bpf_ksym_del(struct bpf_ksym *ksym)
{}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{}

int __bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{}

bool is_bpf_text_address(unsigned long addr)
{}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT
#define BPF_PROG_CHUNK_SIZE
#define BPF_PROG_CHUNK_MASK

struct bpf_prog_pack {};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{}

#define BPF_PROG_SIZE_TO_NBITS(size)

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE
#else
#define BPF_PROG_PACK_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{}

void bpf_prog_pack_free(void *ptr, u32 size)
{}
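
/* Illustrative sketch of an allocation from one pack as described above;
 * not the in-tree implementation. The bitmap/ptr members of
 * struct bpf_prog_pack are assumed here for the sake of the example: each
 * set bit marks one occupied chunk.
 */
#if 0
static void *example_pack_alloc_chunks(struct bpf_prog_pack *pack, u32 size)
{
	u32 nbits = BPF_PROG_SIZE_TO_NBITS(size);
	unsigned long pos;

	pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT,
					 0, nbits, 0);
	if (pos >= BPF_PROG_CHUNK_COUNT)
		return NULL;

	bitmap_set(pack->bitmap, pos, nbits);
	return pack->ptr + (pos << BPF_PROG_CHUNK_SHIFT);
}
#endif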

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{}

static int __init bpf_jit_charge_init(void)
{}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 size)
{}

void bpf_jit_uncharge_modmem(u32 size)
{}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{}

void __weak bpf_jit_free_exec(void *addr)
{}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write the JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{}
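
/* Illustrative usage sketch for a JIT backend (not taken from any in-tree
 * JIT): allocate the RO/RW pair, emit into the RW buffer while computing
 * addresses against the RO image, then finalize to copy RW -> RO. Error
 * handling and the actual code emission are elided; the alignment value
 * and function name are arbitrary.
 */
#if 0
static int example_jit(struct bpf_prog *prog, unsigned int proglen)
{
	struct bpf_binary_header *ro_header, *rw_header;
	u8 *ro_image, *rw_image;

	ro_header = bpf_jit_binary_pack_alloc(proglen, &ro_image, 16,
					      &rw_header, &rw_image,
					      bpf_jit_fill_hole_with_zero);
	if (!ro_header)
		return -ENOMEM;

	/* ... emit instructions into rw_image, but encode jump targets and
	 * prog->bpf_func relative to ro_image ...
	 */

	if (bpf_jit_binary_pack_finalize(ro_header, rw_header))
		return -EFAULT;

	prog->bpf_func = (void *)ro_image;
	prog->jited = 1;
	return 0;
}
#endif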

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after JITing completed successfully;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{}
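
/* Sketch of case 2) above from a JIT backend's point of view (illustrative
 * only, hypothetical helper name): record the image size in the RO header
 * via bpf_arch_text_copy() so the pack allocator knows how many chunks to
 * release, then free both halves.
 */
#if 0
static void example_jit_failure_cleanup(struct bpf_binary_header *ro_header,
					struct bpf_binary_header *rw_header)
{
	bpf_arch_text_copy(&ro_header->size, &rw_header->size,
			   sizeof(rw_header->size));
	bpf_jit_binary_pack_free(ro_header, rw_header);
}
#endif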

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{}
EXPORT_SYMBOL_GPL();

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)

bool bpf_opcode_in_insntable(u8 code)
{}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 *
 * Return: whatever value is in %BPF_R0 at program exit
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
#define BPF_INSN_2_LBL
#define BPF_INSN_3_LBL
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT
#define CONT_JMP
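
/* For illustration, the two dispatch helpers above can be thought of as
 * the following (a sketch; the in-tree definitions are elided): advance
 * to the next instruction and jump back through the table.
 */
#if 0
#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
#endif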

select_insn:
	goto *jumptable[insn->code];

	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code, for example, in case of native 64 bit archs such
	 * as x86-64 or arm64, the compiler is optimizing the AND away for
	 * the interpreter. In case of JITs, each of the JIT backends compiles
	 * the BPF shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT backends,
	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
	 */
	/* ALU (shifts) */
#define SHT
	/* ALU (rest) */
#define ALU
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(XOR,  ^)
	ALU(MUL,  *)
	SHT(LSH, <<)
	SHT(RSH, >>)
#undef SHT
#undef ALU
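
	/* For illustration, one register-based shift case with the masking
	 * described above roughly expands to the following (a sketch, not
	 * the in-tree SHT() macro):
	 */
#if 0
	ALU64_LSH_X:
		DST = DST << (SRC & 63);
		CONT;
	ALU_LSH_X:
		DST = (u32) DST << ((u32) SRC & 31);
		CONT;
#endif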
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		switch (OFF) {
		case 0:
			DST = (u32) SRC;
			break;
		case 8:
			DST = (u32)(s8) SRC;
			break;
		case 16:
			DST = (u32)(s16) SRC;
			break;
		}
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		switch (OFF) {
		case 0:
			DST = SRC;
			break;
		case 8:
			DST = (s8) SRC;
			break;
		case 16:
			DST = (s16) SRC;
			break;
		case 32:
			DST = (s32) SRC;
			break;
		}
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= (SRC & 63);
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		switch (OFF) {
		case 0:
			div64_u64_rem(DST, SRC, &AX);
			DST = AX;
			break;
		case 1:
			AX = div64_s64(DST, SRC);
			DST = DST - AX * SRC;
			break;
		}
		CONT;
	ALU_MOD_X:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			DST = do_div(AX, (u32) SRC);
			break;
		case 1:
			AX = abs((s32)DST);
			AX = do_div(AX, abs((s32)SRC));
			if ((s32)DST < 0)
				DST = (u32)-AX;
			else
				DST = (u32)AX;
			break;
		}
		CONT;
	ALU64_MOD_K:
		switch (OFF) {
		case 0:
			div64_u64_rem(DST, IMM, &AX);
			DST = AX;
			break;
		case 1:
			AX = div64_s64(DST, IMM);
			DST = DST - AX * IMM;
			break;
		}
		CONT;
	ALU_MOD_K:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			DST = do_div(AX, (u32) IMM);
			break;
		case 1:
			AX = abs((s32)DST);
			AX = do_div(AX, abs((s32)IMM));
			if ((s32)DST < 0)
				DST = (u32)-AX;
			else
				DST = (u32)AX;
			break;
		}
		CONT;
	ALU64_DIV_X:
		switch (OFF) {
		case 0:
			DST = div64_u64(DST, SRC);
			break;
		case 1:
			DST = div64_s64(DST, SRC);
			break;
		}
		CONT;
	ALU_DIV_X:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			do_div(AX, (u32) SRC);
			DST = (u32) AX;
			break;
		case 1:
			AX = abs((s32)DST);
			do_div(AX, abs((s32)SRC));
			if (((s32)DST < 0) == ((s32)SRC < 0))
				DST = (u32)AX;
			else
				DST = (u32)-AX;
			break;
		}
		CONT;
	ALU64_DIV_K:
		switch (OFF) {
		case 0:
			DST = div64_u64(DST, IMM);
			break;
		case 1:
			DST = div64_s64(DST, IMM);
			break;
		}
		CONT;
	ALU_DIV_K:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			do_div(AX, (u32) IMM);
			DST = (u32) AX;
			break;
		case 1:
			AX = abs((s32)DST);
			do_div(AX, abs((s32)IMM));
			if (((s32)DST < 0) == ((s32)IMM < 0))
				DST = (u32)AX;
			else
				DST = (u32)-AX;
			break;
		}
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
	ALU64_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) __swab16(DST);
			break;
		case 32:
			DST = (__force u32) __swab32(DST);
			break;
		case 64:
			DST = (__force u64) __swab64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP32_JA:
		insn += insn->imm;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* ST, STX and LDX */
	ST_NOSPEC:
		/* Speculation barrier for mitigating Speculative Store Bypass.
		 * In case of arm64, we rely on the firmware mitigation as
		 * controlled via the ssbd kernel parameter. Whenever the
		 * mitigation is enabled, it works for all of the kernel code
		 * with no need to provide any additional instructions here.
		 * In case of x86, we use 'lfence' insn for mitigation. We
		 * reuse preexisting logic from Spectre v1 mitigation that
		 * happens to produce the required code on x86 for v4 as well.
		 */
		barrier_nospec();
		CONT;
#define LDST

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST

#define LDSX

	LDSX(B,   s8)
	LDSX(H,  s16)
	LDSX(W,  s32)
#undef LDSX

#define ATOMIC_ALU_OP

	STX_ATOMIC_DW:
	STX_ATOMIC_W:
		switch (IMM) {
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)
		ATOMIC_ALU_OP(BPF_OR, or)
		ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				SRC = (u32) atomic_xchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) SRC);
			else
				SRC = (u64) atomic64_xchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) SRC);
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				BPF_R0 = (u32) atomic_cmpxchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) BPF_R0, (u32) SRC);
			else
				BPF_R0 = (u64) atomic64_cmpxchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) BPF_R0, (u64) SRC);
			break;

		default:
			goto default_label;
		}
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
			insn->code, insn->imm);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME
#define DEFINE_BPF_PROG_RUN

#define PROG_NAME_ARGS
#define DEFINE_BPF_PROG_RUN_ARGS

#define EVAL1
#define EVAL2
#define EVAL3
#define EVAL4
#define EVAL5
#define EVAL6

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST
static __maybe_unused
u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
			   const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

#ifdef CONFIG_BPF_SYSCALL
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
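
/* Example of the mapping above: a subprog with stack_depth == 200 rounds up
 * to 224, so interpreters_args[224 / 32 - 1] is picked, i.e. the interpreter
 * variant generated for a 224-byte stack frame.
 */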
#endif
#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{}
#endif

bool bpf_prog_map_compatible(struct bpf_map *map,
			     const struct bpf_prog *fp)
{}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{}

static void bpf_prog_select_func(struct bpf_prog *fp)
{}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, fall back to the
 * interpreter.
 * The BPF program will be executed via bpf_prog_run() function.
 *
 * Return: the &fp argument along with &err set to 0 for success or
 * a negative errno code on failure
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{}
EXPORT_SYMBOL_GPL();
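
/* Minimal usage sketch (mirroring the syscall path), assuming @fp has
 * already passed the verifier; error handling beyond the err check is
 * elided and the function name is hypothetical.
 */
#if 0
static int example_finalize_prog(struct bpf_prog *fp)
{
	int err;

	fp = bpf_prog_select_runtime(fp, &err);
	/* On success, fp is ready to be executed via bpf_prog_run(). */
	return err;
}
#endif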

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{}

static struct bpf_prog_dummy {} dummy_bpf_prog =;

struct bpf_empty_prog_array bpf_empty_prog_array =;
EXPORT_SYMBOL();

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{}

static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
{}

void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
{}

int bpf_prog_array_length(struct bpf_prog_array *array)
{}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{}

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{}

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array)
{}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{}

void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
{}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{}

static void bpf_prog_free_deferred(struct work_struct *work)
{}

void bpf_prog_free(struct bpf_prog *fp)
{}
EXPORT_SYMBOL_GPL();

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{}

BPF_CALL_0(bpf_user_rnd_u32)
{}
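
/* Minimal sketch of such a helper, drawing on the per-CPU state declared
 * above; not the in-tree body, and the name is hypothetical.
 */
#if 0
BPF_CALL_0(example_user_rnd_u32)
{
	struct rnd_state *state = &get_cpu_var(bpf_user_rnd_state);
	u32 res = prandom_u32_state(state);

	put_cpu_var(bpf_user_rnd_state);
	return res;
}
#endif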

BPF_CALL_0(bpf_get_raw_cpu_id)
{}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{}
EXPORT_SYMBOL_GPL();

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto =;

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{}

bool __weak bpf_helper_changes_pkt_data(void *func)
{}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{}

/* Return true if the JIT inlines the call to the helper corresponding to
 * the imm.
 *
 * The verifier will not patch the insn->imm for the call to the helper if
 * this returns true.
 */
bool __weak bpf_jit_inlines_helper_call(s32 imm)
{}

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{}

bool __weak bpf_jit_supports_percpu_insn(void)
{}

bool __weak bpf_jit_supports_kfunc_call(void)
{}

bool __weak bpf_jit_supports_far_kfunc_call(void)
{}

bool __weak bpf_jit_supports_arena(void)
{}

bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
{}

u64 __weak bpf_arch_uaddress_limit(void)
{}

/* Return TRUE if the JIT backend satisfies the following two conditions:
 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
 * 2) Under the specific arch, the implementation of xchg() is the same
 *    as atomic_xchg() on pointer-sized words.
 */
bool __weak bpf_jit_supports_ptr_xchg(void)
{}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{}

bool __weak bpf_jit_supports_exceptions(void)
{}

void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{}

/* for configs without an MMU or for 32-bit archs */
__weak const struct bpf_map_ops arena_map_ops;
__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{}
__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{}

#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{}
late_initcall(bpf_global_ma_init);
#endif

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL();

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();