linux/lib/test_bpf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for BPF interpreter and BPF JIT compiler
 *
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */

#define pr_fmt(fmt)

#include <linux/init.h>
#include <linux/module.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/sched.h>

/* General test specific settings */
#define MAX_SUBTESTS
#define MAX_TESTRUNS
#define MAX_DATA
#define MAX_INSNS
#define MAX_K

/* Few constants used to init test 'skb' */
#define SKB_TYPE
#define SKB_MARK
#define SKB_HASH
#define SKB_QUEUE_MAP
#define SKB_VLAN_TCI
#define SKB_VLAN_PRESENT
#define SKB_DEV_IFINDEX
#define SKB_DEV_TYPE

/* Redefine REGs to make tests less verbose */
#define R0
#define R1
#define R2
#define R3
#define R4
#define R5
#define R6
#define R7
#define R8
#define R9
#define R10

/* Flags that can be passed to test cases */
#define FLAG_NO_DATA
#define FLAG_EXPECTED_FAIL
#define FLAG_SKB_FRAG
#define FLAG_VERIFIER_ZEXT
#define FLAG_LARGE_MEM

enum {};

#define TEST_TYPE_MASK

struct bpf_test {};

/* Large test cases need separate allocation and fill handler. */
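
/*
 * A minimal sketch of what such a fill handler can look like (illustrative
 * only; the u.ptr.insns/u.ptr.len fields are an assumption about the elided
 * struct bpf_test layout): allocate the instruction array at runtime and
 * hand it back through the test descriptor.
 */
static int __maybe_unused example_fill_large_test(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Pad with harmless ALU instructions, then return 1 */
	for (i = 0; i < len - 2; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, R1, i);
	insn[len - 2] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[len - 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}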

static int bpf_fill_maxinsns1(struct bpf_test *self)
{}

static int bpf_fill_maxinsns2(struct bpf_test *self)
{}

static int bpf_fill_maxinsns3(struct bpf_test *self)
{}

static int bpf_fill_maxinsns4(struct bpf_test *self)
{}

static int bpf_fill_maxinsns5(struct bpf_test *self)
{}

static int bpf_fill_maxinsns6(struct bpf_test *self)
{}

static int bpf_fill_maxinsns7(struct bpf_test *self)
{}

static int bpf_fill_maxinsns8(struct bpf_test *self)
{}

static int bpf_fill_maxinsns9(struct bpf_test *self)
{}

static int bpf_fill_maxinsns10(struct bpf_test *self)
{}

static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
			 unsigned int plen)
{}

static int bpf_fill_maxinsns11(struct bpf_test *self)
{}

static int bpf_fill_maxinsns12(struct bpf_test *self)
{}

static int bpf_fill_maxinsns13(struct bpf_test *self)
{}

static int bpf_fill_ja(struct bpf_test *self)
{}

static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{}

static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{}

static int bpf_fill_stxw(struct bpf_test *self)
{}

static int bpf_fill_stxdw(struct bpf_test *self)
{}

static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
{}
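
/*
 * A minimal sketch of one way a helper like the one above can be written
 * (illustrative; the in-tree body is elided here). BPF_LD_IMM64 always
 * occupies two struct bpf_insn slots: the low 32 bits of the immediate go
 * into the first insn and the high 32 bits into the second.
 */
static int __maybe_unused example_ld_imm64(struct bpf_insn insns[2], u8 reg,
					   s64 imm64)
{
	struct bpf_insn tmp[] = { BPF_LD_IMM64(reg, imm64) };

	memcpy(insns, tmp, sizeof(tmp));

	return ARRAY_SIZE(tmp);	/* always 2 */
}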

/*
 * Branch conversion tests. Complex operations can expand to a lot
 * of instructions when JITed. This in turn may cause jump offsets
 * to overflow the field size of the native instruction, triggering
 * a branch conversion mechanism in some JITs.
 */
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
{}
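
/*
 * A minimal sketch of the kind of program such a filler can emit
 * (illustrative only, not the elided helper above): a conditional branch
 * whose target sits behind a long run of padding instructions, so the
 * JITed branch offset becomes large. The BPF-level offset field is 16 bits,
 * so len must stay below S16_MAX.
 */
static int __maybe_unused example_emit_long_branch(struct bpf_insn *insns,
						   int len, int jmp, int imm)
{
	int i = 0;

	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	/* If taken, this jumps straight to the EXIT and R0 stays 1 */
	insns[i++] = BPF_JMP_IMM(jmp, R0, imm, len - 3);
	/* If not taken, the padding runs and leaves R0 = 2 */
	while (i < len - 1)
		insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}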

/* Branch taken by runtime decision */
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{}

/* Branch not taken by runtime decision */
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{}

/* Branch always taken, known at JIT time */
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{}

/* Branch never taken, known at JIT time */
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{}

/* ALU result computation used in tests */
static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
{}

/* Test an ALU shift operation for all valid shift values */
static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
				u8 mode, bool alu32)
{}

static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
{}

/*
 * Test an ALU register shift operation for all valid shift values
 * for the case when the source and destination are the same.
 */
static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
					 bool alu32)
{}

static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
{}

/*
 * Common operand pattern generator for exhaustive power-of-two magnitude
 * tests. The block size parameters can be adjusted to increase/reduce the
 * number of combinations tested and thereby execution speed and memory
 * footprint.
 */

static inline s64 value(int msb, int delta, int sign)
{}
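
/*
 * A minimal sketch of the operand mapping described above (illustrative;
 * the in-tree helper body is elided): a power-of-two magnitude selected by
 * msb, offset by a small delta, with the sign applied to the magnitude.
 */
static inline s64 example_pattern_value(int msb, int delta, int sign)
{
	return sign * (1LL << msb) + delta;
}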

static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
			      int dbits, int sbits, int block1, int block2,
			      int (*emit)(struct bpf_test*, void*,
					  struct bpf_insn*, s64, s64))
{}

/*
 * Block size parameters used in pattern tests below. Tune as needed to
 * increase/reduce the number of combinations tested; see the following examples.
 *        block   values per operand MSB
 * ----------------------------------------
 *           0     none
 *           1     (1 << MSB)
 *           2     (1 << MSB) + [-1, 0]
 *           3     (1 << MSB) + [-1, 0, 1]
 */
#define PATTERN_BLOCK1
#define PATTERN_BLOCK2

/* Number of test runs for a pattern test */
#define NR_PATTERN_RUNS

/*
 * Exhaustive tests of ALU operations for all combinations of power-of-two
 * magnitudes of the operands, both for positive and negative values. The
 * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
 * emit different code depending on the magnitude of the immediate value.
 */
static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{}
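
/*
 * A minimal sketch of such an emit helper (illustrative only; register use
 * and the assumption that __bpf_ld_imm64() returns the number of
 * instructions written are mine, not taken from the elided bodies): apply
 * an ALU64 immediate operation and compare the result against a reference
 * value computed on the host, bailing out to EXIT on mismatch.
 */
static int __maybe_unused example_emit_alu64_imm_check(struct bpf_insn *insns,
							int op, s64 dst, s32 imm,
							u64 expect)
{
	int i = 0;

	i += __bpf_ld_imm64(&insns[i], R1, dst);	/* operand */
	i += __bpf_ld_imm64(&insns[i], R2, expect);	/* reference result */
	insns[i++] = BPF_ALU64_IMM(op, R1, imm);
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);	/* match: skip the bail-out */
	insns[i++] = BPF_EXIT_INSN();			/* mismatch: return early */

	return i;
}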

static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{}

static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
{}

static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
{}

static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
{}

static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
{}

/* ALU64 immediate operations */
static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_and_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_or_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_add_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_div_imm(struct bpf_test *self)
{}

static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
{}

/* ALU32 immediate operations */
static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_and_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_or_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_add_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_div_imm(struct bpf_test *self)
{}

static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
{}

/* ALU64 register operations */
static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_and_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_or_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_add_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_div_reg(struct bpf_test *self)
{}

static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
{}

/* ALU32 register operations */
static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_and_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_or_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_add_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_div_reg(struct bpf_test *self)
{}

static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
{}

/*
 * Test JITs that implement complex ALU operations as function
 * calls, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
{}
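
/*
 * A minimal sketch of the idea behind the fillers below (illustrative; it
 * assumes R1..R9 map to consecutive BPF_REG_* numbers): run the same
 * immediate operation with every destination register, so a JIT that lowers
 * the operation to a helper call has to marshal each register into and out
 * of the call correctly.
 */
static int __maybe_unused example_fill_imm_all_regs(struct bpf_insn *insns,
						    int op, s32 imm)
{
	int i = 0;
	int rd;

	for (rd = R1; rd <= R9; rd++) {
		insns[i++] = BPF_ALU64_IMM(BPF_MOV, rd, 1);
		insns[i++] = BPF_ALU64_IMM(op, rd, imm);
	}
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}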

/* ALU64 K registers */
static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
{}

/* ALU32 K registers */
static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
{}

static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
{}

/*
 * Test JITs that implement complex ALU operations as function
 * calls, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
{}

/* ALU64 X register combinations */
static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
{}

/* ALU32 X register combinations */
static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
{}

/*
 * Exhaustive tests of atomic operations for all power-of-two operand
 * magnitudes, both for positive and negative values.
 */
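
/*
 * A minimal sketch of a single probe such a test can emit (illustrative
 * only; register and stack usage are assumptions): a 64-bit fetch-and-add
 * against a value spilled to the stack.
 */
static int __maybe_unused example_emit_atomic64_add_fetch(struct bpf_insn *insns)
{
	int i = 0;

	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R1, 10);	/* initial memory value */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R2, 3);	/* addend */
	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);	/* spill to the stack */
	/* Memory becomes 13, R2 receives the old value 10 */
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R0, R10, -8);	/* R0 = 13 */
	insns[i++] = BPF_EXIT_INSN();

	return i;
}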

static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_fill_atomic64(struct bpf_test *self, int op)
{}

static int __bpf_fill_atomic32(struct bpf_test *self, int op)
{}

/* 64-bit atomic operations */
static int bpf_fill_atomic64_add(struct bpf_test *self)
{}

static int bpf_fill_atomic64_and(struct bpf_test *self)
{}

static int bpf_fill_atomic64_or(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xor(struct bpf_test *self)
{}

static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xchg(struct bpf_test *self)
{}

static int bpf_fill_cmpxchg64(struct bpf_test *self)
{}

/* 32-bit atomic operations */
static int bpf_fill_atomic32_add(struct bpf_test *self)
{}

static int bpf_fill_atomic32_and(struct bpf_test *self)
{}

static int bpf_fill_atomic32_or(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xor(struct bpf_test *self)
{}

static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xchg(struct bpf_test *self)
{}

static int bpf_fill_cmpxchg32(struct bpf_test *self)
{}

/*
 * Test JITs that implement ATOMIC operations as function calls or
 * other primitives, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
{}

/* 64-bit atomic register tests */
static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
{}

/* 32-bit atomic register tests */
static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
{}

static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
{}

/*
 * Test the two-instruction 64-bit immediate load operation for all
 * power-of-two magnitudes of the immediate operand. For each MSB, a block
 * of immediate values centered around the power-of-two MSB is tested,
 * both for positive and negative values. The test is designed to verify
 * the operation for JITs that emit different code depending on the magnitude
 * of the immediate value. This is often the case if the native instruction
 * immediate field width is narrower than 32 bits.
 */
static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
{}

/*
 * Test the two-instruction 64-bit immediate load operation for different
 * combinations of bytes. Each byte in the 64-bit word is constructed as
 * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
 * All combinations of the byte patterns (base1, mask1) and (base2, mask2)
 * are tested.
 */
static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
				     u8 base1, u8 mask1,
				     u8 base2, u8 mask2)
{}
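
/*
 * A minimal sketch of the per-byte construction described above
 * (illustrative; the helper name and the LCG constants are mine): combine
 * the fixed base bits selected by mask with pseudo-random bits elsewhere,
 * using a deterministic LCG so runs are reproducible.
 */
static u8 __maybe_unused example_pattern_byte(u8 base, u8 mask, u32 *state)
{
	*state = *state * 1664525 + 1013904223;	/* Numerical Recipes LCG */

	return (base & mask) | ((*state >> 24) & ~mask);
}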

static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
{}

static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
{}

static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
{}

static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
{}

/*
 * Exhaustive tests of JMP operations for all combinations of power-of-two
 * magnitudes of the operands, both for positive and negative values. The
 * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
 * emit different code depending on the magnitude of the immediate value.
 */

static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
{}
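
/*
 * A minimal sketch of a host-side reference predicate like the one above
 * (illustrative, only a few conditions shown): mirror the BPF jump
 * semantics, using unsigned compares for JGT/JGE/JLT/JLE and signed
 * compares for the JS* variants.
 */
static bool __maybe_unused example_match_jmp_cond(s64 v1, s64 v2, u8 op)
{
	switch (op) {
	case BPF_JSET:
		return !!(v1 & v2);
	case BPF_JEQ:
		return v1 == v2;
	case BPF_JNE:
		return v1 != v2;
	case BPF_JGT:
		return (u64)v1 > (u64)v2;	/* unsigned compare */
	case BPF_JSGT:
		return v1 > v2;			/* signed compare */
	default:
		return false;
	}
}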

static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 imm)
{}

static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{}

static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{}

static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
{}

static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
{}

static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
{}

static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
{}

/* JMP immediate tests */
static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
{}

/* JMP32 immediate tests */
static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
{}

/* JMP register tests */
static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
{}

/* JMP32 register tests */
static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
{}

static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
{}

/*
 * Set up a sequence of staggered jumps, forwards and backwards with
 * increasing offset. This tests the conversion of relative jumps to
 * JITed native jumps. On some architectures, for example MIPS, a large
 * PC-relative jump offset may overflow the immediate field of the native
 * conditional branch instruction, triggering a conversion to use an
 * absolute jump instead. Since this changes the jump offsets, another
 * offset computation pass is necessary, and that may in turn trigger
 * another branch conversion. This jump sequence is particularly nasty
 * in that regard.
 *
 * The sequence generation is parameterized by size and jump type.
 * The size must be even, and the expected result is always size + 1.
 * Below is an example with size=8 and result=9.
 *
 *                     ________________________Start
 *                     R0 = 0
 *                     R1 = r1
 *                     R2 = r2
 *            ,------- JMP +4 * 3______________Preamble: 4 insns
 * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
 * |          |        R0 = 8                                        |
 * |          |        JMP +7 * 3               ------------------------.
 * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------.     |  |
 * | |        |        R0 = 6                                  |     |  |
 * | |        |        JMP +5 * 3               ------------------.  |  |
 * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------.     |  |  |  |
 * | | |      |        R0 = 4                            |     |  |  |  |
 * | | |      |        JMP +3 * 3               ------------.  |  |  |  |
 * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--.     |  |  |  |  |  |
 * | | | |    |        R0 = 2                      |     |  |  |  |  |  |
 * | | | |    |        JMP +1 * 3               ------.  |  |  |  |  |  |
 * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1    1  2  3  4  5  6  7  8 loc
 * | | | | |           R0 = 1                     -1 +2 -3 +4 -5 +6 -7 +8 off
 * | | | | |           JMP -2 * 3               ---'  |  |  |  |  |  |  |
 * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----'  |  |  |  |  |  |
 * | | | | | |         R0 = 3                            |  |  |  |  |  |
 * | | | | | |         JMP -4 * 3               ---------'  |  |  |  |  |
 * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------'  |  |  |  |
 * | | | | | | |       R0 = 5                                  |  |  |  |
 * | | | | | | |       JMP -6 * 3               ---------------'  |  |  |
 * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------'  |  |
 * | | | | | | | |     R0 = 7                                        |  |
 * | | Error | | |     JMP -8 * 3               ---------------------'  |
 * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
 * | | | | | | | | |   R0 = 9__________________Sequence: 3 * size - 1 insns
 * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
 *
 */

/* The maximum size parameter */
#define MAX_STAGGERED_JMP_SIZE

/* We use a reduced number of iterations to get a reasonable execution time */
#define NR_STAGGERED_JMP_RUNS

static int __bpf_fill_staggered_jumps(struct bpf_test *self,
				      const struct bpf_insn *jmp,
				      u64 r1, u64 r2)
{}
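
/*
 * A minimal sketch of the preamble from the diagram above (illustrative;
 * it assumes r1 and r2 fit in 32-bit immediates): clear R0, load the two
 * operands, then jump into the middle of the staggered sequence.
 */
static int __maybe_unused example_emit_staggered_preamble(struct bpf_insn *insns,
							  int size, u32 r1, u32 r2)
{
	int i = 0;

	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
	insns[i++] = BPF_ALU32_IMM(BPF_MOV, R1, r1);
	insns[i++] = BPF_ALU32_IMM(BPF_MOV, R2, r2);
	insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);	/* into the middle */

	return i;
}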

/* 64-bit unconditional jump */
static int bpf_fill_staggered_ja(struct bpf_test *self)
{}

/* 64-bit immediate jumps */
static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
{}

/* 64-bit register jumps */
static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
{}

/* 32-bit immediate jumps */
static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
{}

/* 32-bit register jumps */
static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
{}

static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
{}


static struct bpf_test tests[] =;

static struct net_device dev;

static struct sk_buff *populate_skb(char *buf, int size)
{}

static void *generate_test_data(struct bpf_test *test, int sub)
{}

static void release_test_data(const struct bpf_test *test, void *data)
{}

static int filter_length(int which)
{}

static void *filter_pointer(int which)
{}

static struct bpf_prog *generate_filter(int which, int *err)
{}

static void release_filter(struct bpf_prog *fp, int which)
{}

static int __run_one(const struct bpf_prog *fp, const void *data,
		     int runs, u64 *duration)
{}

static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{}

static char test_name[64];
module_param_string();

static int test_id =;
module_param(test_id, int, 0);

static int test_range[2] =;
module_param_array();

static bool exclude_test(int test_id)
{}

static __init struct sk_buff *build_test_skb(void)
{}

static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
{}

struct skb_segment_test {};

static struct skb_segment_test skb_segment_tests[] __initconst =;

static __init int test_skb_segment_single(const struct skb_segment_test *test)
{}

static __init int test_skb_segment(void)
{}

static __init int test_bpf(void)
{}

struct tail_call_test {};

/* Flags that can be passed to tail call test cases */
#define FLAG_NEED_STATE
#define FLAG_RESULT_IN_STATE

/*
 * Magic marker used in test snippets for tail calls below.
 * A BPF_LD to R2 and a MOV to R3 with this immediate value are replaced
 * with the proper values by the test runner.
 */
#define TAIL_CALL_MARKER

/* Special offset to indicate a NULL call target */
#define TAIL_CALL_NULL

/* Special offset to indicate an out-of-range index */
#define TAIL_CALL_INVALID

#define TAIL_CALL(offset)
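
/*
 * A minimal sketch of what a tail call looks like at the bytecode level
 * (illustrative, not one of the table entries below): the context stays in
 * R1, the prog array pointer goes in R2 and the index in R3; execution
 * falls through to the next instruction if the tail call cannot be taken.
 * The markers are assumed to be patched by the test runner as described
 * above.
 */
static const struct bpf_insn example_tail_call_snippet[] __maybe_unused = {
	BPF_ALU64_IMM(BPF_MOV, R0, -1),		/* result if the call falls through */
	BPF_LD_IMM64(R2, TAIL_CALL_MARKER),	/* patched to the prog array pointer */
	BPF_ALU32_IMM(BPF_MOV, R3, TAIL_CALL_MARKER),	/* patched to the index */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_EXIT_INSN(),
};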

/*
 * A test function to be called from a BPF program, clobbering a lot of
 * CPU registers in the process. A JITed BPF program calling this function
 * must save and restore any caller-saved registers it uses for internal
 * state, for example the current tail call count.
 */
BPF_CALL_1(bpf_test_func, u64, arg)
{}
#define BPF_FUNC_test_func

/*
 * Tail call tests. Each test case may call any other test in the table,
 * including itself, specified as a relative index offset from the calling
 * test. The index TAIL_CALL_NULL can be used to specify a NULL target
 * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
 * results in a target index that is out of range.
 */
static struct tail_call_test tail_call_tests[] =;

static void __init destroy_tail_call_tests(struct bpf_array *progs)
{}

static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
{}

static __init int test_tail_calls(struct bpf_array *progs)
{}

static char test_suite[32];
module_param_string();

static __init int find_test_index(const char *test_name)
{}

static __init int prepare_test_range(void)
{}

static int __init test_bpf_init(void)
{}

static void __exit test_bpf_exit(void)
{}

module_init(test_bpf_init);
module_exit(test_bpf_exit);

MODULE_DESCRIPTION();
MODULE_LICENSE();