linux/kernel/bpf/verifier.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] =;

struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number of
 * insns is less than 4K but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by the check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument to be a pointer to stack, which will be used inside
 * the helper function as a pointer to the map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here the verifier looks at the prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
 * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
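
/* As a hedged illustration (an editor's example, not code consumed by the
 * verifier), the NULL-check transition described above corresponds to a
 * program fragment like the following, continuing the bpf_map_lookup_elem()
 * example. The map_fd, the 4-byte key at fp-4 and the assumption that the
 * map's value_size is at least 4 bytes are part of the example only:
 *
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // R2 type is PTR_TO_STACK
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),  // initialize the 4-byte key
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // NULL check on R0
 *    BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 42),   // here R0 type is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),                       // the R0 == NULL branch jumps here
 *                                           // with R0 being scalar 0
 */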

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ
#define BPF_COMPLEXITY_LIMIT_STATES

#define BPF_MAP_KEY_POISON
#define BPF_MAP_KEY_SEEN

#define BPF_GLOBAL_PERCPU_MA_MAX_SIZE

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      struct bpf_map *map,
			      bool unpriv, bool poison)
{}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{}

static bool bpf_helper_call(const struct bpf_insn *insn)
{}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{}

struct bpf_call_arg_meta {};

struct bpf_kfunc_call_arg_meta {};

struct btf *btf_vmlinux;

static const char *btf_type_name(const struct btf *btf, u32 id)
{}

static DEFINE_MUTEX(bpf_verifier_lock);
static DEFINE_MUTEX(bpf_percpu_ma_lock);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct bpf_retval_range range, const char *ctx,
				   const char *reg_name)
{}

static bool type_may_be_null(u32 type)
{}

static bool reg_not_null(const struct bpf_reg_state *reg)
{}

static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{}

static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{}

static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
{}

static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
{}

static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
{}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{}

static bool type_is_rdonly_mem(u32 type)
{}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{}

static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{}

static bool is_sync_callback_calling_kfunc(u32 btf_id);
static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);

static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);

static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{}

static bool is_async_callback_calling_function(enum bpf_func_id func_id)
{}

static bool is_callback_calling_function(enum bpf_func_id func_id)
{}

static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
{}

static bool is_async_callback_calling_insn(struct bpf_insn *insn)
{}

static bool is_may_goto_insn(struct bpf_insn *insn)
{}

static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{}

static bool is_storage_get_function(enum bpf_func_id func_id)
{}

static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
					const struct bpf_map *map)
{}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{}

static int __get_spi(s32 off)
{}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{}

static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{}

static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			          const char *obj_kind, int nr_slots)
{}

static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
{}

static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{}

static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
{}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{}

static void __mark_dynptr_reg(struct bpf_reg_state *reg,
			      enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id);

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
				   struct bpf_reg_state *sreg1,
				   struct bpf_reg_state *sreg2,
				   enum bpf_dynptr_type type)
{}

static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg,
			       enum bpf_dynptr_type type)
{}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
				        struct bpf_func_state *state, int spi);

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
{}

static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
{}

static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg);

static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
				        struct bpf_func_state *state, int spi)
{}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    enum bpf_arg_type arg_type)
{}

static void __mark_reg_known_zero(struct bpf_reg_state *reg);

static bool in_rcu_cs(struct bpf_verifier_env *env);

static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);

static int mark_stack_slots_iter(struct bpf_verifier_env *env,
				 struct bpf_kfunc_call_arg_meta *meta,
				 struct bpf_reg_state *reg, int insn_idx,
				 struct btf *btf, u32 btf_id, int nr_slots)
{}

static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, int nr_slots)
{}

static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
				     struct bpf_reg_state *reg, int nr_slots)
{}

static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   struct btf *btf, u32 btf_id, int nr_slots)
{}

/* Check if given stack slot is "special":
 *   - spilled register state (STACK_SPILL);
 *   - dynptr state (STACK_DYNPTR);
 *   - iter state (STACK_ITER).
 */
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{}

static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{}

static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
{}

/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
 * case they are equivalent, or it's STACK_ZERO, in which case we preserve
 * more precise STACK_ZERO.
 * Note, in unprivileged mode leaving STACK_INVALID is wrong, so we take
 * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
 */
static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{}

static void scrub_spilled_slot(u8 *stype)
{}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{}
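
/* Editor's sketch, not the in-tree body (which is elided above): a minimal
 * implementation of the copy_array() semantics documented in the preceding
 * comment, assuming a plain krealloc()-based resize. Overflow checking of
 * n * size and allocation-size rounding are deliberately left out of the
 * sketch; the hypothetical name marks it as illustration only.
 */
static __maybe_unused void *copy_array_sketch(void *dst, const void *src,
					      size_t n, size_t size, gfp_t flags)
{
	size_t bytes = n * size;

	/* leave dst untouched if there is nothing to copy */
	if (!src || !bytes)
		return dst;

	/* grow (or allocate) dst; its old contents need not be preserved */
	dst = krealloc(dst, bytes, flags);
	if (!dst)
		return NULL;

	memcpy(dst, src, bytes);
	return dst;
}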

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{}
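
/* Editor's sketch, not the in-tree body (which is elided above): the
 * realloc_array() semantics from the preceding comment, expressed with
 * krealloc_array() plus explicit zeroing of the newly added tail. As
 * documented, arr is returned untouched (not freed) when new_n is zero.
 */
static __maybe_unused void *realloc_array_sketch(void *arr, size_t old_n,
						 size_t new_n, size_t size)
{
	void *new_arr;

	if (!new_n || old_n == new_n)
		return arr;

	new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
	if (!new_arr)
		return NULL;

	/* zero out the items added by the grow */
	if (new_n > old_n)
		memset(new_arr + old_n * size, 0, (new_n - old_n) * size);

	return new_arr;
}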

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{}

/* Possibly update state->allocated_stack to be at least size bytes. Also
 * possibly update the function's high-water mark in its bpf_subprog_info.
 */
static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
{}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register.
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{}

static void free_func_state(struct bpf_func_state *state)
{}

static void clear_jmp_history(struct bpf_verifier_state *state)
{}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{}

static u32 state_htab_size(struct bpf_verifier_env *env)
{}

static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
{}

static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
{}

/* Open coded iterators allow back-edges in the state graph in order to
 * check unbounded loops driven by iterators.
 *
 * In is_state_visited() it is necessary to know if explored states are
 * part of some loops in order to decide whether non-exact states
 * comparison could be used:
 * - non-exact states comparison establishes sub-state relation and uses
 *   read and precision marks to do so, these marks are propagated from
 *   children states and thus are not guaranteed to be final in a loop;
 * - exact states comparison just checks if current and explored states
 *   are identical (and thus form a back-edge).
 *
 * Paper "A New Algorithm for Identifying Loops in Decompilation"
 * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient
 * algorithm for loop structure detection and gives an overview of
 * relevant terminology. It also has helpful illustrations.
 *
 * [1] https://api.semanticscholar.org/CorpusID:15784067
 *
 * We use a similar algorithm but, because the loop nesting structure is
 * irrelevant for the verifier, ours is significantly simpler and resembles
 * the strongly connected components algorithm from Sedgewick's textbook.
 *
 * Define the topmost loop entry as the first node of the loop traversed in a
 * depth first search starting from initial state. The goal of the loop
 * tracking algorithm is to associate topmost loop entries with states
 * derived from these entries.
 *
 * For each step in the DFS states traversal, the algorithm needs to identify
 * the following situations:
 *
 *          initial                     initial                   initial
 *            |                           |                         |
 *            V                           V                         V
 *           ...                         ...           .---------> hdr
 *            |                           |            |            |
 *            V                           V            |            V
 *           cur                     .-> succ          |    .------...
 *            |                      |    |            |    |       |
 *            V                      |    V            |    V       V
 *           succ                    '-- cur           |   ...     ...
 *                                                     |    |       |
 *                                                     |    V       V
 *                                                     |   succ <- cur
 *                                                     |    |
 *                                                     |    V
 *                                                     |   ...
 *                                                     |    |
 *                                                     '----'
 *
 *  (A) successor state of cur   (B) successor state of cur or its entry
 *      not yet traversed            are in current DFS path, thus cur and succ
 *                                   are members of the same outermost loop
 *
 *                      initial                  initial
 *                        |                        |
 *                        V                        V
 *                       ...                      ...
 *                        |                        |
 *                        V                        V
 *                .------...               .------...
 *                |       |                |       |
 *                V       V                V       V
 *           .-> hdr     ...              ...     ...
 *           |    |       |                |       |
 *           |    V       V                V       V
 *           |   succ <- cur              succ <- cur
 *           |    |                        |
 *           |    V                        V
 *           |   ...                      ...
 *           |    |                        |
 *           '----'                       exit
 *
 * (C) successor state of cur is a part of some loop but this loop
 *     does not include cur or successor state is not in a loop at all.
 *
 * Algorithm could be described as the following python code:
 *
 *     traversed = set()   # Set of traversed nodes
 *     entries = {}        # Mapping from node to loop entry
 *     depths = {}         # Depth level assigned to graph node
 *     path = set()        # Current DFS path
 *
 *     # Find outermost loop entry known for n
 *     def get_loop_entry(n):
 *         h = entries.get(n, None)
 *         while h in entries and entries[h] != h:
 *             h = entries[h]
 *         return h
 *
 *     # Update n's loop entry if h's outermost entry comes
 *     # before n's outermost entry in current DFS path.
 *     def update_loop_entry(n, h):
 *         n1 = get_loop_entry(n) or n
 *         h1 = get_loop_entry(h) or h
 *         if h1 in path and depths[h1] <= depths[n1]:
 *             entries[n] = h1
 *
 *     def dfs(n, depth):
 *         traversed.add(n)
 *         path.add(n)
 *         depths[n] = depth
 *         for succ in G.successors(n):
 *             if succ not in traversed:
 *                 # Case A: explore succ and update cur's loop entry
 *                 #         only if succ's entry is in current DFS path.
 *                 dfs(succ, depth + 1)
 *                 h = get_loop_entry(succ)
 *                 update_loop_entry(n, h)
 *             else:
 *                 # Case B or C depending on `h1 in path` check in update_loop_entry().
 *                 update_loop_entry(n, succ)
 *         path.remove(n)
 *
 * To adapt this algorithm for use with verifier:
 * - use st->branch == 0 as a signal that DFS of succ had been finished
 *   and cur's loop entry has to be updated (case A), handle this in
 *   update_branch_counts();
 * - use st->branch > 0 as a signal that st is in the current DFS path;
 * - handle cases B and C in is_state_visited();
 * - update topmost loop entry for intermediate states in get_loop_entry().
 */
static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
{}
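
/* Editor's sketch, not the in-tree body (which is elided above): a direct C
 * transcription of the python get_loop_entry() pseudocode, walking the
 * ->loop_entry chain of bpf_verifier_state up to the outermost known entry.
 * As the comment above notes, the in-tree version also updates the loop
 * entry of intermediate states along the way; that part is omitted here.
 */
static __maybe_unused struct bpf_verifier_state *
get_loop_entry_sketch(struct bpf_verifier_state *st)
{
	struct bpf_verifier_state *topmost = st->loop_entry;

	while (topmost && topmost->loop_entry && topmost->loop_entry != topmost)
		topmost = topmost->loop_entry;

	return topmost;
}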

static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
{}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{}

#define CALLER_SAVED_REGS
static const int caller_saved[CALLER_SAVED_REGS] =;

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{}

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{}

/* Mark the 'variable offset' part of a register as zero.  This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{}

static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{}

static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id)
{}

static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{}

static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
				struct btf_field_graph_root *ds_head)
{}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{}

static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
{}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{}

static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
{}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{}

static void reg_bounds_sync(struct bpf_reg_state *reg)
{}

static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, const char *ctx)
{}

static bool __reg32_bound_s64(s32 a)
{}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{}

/* Mark a register as having a completely unknown (scalar) value,
 * initialize .precise as true when not bpf capable.
 */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{}

static void mark_btf_ld_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *regs, u32 regno,
			    enum bpf_reg_type reg_type,
			    struct btf *btf, u32 btf_id,
			    enum bpf_type_flag flag)
{}

#define DEF_NOT_SUBREG
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{}

static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
{}

#define BPF_MAIN_FUNC
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{}

/* Similar to push_stack(), but for async callbacks */
static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
						int insn_idx, int prev_insn_idx,
						int subprog, bool is_sleepable)
{}


enum reg_arg_type {};

static int cmp_subprogs(const void *a, const void *b)
{}

static int find_subprog(struct bpf_verifier_env *env, int off)
{}

static int add_subprog(struct bpf_verifier_env *env, int off)
{}

static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
{}

#define MAX_KFUNC_DESCS
#define MAX_KFUNC_BTFS

struct bpf_kfunc_desc {};

struct bpf_kfunc_btf {};

struct bpf_kfunc_desc_tab {};

struct bpf_kfunc_btf_tab {};

static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{}

static int kfunc_btf_cmp_by_off(const void *a, const void *b)
{}

static const struct bpf_kfunc_desc *
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
{}

int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
		       u16 btf_fd_idx, u8 **func_addr)
{}

static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
					 s16 offset)
{}

void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
{}

static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
{}

static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
{}

static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
{}

static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog)
{}

bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{}

const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{}

static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
{}

static int check_subprogs(struct bpf_verifier_env *env)
{}

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{}

static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			  int spi, int nr_slots)
{}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64 bits, otherwise it returns FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{}

/* Return the regno defined by the insn, or -1. */
static int insn_def_regno(const struct bpf_insn *insn)
{}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{}

static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
			   enum reg_arg_type t)
{}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{}

static int insn_stack_access_flags(int frameno, int spi)
{}

static int insn_stack_access_spi(int insn_flags)
{}

static int insn_stack_access_frameno(int insn_flags)
{}

static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
{}

static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
{}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
			    int insn_flags)
{}

static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
						        u32 hist_end, int insn_idx)
{}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 * Return -ENOENT if we exhausted all instructions within given state.
 *
 * It's legal to have a bit of looping with the same starting and ending
 * insn index within the same state, e.g.: 3->4->5->3, so just because current
 * instruction index is the same as state's first_idx doesn't mean we are
 * done. If there is still some jump history left, we should keep going. We
 * need to take into account that we might have a jump history between given
 * state's parent and itself, due to checkpointing. In this case, we'll have
 * history entry recording a jump from last instruction of parent state and
 * first instruction of given state.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{}

static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
{}

static inline void bt_init(struct backtrack_state *bt, u32 frame)
{}

static inline void bt_reset(struct backtrack_state *bt)
{}

static inline u32 bt_empty(struct backtrack_state *bt)
{}

static inline int bt_subprog_enter(struct backtrack_state *bt)
{}

static inline int bt_subprog_exit(struct backtrack_state *bt)
{}

static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{}

static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{}

static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
{}

static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
{}

static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{}

static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{}

static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
{}

static inline u32 bt_reg_mask(struct backtrack_state *bt)
{}

static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
{}

static inline u64 bt_stack_mask(struct backtrack_state *bt)
{}

static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
{}

static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{}

/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
{}
/* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
{}
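
/* Editor's sketch, not the in-tree bodies (which are elided above): how a
 * register bitmask can be rendered as the "r0,r2,r4" style string mentioned
 * in the comment, using scnprintf(). fmt_stack_mask() follows the same
 * pattern but prints the -(slot + 1) * 8 byte offset for each set bit.
 */
static __maybe_unused void fmt_reg_mask_sketch(char *buf, ssize_t buf_sz, u32 reg_mask)
{
	bool first = true;
	ssize_t off = 0;
	int i;

	buf[0] = '\0';
	for (i = 0; i < 32; i++) {
		if (!(reg_mask & (1u << i)))
			continue;
		off += scnprintf(buf + off, buf_sz - off, "%sr%d",
				 first ? "" : ",", i);
		first = false;
	}
}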

static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
 *
 * @idx is an index of the instruction we are currently processing;
 * @subseq_idx is an index of the subsequent instruction that:
 *   - *would be* executed next, if jump history is viewed in forward order;
 *   - *was* processed previously during backtracking.
 */
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
			  struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
{}

/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   .  ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that contributed to these scalar
 *   registers as needing to be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk the register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute a single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtracking to mark a register as not precise when the verifier detects
 * that the program doesn't care about the specific value (e.g., when a helper
 * takes the register as an ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way till 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 * r9 -= r8
 * r5 = r9
 * if r5 > 0x79f goto pc+7
 *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 * r5 += 1
 * ...
 * call bpf_perf_event_output#25
 *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 * r6 = 1
 * call foo // uses callee's r6 inside to compute r0
 * r0 += r6
 * if r0 == 0 goto
 *
 * to track above reg_mask/stack_mask needs to be independent for each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{}

static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{}

static bool idset_contains(struct bpf_idset *s, u32 id)
{}

static int idset_push(struct bpf_idset *s, u32 id)
{}

static void idset_reset(struct bpf_idset *s)
{}

/* Collect a set of IDs for all registers currently marked as precise in env->bt.
 * Mark all registers with these IDs as precise.
 */
static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{}

/*
 * __mark_chain_precision() backtracks BPF program instruction sequence and
 * chain of verifier states making sure that register *regno* (if regno >= 0)
 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
 * SCALARS, as well as any other registers and slots that contribute to
 * a tracked state of given registers/stack slots, depending on specific BPF
 * assembly instructions (see backtrack_insn() for exact instruction handling
 * logic). This backtracking relies on recorded jmp_history and is able to
 * traverse entire chain of parent states. This process ends only when all the
 * necessary registers/slots and their transitive dependencies are marked as
 * precise.
 *
 * One important and subtle aspect is that precise marks *do not matter* in
 * the currently verified state (current state). It is important to understand
 * why this is the case.
 *
 * First, note that current state is the state that is not yet "checkpointed",
 * i.e., it is not yet put into env->explored_states, and it has no children
 * states as well. It's ephemeral, and can end up either a) being discarded if
 * compatible explored state is found at some point or BPF_EXIT instruction is
 * reached or b) checkpointed and put into env->explored_states, branching out
 * into one or more children states.
 *
 * In the former case, precise markings in current state are completely
 * ignored by state comparison code (see regsafe() for details). Only
 * checkpointed ("old") state precise markings are important, and if old
 * state's register/slot is precise, regsafe() assumes current state's
 * register/slot as precise and checks value ranges exactly and precisely. If
 * states turn out to be compatible, current state's necessary precise
 * markings and any required parent states' precise markings are enforced
 * after the fact with propagate_precision() logic. But it's
 * important to realize that in this case, even after marking current state
 * registers/slots as precise, we immediately discard current state. So what
 * actually matters is any of the precise markings propagated into current
 * state's parent states, which are always checkpointed (due to b) case above).
 * As such, for scenario a) it doesn't matter if current state has precise
 * markings set or not.
 *
 * Now, for the scenario b), checkpointing and forking into child(ren)
 * state(s). Note that before current state gets to checkpointing step, any
 * processed instruction always assumes precise SCALAR register/slot
 * knowledge: if precise value or range is useful to prune jump branch, BPF
 * verifier takes this opportunity enthusiastically. Similarly, when
 * register's value is used to calculate offset or memory address, exact
 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
 * what we mentioned above about state comparison ignoring precise markings
 * during state comparison, BPF verifier ignores and also assumes precise
 * markings *at will* during instruction verification process. But as verifier
 * assumes precision, it also propagates any precision dependencies across
 * parent states, which are not yet finalized, so can be further restricted
 * based on new knowledge gained from restrictions enforced by their children
 * states. This is so that once those parent states are finalized, i.e., when
 * they have no more active children state, state comparison logic in
 * is_state_visited() would enforce strict and precise SCALAR ranges, if
 * required for correctness.
 *
 * To build a bit more intuition, note also that once a state is checkpointed,
 * the path we took to get to that state is not important. This is a crucial
 * property for state pruning. When state is checkpointed and finalized at
 * some instruction index, it can be correctly and safely used to "short
 * circuit" any *compatible* state that reaches exactly the same instruction
 * index. I.e., if we jumped to that instruction from a completely different
 * code path than original finalized state was derived from, it doesn't
 * matter, current state can be discarded because from that instruction
 * forward having a compatible state will ensure we will safely reach the
 * exit. States describe preconditions for further exploration, but completely
 * forget the history of how we got here.
 *
 * This also means that even if we needed precise SCALAR range to get to
 * finalized state, but from that point forward *that same* SCALAR register is
 * never used in a precise context (i.e., its precise value is not needed for
 * correctness), it's correct and safe to mark such register as "imprecise"
 * (i.e., precise marking set to false). This is what we rely on when we do
 * not set precise marking in current state. If no child state requires
 * precision for any given SCALAR register, it's safe to dictate that it can
 * be imprecise. If any child state does require this register to be precise,
 * we'll mark it precise later retroactively during precise markings
 * propagation from child state to parent states.
 *
 * Skipping precise marking setting in current state is a mild version of
 * relying on the above observation. But we can utilize this property even
 * more aggressively by proactively forgetting any precise marking in the
 * current state (which we inherited from the parent state), right before we
 * checkpoint it and branch off into new child state. This is done by
 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
 * finalized states which help in short circuiting more future states.
 */
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
{}

int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{}

/* mark_chain_precision_batch() assumes that env->bt is set in the caller to
 * desired reg and stack masks across all relevant frames
 */
static int mark_chain_precision_batch(struct bpf_verifier_env *env)
{}

static bool is_spillable_regtype(enum bpf_reg_type type)
{}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{}

/* check if register is a constant scalar value */
static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
{}

/* assuming is_reg_const() is true, return constant value of a register */
static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
{}

static bool __is_pointer_value(bool allow_ptr_leaks,
			       const struct bpf_reg_state *reg)
{}

static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
					struct bpf_reg_state *src_reg)
{}

/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{}

static void save_register_state(struct bpf_verifier_env *env,
				struct bpf_func_state *state,
				int spi, struct bpf_reg_state *reg,
				int size)
{}

static bool is_bpf_st_mem(struct bpf_insn *insn)
{}

static int get_reg_width(struct bpf_reg_state *reg)
{}

/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
				       /* stack frame we're writing to */
				       struct bpf_func_state *state,
				       int off, int size, int value_regno,
				       int insn_idx)
{}

/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
 * known to contain a variable offset.
 * This function checks whether the write is permitted and conservatively
 * tracks the effects of the write, considering that each stack slot in the
 * dynamic range is potentially written to.
 *
 * 'off' includes 'regno->off'.
 * 'value_regno' can be -1, meaning that an unknown value is being written to
 * the stack.
 *
 * Spilled pointers in range are not marked as written because we don't know
 * what's going to be actually written. This means that read propagation for
 * future reads cannot be terminated by this write.
 *
 * For privileged programs, uninitialized stack slots are considered
 * initialized by this write (even though we don't know exactly what offsets
 * are going to be written to). The idea is that we don't want the verifier to
 * reject future reads that access slots written to through variable offsets.
 */
static int check_stack_write_var_off(struct bpf_verifier_env *env,
				     /* func where register points to */
				     struct bpf_func_state *state,
				     int ptr_regno, int off, int size,
				     int value_regno, int insn_idx)
{}

/* When register 'dst_regno' is assigned some values from stack[min_off,
 * max_off), we set the register's type according to the types of the
 * respective stack slots. If all the stack values are known to be zeros, then
 * so is the destination reg. Otherwise, the register is considered to be
 * SCALAR. This function does not deal with register filling; the caller must
 * ensure that all spilled registers in the stack range have been marked as
 * read.
 */
static void mark_reg_stack_read(struct bpf_verifier_env *env,
				/* func where src register points to */
				struct bpf_func_state *ptr_state,
				int min_off, int max_off, int dst_regno)
{}

/* Read the stack at 'off' and put the results into the register indicated by
 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
 * spilled reg.
 *
 * 'dst_regno' can be -1, meaning that the read value is not going to a
 * register.
 *
 * The access is assumed to be within the current stack bounds.
 */
static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
				      /* func where src register points to */
				      struct bpf_func_state *reg_state,
				      int off, int size, int dst_regno)
{}

enum bpf_access_src {};

static int check_stack_range_initialized(struct bpf_verifier_env *env,
					 int regno, int off, int access_size,
					 bool zero_size_allowed,
					 enum bpf_access_src type,
					 struct bpf_call_arg_meta *meta);

static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{}

/* Read the stack at 'ptr_regno + off' and put the result into the register
 * 'dst_regno'.
 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
 * but not its variable offset.
 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
 *
 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
 * filling registers (i.e. reads of spilled register cannot be detected when
 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
 * offset; for a fixed offset check_stack_read_fixed_off should be used
 * instead.
 */
static int check_stack_read_var_off(struct bpf_verifier_env *env,
				    int ptr_regno, int off, int size, int dst_regno)
{}

/* check_stack_read dispatches to check_stack_read_fixed_off or
 * check_stack_read_var_off.
 *
 * The caller must ensure that the offset falls within the allocated stack
 * bounds.
 *
 * 'dst_regno' is a register which will receive the value from the stack. It
 * can be -1, meaning that the read value is not going to a register.
 */
static int check_stack_read(struct bpf_verifier_env *env,
			    int ptr_regno, int off, int size,
			    int dst_regno)
{}


/* check_stack_write dispatches to check_stack_write_fixed_off or
 * check_stack_write_var_off.
 *
 * 'ptr_regno' is the register used as a pointer into the stack.
 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
 * 'value_regno' is the register whose value we're writing to the stack. It can
 * be -1, meaning that we're not writing from a register.
 *
 * The caller must ensure that the offset falls within the maximum stack size.
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     int ptr_regno, int off, int size,
			     int value_regno, int insn_idx)
{}

static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, enum bpf_access_type type)
{}

/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
static int __check_mem_access(struct bpf_verifier_env *env, int regno,
			      int off, int size, u32 mem_size,
			      bool zero_size_allowed)
{}

/* check read/write into a memory region with possible variable offset */
static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
				   int off, int size, u32 mem_size,
				   bool zero_size_allowed)
{}

static int __check_ptr_off_reg(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int regno,
			       bool fixed_off_ok)
{}

static int check_ptr_off_reg(struct bpf_verifier_env *env,
		             const struct bpf_reg_state *reg, int regno)
{}

static int map_kptr_match_type(struct bpf_verifier_env *env,
			       struct btf_field *kptr_field,
			       struct bpf_reg_state *reg, u32 regno)
{}

static bool in_sleepable(struct bpf_verifier_env *env)
{}

/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
 * can dereference RCU protected pointers and the result is PTR_TRUSTED.
 */
static bool in_rcu_cs(struct bpf_verifier_env *env)
{}

/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
BTF_SET_START(rcu_protected_types)
BTF_ID()
#ifdef CONFIG_CGROUPS
BTF_ID()
#endif
#ifdef CONFIG_BPF_JIT
BTF_ID()
#endif
BTF_ID()
BTF_ID()
BTF_SET_END()

static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
{}

static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
{}

static bool rcu_safe_kptr(const struct btf_field *field)
{}

static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
{}

static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
				 int value_regno, int insn_idx,
				 struct btf_field *kptr_field)
{}

/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed,
			    enum bpf_access_src src)
{}

#define MAX_PACKET_OFF

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{}

static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{}

/* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
			    struct btf **btf, u32 *btf_id)
{}

static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
				  int size)
{}

static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
			     u32 regno, int off, int size,
			     enum bpf_access_type t)
{}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{}

static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{}

static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
{}

static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] =;

static bool is_trusted_reg(const struct bpf_reg_state *reg)
{}

static bool is_rcu_reg(const struct bpf_reg_state *reg)
{}

static void clear_trusted_flags(enum bpf_type_flag *flag)
{}

static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
{}

static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
				       const struct bpf_reg_state *reg,
				       const char *pointer_desc,
				       int off, int size, bool strict)
{}

static int check_ptr_alignment(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int off,
			       int size, bool strict_alignment_once)
{}

static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
{}

/* Starting from the main bpf function, walk all instructions of the function
 * and recursively walk all callees that the given function can call.
 * Ignore jump and exit insns.
 * Since recursion is prevented by check_cfg(), this algorithm
 * only needs a local stack of MAX_CALL_FRAMES to remember callsites.
 */
static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
{}

static int check_max_stack_depth(struct bpf_verifier_env *env)
{}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int start = idx + insn->imm + 1, subprog;

	subprog = find_subprog(env, start);
	if (subprog < 0) {
		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
			  start);
		return -EFAULT;
	}
	return env->subprog_info[subprog].stack_depth;
}
#endif

static int __check_buffer_access(struct bpf_verifier_env *env,
				 const char *buf_info,
				 const struct bpf_reg_state *reg,
				 int regno, int off, int size)
{}

static int check_tp_buffer_access(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  int regno, int off, int size)
{}

static int check_buffer_access(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg,
			       int regno, int off, int size,
			       bool zero_size_allowed,
			       u32 *max_access)
{}

/* BPF architecture zero extends alu32 ops into 64-bit registers */
static void zext_32_to_64(struct bpf_reg_state *reg)
{}

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{}

static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
{}

static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
{}

static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
{}

static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
{}

static bool bpf_map_is_rdonly(const struct bpf_map *map)
{}

static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
			       bool is_ldsx)
{}

#define BTF_TYPE_SAFE_RCU(__type)
#define BTF_TYPE_SAFE_RCU_OR_NULL(__type)
#define BTF_TYPE_SAFE_TRUSTED(__type)
#define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type)

/*
 * Allow-list a few fields as RCU trusted or fully trusted.
 * This logic doesn't allow mixed tagging and will be removed once GCC supports
 * btf_type_tag.
 */

/* RCU trusted: these fields are trusted in RCU CS and never NULL */
BTF_TYPE_SAFE_RCU(struct task_struct) {};

BTF_TYPE_SAFE_RCU(struct cgroup) {};

BTF_TYPE_SAFE_RCU(struct css_set) {};

/* RCU trusted: these fields are trusted in RCU CS and can be NULL */
BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {};

/* skb->sk, req->sk are not RCU protected, but we mark them as such
 * because bpf prog accessible sockets are SOCK_RCU_FREE.
 */
BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {};

BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {};

/* fully trusted: these fields are trusted even outside of RCU CS and never NULL */
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {};

BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {};

BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {};

BTF_TYPE_SAFE_TRUSTED(struct file) {};

BTF_TYPE_SAFE_TRUSTED(struct dentry) {};

BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) {};

static bool type_is_rcu(struct bpf_verifier_env *env,
			struct bpf_reg_state *reg,
			const char *field_name, u32 btf_id)
{}

static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
				struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id)
{}

static bool type_is_trusted(struct bpf_verifier_env *env,
			    struct bpf_reg_state *reg,
			    const char *field_name, u32 btf_id)
{}

static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
				    struct bpf_reg_state *reg,
				    const char *field_name, u32 btf_id)
{}

static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs,
				   int regno, int off, int size,
				   enum bpf_access_type atype,
				   int value_regno)
{}

static int check_ptr_to_map_access(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs,
				   int regno, int off, int size,
				   enum bpf_access_type atype,
				   int value_regno)
{}

/* Check that the stack access at the given offset is within bounds. The
 * maximum valid offset is -1.
 *
 * The minimum valid offset is -MAX_BPF_STACK for writes, and
 * -state->allocated_stack for reads.
 */
static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
                                          s64 off,
                                          struct bpf_func_state *state,
                                          enum bpf_access_type t)
{}
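
/* Worked example (illustrative): an 8-byte write at off == -8 touches bytes
 * [-8, -1] of the frame and is within bounds, while the same write at
 * off == 0 (above the frame) or starting below -MAX_BPF_STACK is rejected.
 */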

/* Check that the stack access at 'regno + off' falls within the maximum stack
 * bounds.
 *
 * 'off' includes 'regno->off', but not its dynamic part (if any).
 */
static int check_stack_access_within_bounds(
		struct bpf_verifier_env *env,
		int regno, int off, int access_size,
		enum bpf_access_src src, enum bpf_access_type type)
{}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
			    int off, int bpf_size, enum bpf_access_type t,
			    int value_regno, bool strict_alignment_once, bool is_ldsx)
{}

static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
			     bool allow_trust_mismatch);

static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{}

/* When register 'regno' is used to read the stack (either directly or through
 * a helper function) make sure that it's within stack boundary and, depending
 * on the access type and privileges, that all elements of the stack are
 * initialized.
 *
 * 'off' includes 'regno->off', but not its dynamic part (if any).
 *
 * All registers that have been spilled on the stack in the slots within the
 * read offsets are marked as read.
 */
static int check_stack_range_initialized(
		struct bpf_verifier_env *env, int regno, int off,
		int access_size, bool zero_size_allowed,
		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
{}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{}

/* verify arguments to helpers or kfuncs consisting of a pointer and an access
 * size.
 *
 * @regno is the register containing the access size. regno-1 is the register
 * containing the pointer.
 */
static int check_mem_size_reg(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg, u32 regno,
			      bool zero_size_allowed,
			      struct bpf_call_arg_meta *meta)
{}
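
/* Illustrative example: for a helper such as
 * bpf_probe_read_kernel(dst, size, unsafe_ptr), the size argument lives in R2
 * and the destination buffer in R1 (regno - 1); the verifier bounds the value
 * of R2 by the size of the memory that R1 points to (e.g. a stack buffer or
 * map value).
 */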

static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			 u32 regno, u32 mem_size)
{}

static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    u32 regno)
{}

/* Implementation details:
 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
 * Two bpf_map_lookups (even with the same key) will have different reg->id.
 * Two separate bpf_obj_new will also have different reg->id.
 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
 * clears reg->id after value_or_null->value transition, since the verifier only
 * cares about the range of access to valid map value pointer and doesn't care
 * about actual address of the map element.
 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
 * reg->id > 0 after value_or_null->value transition. By doing so
 * two bpf_map_lookups will be considered two different pointers that
 * point to different bpf_spin_locks. Likewise for pointers to allocated objects
 * returned from bpf_obj_new.
 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 * deadlocks.
 * Since only one bpf_spin_lock is allowed the checks are simpler than
 * reg_is_refcounted() logic. The verifier needs to remember only
 * one spin_lock instead of array of acquired_refs.
 * cur_state->active_lock remembers which map value element or allocated
 * object got locked and clears it after bpf_spin_unlock.
 */
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
			     bool is_lock)
{}
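
/* Illustrative BPF C sketch (assumes an array map 'lock_map' whose value type
 * contains a 'struct bpf_spin_lock lock' and an 'int cnt'):
 *
 *	struct val *v = bpf_map_lookup_elem(&lock_map, &key);
 *
 *	if (!v)
 *		return 0;
 *	bpf_spin_lock(&v->lock);
 *	v->cnt++;
 *	bpf_spin_unlock(&v->lock);
 *
 * A second lookup (even with the same key) produces a pointer with a
 * different reg->id, so attempting to lock it while the first lock is still
 * held is rejected: only one bpf_spin_lock may be held at a time.
 */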

static int process_timer_func(struct bpf_verifier_env *env, int regno,
			      struct bpf_call_arg_meta *meta)
{}

static int process_wq_func(struct bpf_verifier_env *env, int regno,
			   struct bpf_kfunc_call_arg_meta *meta)
{}

static int process_kptr_func(struct bpf_verifier_env *env, int regno,
			     struct bpf_call_arg_meta *meta)
{}

/* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
 *
 * In both cases we deal with the first 8 bytes, but need to mark the next 8
 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
 *
 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
 * mutate the view of the dynptr and also possibly destroy it. In the latter
 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
 * memory that dynptr points to.
 *
 * The verifier will keep track of both levels of mutation (bpf_dynptr's in
 * reg->type and the memory's in reg->dynptr.type), but there is no support for
 * readonly dynptr view yet, hence only the first case is tracked and checked.
 *
 * This is consistent with how C applies the const modifier to a struct object,
 * where the pointer itself inside bpf_dynptr becomes const but not what it
 * points to.
 *
 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
 */
static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
			       enum bpf_arg_type arg_type, int clone_ref_obj_id)
{}
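
/* Illustrative sketch of the two mutability levels (assumes a ringbuf map
 * 'rb' and a buffer 'buf'):
 *
 *	struct bpf_dynptr ptr;
 *
 *	bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr); // initializes/mutates the
 *	                                              // dynptr itself
 *	bpf_dynptr_write(&ptr, 0, buf, 64, 0);        // takes 'const struct bpf_dynptr *':
 *	                                              // only the memory it points
 *	                                              // to is written
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);           // consumes/destroys the dynptr view
 */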

static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
{}

static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
{}

static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
			    struct bpf_kfunc_call_arg_meta *meta)
{}

/* Look for a previous loop entry at insn_idx: nearest parent state
 * stopped at insn_idx with callsites matching those in cur->frame.
 */
static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
						  struct bpf_verifier_state *cur,
						  int insn_idx)
{}

static void reset_idmap_scratch(struct bpf_verifier_env *env);
static bool regs_exact(const struct bpf_reg_state *rold,
		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap);

static void maybe_widen_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
			    struct bpf_idmap *idmap)
{}

static int widen_imprecise_scalars(struct bpf_verifier_env *env,
				   struct bpf_verifier_state *old,
				   struct bpf_verifier_state *cur)
{}

/* process_iter_next_call() is called when verifier gets to iterator's next
 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
 * to it as just "iter_next()" in comments below.
 *
 * BPF verifier relies on a crucial contract for any iter_next()
 * implementation: it should *eventually* return NULL, and once that happens
 * it should keep returning NULL. That is, once iterator exhausts elements to
 * iterate, it should never reset or spuriously return new elements.
 *
 * With the assumption of such contract, process_iter_next_call() simulates
 * a fork in the verifier state to validate loop logic correctness and safety
 * without having to simulate an infinite number of iterations.
 *
 * In current state, we first assume that iter_next() returned NULL and
 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
 * conditions we should not form an infinite loop and should eventually reach
 * exit.
 *
 * Besides that, we also fork current state and enqueue it for later
 * verification. In a forked state we keep iterator state as ACTIVE
 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
 * also bump iteration depth to prevent erroneous infinite loop detection
 * later on (see iter_active_depths_differ() comment for details). In this
 * state we assume that we'll eventually loop back to another iter_next()
 * call (it could be in exactly the same location or in some other instruction,
 * it doesn't matter, we don't make any unnecessary assumptions about this,
 * everything revolves around iterator state in a stack slot, not which
 * instruction is calling iter_next()). When that happens, we either will come
 * to iter_next() with equivalent state and can conclude that next iteration
 * will proceed in exactly the same way as we just verified, so it's safe to
 * assume that loop converges. If not, we'll go on another iteration
 * simulation with a different input state, until all possible starting states
 * are validated or we reach maximum number of instructions limit.
 *
 * This way, we will either exhaustively discover all possible input states
 * that iterator loop can start with and eventually will converge, or we'll
 * effectively regress into bounded loop simulation logic and either reach
 * maximum number of instructions if loop is not provably convergent, or there
 * is some statically known limit on number of iterations (e.g., if there is
 * an explicit `if n > 100 then break;` statement somewhere in the loop).
 *
 * Iteration convergence logic in is_state_visited() relies on exact
 * states comparison, which ignores read and precision marks.
 * This is necessary because read and precision marks are not finalized
 * while in the loop. Exact comparison might preclude convergence for
 * simple programs like below:
 *
 *     i = 0;
 *     while(iter_next(&it))
 *       i++;
 *
 * At each iteration step i++ would produce a new distinct state and
 * eventually instruction processing limit would be reached.
 *
 * To avoid such behavior speculatively forget (widen) range for
 * imprecise scalar registers, if those registers were not precise at the
 * end of the previous iteration and do not match exactly.
 *
 * This is a conservative heuristic that allows verifying a wide range of
 * programs; however, it precludes verification of programs that conjure an
 * imprecise value on the first loop iteration and use it as precise on the second.
 * For example, the following safe program would fail to verify:
 *
 *     struct bpf_num_iter it;
 *     int arr[10];
 *     int i = 0, a = 0;
 *     bpf_iter_num_new(&it, 0, 10);
 *     while (bpf_iter_num_next(&it)) {
 *       if (a == 0) {
 *         a = 1;
 *         i = 7; // Because i changed, the verifier would forget
 *                // its range on the second loop entry.
 *       } else {
 *         arr[i] = 42; // This would fail to verify.
 *       }
 *     }
 *     bpf_iter_num_destroy(&it);
 */
static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
				  struct bpf_kfunc_call_arg_meta *meta)
{}
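
/* For contrast, a variant of the above that does verify keeps the index
 * provably in bounds at the point of use (illustrative sketch):
 *
 *	while (bpf_iter_num_next(&it)) {
 *		...
 *		if (i >= 0 && i < 10)
 *			arr[i] = 42; // bounds re-established on every iteration,
 *			             // so widening i's range is harmless
 *	}
 */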

static bool arg_type_is_mem_size(enum bpf_arg_type type)
{}

static bool arg_type_is_release(enum bpf_arg_type type)
{}

static bool arg_type_is_dynptr(enum bpf_arg_type type)
{}

static int int_ptr_type_to_size(enum bpf_arg_type type)
{}

static int resolve_map_arg_type(struct bpf_verifier_env *env,
				 const struct bpf_call_arg_meta *meta,
				 enum bpf_arg_type *arg_type)
{}

struct bpf_reg_types {};

static const struct bpf_reg_types sock_types =;

#ifdef CONFIG_NET
static const struct bpf_reg_types btf_id_sock_common_types =;
#endif

static const struct bpf_reg_types mem_types =;

static const struct bpf_reg_types int_ptr_types =;

static const struct bpf_reg_types spin_lock_types =;

static const struct bpf_reg_types fullsock_types =;
static const struct bpf_reg_types scalar_types =;
static const struct bpf_reg_types context_types =;
static const struct bpf_reg_types ringbuf_mem_types =;
static const struct bpf_reg_types const_map_ptr_types =;
static const struct bpf_reg_types btf_ptr_types =;
static const struct bpf_reg_types percpu_btf_ptr_types =;
static const struct bpf_reg_types func_ptr_types =;
static const struct bpf_reg_types stack_ptr_types =;
static const struct bpf_reg_types const_str_ptr_types =;
static const struct bpf_reg_types timer_types =;
static const struct bpf_reg_types kptr_types =;
static const struct bpf_reg_types dynptr_types =;

static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] =;

static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  const u32 *arg_btf_id,
			  struct bpf_call_arg_meta *meta)
{}

static struct btf_field *
reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
{}

static int check_func_arg_reg_off(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg, int regno,
				  enum bpf_arg_type arg_type)
{}

static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
						const struct bpf_func_proto *fn,
						struct bpf_reg_state *regs)
{}

static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
					    struct bpf_reg_state *reg)
{}

static int check_reg_const_str(struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg, u32 regno)
{}

static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
			  struct bpf_call_arg_meta *meta,
			  const struct bpf_func_proto *fn,
			  int insn_idx)
{}

static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
{}

static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
{}

static int check_map_func_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map, int func_id)
{}

static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{}

static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
{}

static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{}

static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{}

static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{}

/* Packet data might have moved; any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 *
 * This also applies to dynptr slices belonging to skb and xdp dynptrs,
 * since these slices point to packet data.
 */
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{}
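
/* Illustrative sketch: after a helper that may change packet data, stale
 * packet pointers must not be reused:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + 4 > data_end)
 *		return 0;
 *	bpf_skb_pull_data(skb, 64);	// packet data may be reallocated
 *	// 'data' and 'data_end' are now unknown SCALAR_VALUEs and must be
 *	// reloaded from skb and re-checked before any further packet access.
 */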

enum {};

static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
{}

/* The pointer with the specified id has released its reference to kernel
 * resources. Identify all copies of the same pointer and clear the reference.
 */
static int release_reference(struct bpf_verifier_env *env,
			     int ref_obj_id)
{}

static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
{}

static void clear_caller_saved_regs(struct bpf_verifier_env *env,
				    struct bpf_reg_state *regs)
{}

set_callee_state_fn;

static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx);

static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
			    set_callee_state_fn set_callee_state_cb,
			    struct bpf_verifier_state *state)
{}

static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
				    const struct btf *btf,
				    struct bpf_reg_state *regs)
{}

/* Compare BTF of a function call with given bpf_reg_state.
 * Returns:
 * EFAULT - there is a verifier bug. Abort verification.
 * EINVAL - there is a type mismatch or BTF is not available.
 * 0 - BTF matches with what bpf_reg_state expects.
 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
 */
static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
				  struct bpf_reg_state *regs)
{}

static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			      int insn_idx, int subprog,
			      set_callee_state_fn set_callee_state_cb)
{}

static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			   int *insn_idx)
{}

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee)
{}

static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx)
{}

static int set_map_elem_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
				       int insn_idx)
{}

static int set_loop_callback_state(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,
				   int insn_idx)
{}

static int set_timer_callback_state(struct bpf_verifier_env *env,
				    struct bpf_func_state *caller,
				    struct bpf_func_state *callee,
				    int insn_idx)
{}

static int set_find_vma_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
				       int insn_idx)
{}

static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
					   struct bpf_func_state *caller,
					   struct bpf_func_state *callee,
					   int insn_idx)
{}

static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
					 struct bpf_func_state *caller,
					 struct bpf_func_state *callee,
					 int insn_idx)
{}

static bool is_rbtree_lock_required_kfunc(u32 btf_id);

/* Are we currently verifying the callback for an rbtree helper that must
 * be called with the lock held? If so, no need to complain about an
 * unreleased lock.
 */
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
{}

static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
{}

static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{}

static int do_refine_retval_range(struct bpf_verifier_env *env,
				  struct bpf_reg_state *regs, int ret_type,
				  int func_id,
				  struct bpf_call_arg_meta *meta)
{}

static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{}

static int
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{}

static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
{}

static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs)
{}

static int check_get_func_ip(struct bpf_verifier_env *env)
{}

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{}

static bool loop_flag_is_zero(struct bpf_verifier_env *env)
{}

static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
{}

static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			     int *insn_idx_p)
{}

/* mark_btf_func_reg_size() is used when the reg size is determined by
 * the BTF func_proto's return value size and argument.
 */
static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
				   size_t reg_size)
{}

static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_arg_mem_size(const struct btf *btf,
				  const struct btf_param *arg,
				  const struct bpf_reg_state *reg)
{}

static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
					const struct btf_param *arg,
					const struct bpf_reg_state *reg)
{}

static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
					  const struct btf_param *arg,
					  const char *name)
{}

enum {};

BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()

static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
				    const struct btf_param *arg, int type)
{}

static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg)
{}

static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
				  const struct btf_param *arg)
{}

/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
					const struct btf *btf,
					const struct btf_type *t, int rec)
{}

enum kfunc_ptr_arg_type {};

enum special_kfunc_type {};

BTF_SET_START(special_kfunc_set)
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
#ifdef CONFIG_CGROUPS
BTF_ID()
#endif
BTF_SET_END()

BTF_ID_LIST(special_kfunc_list)
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
BTF_ID()
#ifdef CONFIG_CGROUPS
BTF_ID()
#else
BTF_ID_UNUSED
#endif
#ifdef CONFIG_BPF_EVENTS
BTF_ID()
#else
BTF_ID_UNUSED
#endif

static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta)
{}

static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta)
{}

static enum kfunc_ptr_arg_type
get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
		       struct bpf_kfunc_call_arg_meta *meta,
		       const struct btf_type *t, const struct btf_type *ref_t,
		       const char *ref_tname, const struct btf_param *args,
		       int argno, int nargs)
{}

static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
					struct bpf_reg_state *reg,
					const struct btf_type *ref_t,
					const char *ref_tname, u32 ref_id,
					struct bpf_kfunc_call_arg_meta *meta,
					int argno)
{}

static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}

static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
{}

/* Implementation details:
 *
 * Each register points to some region of memory, which we define as an
 * allocation. Each allocation may embed a bpf_spin_lock which protects any
 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
 * allocation. The lock and the data it protects are colocated in the same
 * memory region.
 *
 * Hence, every time a register holds a pointer value pointing to such an
 * allocation, the verifier preserves a unique reg->id for it.
 *
 * The verifier remembers the lock 'ptr' and the lock 'id' whenever
 * bpf_spin_lock is called.
 *
 * To enable this, lock state in the verifier captures two values:
 *	active_lock.ptr = Register's type specific pointer
 *	active_lock.id  = A unique ID for each register pointer value
 *
 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
 * supported register types.
 *
 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
 * allocated objects is the reg->btf pointer.
 *
 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
 * can establish the provenance of the map value statically for each distinct
 * lookup into such maps. They always contain a single map value, hence unique
 * IDs for each pseudo load would pessimize the algorithm and reject valid programs.
 *
 * So, in case of global variables, they use array maps with max_entries = 1,
 * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point
 * into the same map value as max_entries is 1, as described above).
 *
 * In case of inner map lookups, the inner map pointer has same map_ptr as the
 * outer map pointer (in verifier context), but each lookup into an inner map
 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
 * maps from the same outer map share the same map_ptr as active_lock.ptr, they
 * will get different reg->id assigned to each lookup, hence different
 * active_lock.id.
 *
 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
 * returned from bpf_obj_new. Each allocation receives a new reg->id.
 */
static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{}
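
/* Illustrative sketch for an allocated object (type and field names are
 * hypothetical): the (active_lock.ptr, active_lock.id) pair identifies
 * exactly which lock is held:
 *
 *	struct node *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&n->lock);	// ptr = n's reg->btf, id = n's reg->id
 *	...
 *	bpf_spin_unlock(&n->lock);	// active_lock cleared
 *	bpf_obj_drop(n);
 */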

static bool is_bpf_list_api_kfunc(u32 btf_id)
{}

static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{}

static bool is_bpf_graph_api_kfunc(u32 btf_id)
{}

static bool is_sync_callback_calling_kfunc(u32 btf_id)
{}

static bool is_async_callback_calling_kfunc(u32 btf_id)
{}

static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
{}

static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
{}

static bool is_callback_calling_kfunc(u32 btf_id)
{}

static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{}

static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
					  enum btf_field_type head_field_type,
					  u32 kfunc_btf_id)
{}

static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
					  enum btf_field_type node_field_type,
					  u32 kfunc_btf_id)
{}

static int
__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, u32 regno,
				   struct bpf_kfunc_call_arg_meta *meta,
				   enum btf_field_type head_field_type,
				   struct btf_field **head_field)
{}

static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
					   struct bpf_reg_state *reg, u32 regno,
					   struct bpf_kfunc_call_arg_meta *meta)
{}

static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
					     struct bpf_reg_state *reg, u32 regno,
					     struct bpf_kfunc_call_arg_meta *meta)
{}

static int
__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, u32 regno,
				   struct bpf_kfunc_call_arg_meta *meta,
				   enum btf_field_type head_field_type,
				   enum btf_field_type node_field_type,
				   struct btf_field **node_field)
{}

static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
					   struct bpf_reg_state *reg, u32 regno,
					   struct bpf_kfunc_call_arg_meta *meta)
{}

static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
					     struct bpf_reg_state *reg, u32 regno,
					     struct bpf_kfunc_call_arg_meta *meta)
{}

/*
 * The css_task iter allowlist is needed to avoid deadlocking on css_set_lock.
 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
 * Any sleepable progs are also safe since bpf_check_attach_target() enforces
 * that they can only be attached to some specific hook points.
 */
static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
{}

static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
			    int insn_idx)
{}

static int fetch_kfunc_meta(struct bpf_verifier_env *env,
			    struct bpf_insn *insn,
			    struct bpf_kfunc_call_arg_meta *meta,
			    const char **kfunc_name)
{}

static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);

static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			    int *insn_idx_p)
{}

static bool check_reg_sane_offset(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  enum bpf_reg_type type)
{}

enum {};

static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
			      u32 *alu_limit, bool mask_to_left)
{}

static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
				    const struct bpf_insn *insn)
{}

static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
				       u32 alu_state, u32 alu_limit)
{}

static int sanitize_val_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn)
{}

static bool sanitize_needed(u8 opcode)
{}

struct bpf_sanitize_info {};

static struct bpf_verifier_state *
sanitize_speculative_path(struct bpf_verifier_env *env,
			  const struct bpf_insn *insn,
			  u32 next_idx, u32 curr_idx)
{}

static int sanitize_ptr_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn,
			    const struct bpf_reg_state *ptr_reg,
			    const struct bpf_reg_state *off_reg,
			    struct bpf_reg_state *dst_reg,
			    struct bpf_sanitize_info *info,
			    const bool commit_window)
{}

static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
{}

static int sanitize_err(struct bpf_verifier_env *env,
			const struct bpf_insn *insn, int reason,
			const struct bpf_reg_state *off_reg,
			const struct bpf_reg_state *dst_reg)
{}

/* check that stack access falls within stack limits and that 'reg' doesn't
 * have a variable offset.
 *
 * Variable offset is prohibited for unprivileged mode for simplicity since it
 * requires corresponding support in Spectre masking for stack ALU.  See also
 * retrieve_ptr_limit().
 *
 * 'off' includes 'reg->off'.
 */
static int check_stack_access_for_ptr_arithmetic(
				struct bpf_verifier_env *env,
				int regno,
				const struct bpf_reg_state *reg,
				int off)
{}

static int sanitize_check_bounds(struct bpf_verifier_env *env,
				 const struct bpf_insn *insn,
				 const struct bpf_reg_state *dst_reg)
{}

/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
 * Caller should also handle BPF_MOV case separately.
 * If we return -EACCES, caller may want to try again treating pointer as a
 * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
 */
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn,
				   const struct bpf_reg_state *ptr_reg,
				   const struct bpf_reg_state *off_reg)
{}

static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
			      struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{}

static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{}

static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{}

static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
				  struct bpf_reg_state *src_reg)
{}

static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{}

static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
					     const struct bpf_reg_state *src_reg)
{}

/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
				      struct bpf_insn *insn,
				      struct bpf_reg_state *dst_reg,
				      struct bpf_reg_state src_reg)
{}

/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{}

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{}

static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
				   struct bpf_reg_state *dst_reg,
				   enum bpf_reg_type type,
				   bool range_right_open)
{}

/*
 * <reg1> <op> <reg2>, currently assuming reg2 is a constant
 */
static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
				  u8 opcode, bool is_jmp32)
{}

static int flip_opcode(u32 opcode)
{}

static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   u8 opcode)
{}

/* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when the register value
 *      range is [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
			   u8 opcode, bool is_jmp32)
{}
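
/* For illustration: with reg1 known to be in range [0,10] and reg2 a
 * constant,
 *	"if reg1 < 20" -> 1 (always taken)
 *	"if reg1 > 10" -> 0 (never taken)
 *	"if reg1 < 5"  -> -1 (unknown)
 */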

/* Opcode that corresponds to a *false* branch condition.
 * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2
 */
static u8 rev_opcode(u8 opcode)
{}

/* Refine range knowledge for <reg1> <op> <reg2> conditional operation. */
static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
				u8 opcode, bool is_jmp32)
{}

/* Adjusts the register min/max values in the case that the dst_reg and
 * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
 * Technically we can do similar adjustments for pointers to the same object,
 * but we don't support that right now.
 */
static int reg_set_min_max(struct bpf_verifier_env *env,
			   struct bpf_reg_state *true_reg1,
			   struct bpf_reg_state *true_reg2,
			   struct bpf_reg_state *false_reg1,
			   struct bpf_reg_state *false_reg2,
			   u8 opcode, bool is_jmp32)
{}

static void mark_ptr_or_null_reg(struct bpf_func_state *state,
				 struct bpf_reg_state *reg, u32 id,
				 bool is_null)
{}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
 */
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
				  bool is_null)
{}

static bool try_match_pkt_pointers(const struct bpf_insn *insn,
				   struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   struct bpf_verifier_state *this_branch,
				   struct bpf_verifier_state *other_branch)
{}

static void find_equal_scalars(struct bpf_verifier_state *vstate,
			       struct bpf_reg_state *known_reg)
{}

static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{}

static bool may_access_skb(enum bpf_prog_type type)
{}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{}

static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
{}

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.peek()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {};

static void mark_prune_point(struct bpf_verifier_env *env, int idx)
{}

static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
{}

static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
{}

static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
{}

static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
{}

static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
{}

enum {};

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{}

static int visit_func_call_insn(int t, struct bpf_insn *insns,
				struct bpf_verifier_env *env,
				bool visit_callee)
{}

/* Visits the instruction at index t and returns one of the following:
 *  < 0 - an error occurred
 *  DONE_EXPLORING - the instruction was fully explored
 *  KEEP_EXPLORING - there is still work to be done before it is fully explored
 */
static int visit_insn(int t, struct bpf_verifier_env *env)
{}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct bpf_verifier_env *env)
{}

static int check_abnormal_return(struct bpf_verifier_env *env)
{}

/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE
#define MAX_FUNCINFO_REC_SIZE

static int check_btf_func_early(struct bpf_verifier_env *env,
				const union bpf_attr *attr,
				bpfptr_t uattr)
{}

static int check_btf_func(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{}

static void adjust_btf_func(struct bpf_verifier_env *env)
{}

#define MIN_BPF_LINEINFO_SIZE
#define MAX_LINEINFO_REC_SIZE

static int check_btf_line(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{}

#define MIN_CORE_RELO_SIZE
#define MAX_CORE_RELO_SIZE

static int check_core_relo(struct bpf_verifier_env *env,
			   const union bpf_attr *attr,
			   bpfptr_t uattr)
{}

static int check_btf_info_early(struct bpf_verifier_env *env,
				const union bpf_attr *attr,
				bpfptr_t uattr)
{}

static int check_btf_info(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{}

/* check %cur's range satisfies %old's */
static bool range_within(const struct bpf_reg_state *old,
			 const struct bpf_reg_state *cur)
{}

/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well.  But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe.  But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before.  If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{}
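
/* Minimal sketch of the mapping described above (illustrative, not the
 * in-tree implementation; assumes idmap holds an array of (old, cur) pairs
 * plus a count of used entries):
 *
 *	for (i = 0; i < cnt; i++)
 *		if (map[i].old == old_id)
 *			return map[i].cur == cur_id;
 *	map[cnt].old = old_id;
 *	map[cnt].cur = cur_id;
 *	cnt++;
 *	return true;
 */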

/* Similar to check_ids(), but allocate a unique temporary ID
 * for 'old_id' or 'cur_id' of zero.
 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid.
 */
static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{}

static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{}

static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{}

/* the parentage chains form a tree.
 * the verifier states are added to state lists at given insn and
 * pushed into state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 * Example:
 * 1: r0 = 1
 * 2: if r1 == 100 goto pc+1
 * 3: r0 = 2
 * 4: exit
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At the insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program the condition of walking the branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of state list in is_state_visited()
 * we can call this clean_live_states() function to mark all liveness states
 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
 * will not be used.
 * This function also clears the registers and stack for states that !READ
 * to simplify state merging.
 *
 * An important note here is that walking the same branch instruction in the
 * callee doesn't mean that the states are DONE. The verifier has to compare
 * the callsites.
 */
static void clean_live_states(struct bpf_verifier_env *env, int insn,
			      struct bpf_verifier_state *cur)
{}

static bool regs_exact(const struct bpf_reg_state *rold,
		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap)
{}

enum exact_level {};

/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
		    enum exact_level exact)
{}

static struct bpf_reg_state unbound_reg;

static __init int unbound_reg_init(void)
{}
late_initcall(unbound_reg_init);

static bool is_stack_all_misc(struct bpf_verifier_env *env,
			      struct bpf_stack_state *stack)
{}

static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
						  struct bpf_stack_state *stack)
{}

static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
		      struct bpf_func_state *cur, struct bpf_idmap *idmap,
		      enum exact_level exact)
{}

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
		    struct bpf_idmap *idmap)
{}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words, if the current stack state (the one being explored) has more
 * valid slots than the old one that already passed validation, it means
 * the verifier can stop exploring and conclude that the current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
			      struct bpf_func_state *cur, enum exact_level exact)
{}

static void reset_idmap_scratch(struct bpf_verifier_env *env)
{}

static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur,
			 enum exact_level exact)
{}

/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{}

/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent.  When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{}

/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{}

static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{}

static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{}

/* is_state_visited() handles iter_next() (see process_iter_next_call() for
 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
 * states to match, which otherwise would look like an infinite loop. So while
 * iter_next() calls are taken care of, we still need to be careful and
 * prevent erroneous and too eager declaration of "infinite loop", when
 * iterators are involved.
 *
 * Here's a situation in pseudo-BPF assembly form:
 *
 *   0: again:                          ; set up iter_next() call args
 *   1:   r1 = &it                      ; <CHECKPOINT HERE>
 *   2:   call bpf_iter_num_next        ; this is iter_next() call
 *   3:   if r0 == 0 goto done
 *   4:   ... something useful here ...
 *   5:   goto again                    ; another iteration
 *   6: done:
 *   7:   r1 = &it
 *   8:   call bpf_iter_num_destroy     ; clean up iter state
 *   9:   exit
 *
 * This is a typical loop. Let's assume that we have a prune point at 1:,
 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
 * again`, assuming other heuristics don't get in a way).
 *
 * When we come to 1: for the first time, let's say we have some state X. We proceed
 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
 * Now we come back to validate that forked ACTIVE state. We proceed through
 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
 * are converging. But the problem is that we don't know that yet, as this
 * convergence has to happen at iter_next() call site only. So if nothing is
 * done, at 1: verifier will use bounded loop logic and declare infinite
 * looping (and would be *technically* correct, if not for iterator's
 * "eventual sticky NULL" contract, see process_iter_next_call()). But we
 * don't want that. So what we do in process_iter_next_call() when we go on
 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
 * a different iteration. So when we suspect an infinite loop, we additionally
 * check if any of the *ACTIVE* iterator states depths differ. If yes, we
 * pretend we are not looping and wait for next iter_next() call.
 *
 * This only applies to ACTIVE state. In DRAINED state we don't expect to
 * loop, because that would actually mean infinite loop, as DRAINED state is
 * "sticky", and so we'll keep returning into the same instruction with the
 * same state (at least in one of possible code paths).
 *
 * This approach allows keeping the infinite loop heuristic even in the face of
 * an active iterator. E.g., the C snippet below is and will be detected as
 * infinitely looping:
 *
 *   struct bpf_iter_num it;
 *   int *p, x;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((p = bpf_iter_num_next(&it))) {
 *       x = *p;
 *       while (x--) {} // <<-- infinite loop here
 *   }
 *
 */
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
{}

static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{}

/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{}

/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch accessing the pointer, but not ok for the other branch:
 *
 * R1 = sock_ptr
 * goto X;
 * ...
 * R1 = some_other_valid_ptr;
 * goto X;
 * ...
 * R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{}

static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
			     bool allow_trust_mismatch)
{}

static int do_check(struct bpf_verifier_env *env)
{}

static int find_btf_percpu_datasec(struct btf *btf)
{}

/* replace pseudo btf_id with kernel symbol address */
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
			       struct bpf_insn *insn,
			       struct bpf_insn_aux_data *aux)
{}

static bool is_tracing_prog_type(enum bpf_prog_type type)
{}

static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)

{}

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{}

/* find and rewrite pseudo imm in ld_imm64 instructions:
 *
 * 1. if it accesses map FD, replace it with actual map pointer.
 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
 *
 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
 */
static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{}

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{}

/* drop refcnt of maps used by the rejected program */
static void release_btfs(struct bpf_verifier_env *env)
{}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{}

/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
				 struct bpf_insn_aux_data *new_data,
				 struct bpf_prog *new_prog, u32 off, u32 cnt)
{}

static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{}

static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{}

static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{}

/*
 * For all jmp insns in a given 'prog' that point to 'tgt_idx' insn adjust the
 * jump offset by 'delta'.
 */
static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
{}
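
/*
 * Illustrative sketch of the adjustment described above: any jump whose
 * target (pc + 1 + off) equals 'tgt_idx' gets its offset bumped by 'delta'.
 * The real code must also special-case gotol (BPF_JMP32 | BPF_JA), whose
 * target lives in 'imm' rather than 'off', skip insns inside the moved
 * range, and check for s16/s32 overflow; all of that is omitted here.
 */
#if 0
static void adjust_jmp_off_sketch(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i;

	for (i = 0; i < prog->len; i++, insn++) {
		u8 class = BPF_CLASS(insn->code);

		if (class != BPF_JMP && class != BPF_JMP32)
			continue;
		/* calls and exits don't encode a branch target in 'off' */
		if (BPF_OP(insn->code) == BPF_CALL || BPF_OP(insn->code) == BPF_EXIT)
			continue;
		if (i + 1 + insn->off == tgt_idx)
			insn->off += delta;
	}
}
#endif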

static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
					      u32 off, u32 cnt)
{}

static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
				      u32 cnt)
{}

static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{}

/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 * with 'ja -1'.
 *
 * Plain nops would not be optimal: e.g. if they sat at the end of the
 * program and, through another bug, we managed to jump there, we'd
 * execute beyond program memory. Returning an exception code wouldn't
 * work either, since the dead code may be located in subprogs.
 */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{}
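
/*
 * Illustrative sketch of the rewrite described above: every insn that was
 * never marked as seen during verification is overwritten with a 'ja -1'
 * trap that jumps to itself, so a stray jump into dead code spins in place
 * instead of running off the end of the program.
 */
#if 0
static void sanitize_dead_code_sketch(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (aux_data[i].seen)
			continue;
		memcpy(insn + i, &trap, sizeof(trap));
	}
}
#endif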

static bool insn_is_cond_jump(u8 code)
{}

static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{}

static int opt_remove_dead_code(struct bpf_verifier_env *env)
{}

static int opt_remove_nops(struct bpf_verifier_env *env)
{}

static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
					 const union bpf_attr *attr)
{}

/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 *     struct __sk_buff    -> struct sk_buff
 *     struct bpf_sock_ops -> struct sock
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{}
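
/*
 * Illustrative sketch of a single ctx rewrite of the kind described above:
 * a 32-bit load of __sk_buff->len becomes a load from the underlying
 * sk_buff at its real offset.  Actual conversions go through the prog
 * type's convert_ctx_access() callback and may emit several insns (e.g.
 * for narrow loads or bitfields); only the simplest one-to-one case is
 * shown, and the helper name is hypothetical.
 */
#if 0
static u32 convert_skb_len_access_sketch(const struct bpf_insn *orig,
					 struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	/* dst = *(u32 *)(src + offsetof(struct sk_buff, len)) */
	*insn++ = BPF_LDX_MEM(BPF_W, orig->dst_reg, orig->src_reg,
			      offsetof(struct sk_buff, len));

	return insn - insn_buf;	/* number of insns emitted */
}
#endif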

static int jit_subprogs(struct bpf_verifier_env *env)
{}

static int fixup_call_args(struct bpf_verifier_env *env)
{}

/* replace a generic kfunc with a specialized version if necessary */
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr)
{}

static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
					    u16 struct_meta_reg,
					    u16 node_offset_reg,
					    struct bpf_insn *insn,
					    struct bpf_insn *insn_buf,
					    int *cnt)
{}

static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{}

/* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
{}

/* Do various post-verification rewrites in a single program pass.
 * These rewrites simplify JIT and interpreter implementations.
 */
static int do_misc_fixups(struct bpf_verifier_env *env)
{}
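
/*
 * Illustrative sketch of one such rewrite: for an ordinary helper call, the
 * helper ID that the loader left in insn->imm is replaced with the address
 * of the helper's implementation relative to __bpf_call_base, so the
 * interpreter and JITs can dispatch directly.  Error handling and the many
 * special-cased helpers (map lookups, tail calls, etc.) are omitted, and
 * the helper name is hypothetical.
 */
#if 0
static int fixup_helper_call_sketch(struct bpf_verifier_env *env,
				    struct bpf_insn *insn)
{
	const struct bpf_func_proto *fn;

	fn = env->ops->get_func_proto(insn->imm, env->prog);
	if (!fn || !fn->func)
		return -EINVAL;

	insn->imm = fn->func - __bpf_call_base;
	return 0;
}
#endif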

static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
					int position,
					s32 stack_base,
					u32 callback_subprogno,
					u32 *cnt)
{}

static bool is_bpf_loop_call(struct bpf_insn *insn)
{}

/* For all sub-programs in the program (including main) check
 * insn_aux_data to see if there are bpf_loop calls that require
 * inlining. If such calls are found, they are replaced with the
 * sequence of instructions produced by `inline_bpf_loop` and the
 * subprog's stack_depth is increased by the size of 3 registers.
 * This stack space is used to spill the values of R6, R7 and R8,
 * which hold the loop bound, counter and context variables.
 */
static int optimize_bpf_loop(struct bpf_verifier_env *env)
{}
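
/*
 * Illustrative sketch of the stack accounting described above: when a
 * bpf_loop call in a subprog is inlined, the subprog's frame grows by three
 * register-sized slots (to spill R6-R8), with the existing depth rounded up
 * so the new slots stay 8-byte aligned.  The helper name is hypothetical.
 */
#if 0
static u32 bpf_loop_inline_stack_growth_sketch(u32 stack_depth)
{
	u32 pad = round_up(stack_depth, 8) - stack_depth;

	return BPF_REG_SIZE * 3 + pad;
}
#endif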

static void free_states(struct bpf_verifier_env *env)
{}

static int do_check_common(struct bpf_verifier_env *env, int subprog)
{}

/* Lazily verify all global functions based on their BTF, if they are called
 * from the main BPF program or any of its subprograms transitively.
 * BPF global subprogs called from dead code are not validated.
 * All callable global functions must pass verification.
 * Otherwise the whole program is rejected.
 * Consider:
 * int bar(int);
 * int foo(int f)
 * {
 *    return bar(f);
 * }
 * int bar(int b)
 * {
 *    ...
 * }
 * foo() will be verified first for R1=any_scalar_value. During its
 * verification it is assumed that bar() has already been verified
 * successfully, so the call to bar() from foo() is checked for type match
 * only. Later bar() will be verified independently to check that it's safe
 * for R1=any_scalar_value.
 */
static int do_check_subprogs(struct bpf_verifier_env *env)
{}

static int do_check_main(struct bpf_verifier_env *env)
{}

static void print_verification_stats(struct bpf_verifier_env *env)
{}

static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{}
#define SECURITY_PREFIX

static int check_attach_modify_return(unsigned long addr, const char *func_name)
{}

/* list of non-sleepable functions that are otherwise on
 * ALLOW_ERROR_INJECTION list
 */
BTF_SET_START(btf_non_sleepable_error_inject)
/* Three functions below can be called from sleepable and non-sleepable context.
 * Assume non-sleepable from bpf safety point of view.
 */
BTF_ID()
#ifdef CONFIG_FAIL_PAGE_ALLOC
BTF_ID()
#endif
#ifdef CONFIG_FAILSLAB
BTF_ID()
#endif
BTF_SET_END()

static int check_non_sleepable_error_inject(u32 btf_id)
{}

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info)
{}

BTF_SET_START(btf_id_deny)
BTF_ID_UNUSED
#ifdef CONFIG_SMP
BTF_ID()
BTF_ID()
#endif
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
#endif
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
BTF_ID()
BTF_ID()
#endif
#ifdef CONFIG_PREEMPT_RCU
BTF_ID()
BTF_ID()
#endif
BTF_SET_END()

static bool can_be_sleepable(struct bpf_prog *prog)
{}

static int check_attach_btf_id(struct bpf_verifier_env *env)
{}

struct btf *bpf_get_btf_vmlinux(void)
{}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{}