#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = …;
struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;
struct bpf_verifier_stack_elem { … };
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ …
#define BPF_COMPLEXITY_LIMIT_STATES …
#define BPF_MAP_KEY_POISON …
#define BPF_MAP_KEY_SEEN …
#define BPF_GLOBAL_PERCPU_MA_MAX_SIZE …
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{ … }
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{ … }
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
struct bpf_map *map,
bool unpriv, bool poison)
{ … }
static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{ … }
static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{ … }
static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{ … }
static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{ … }
static bool bpf_helper_call(const struct bpf_insn *insn)
{ … }
static bool bpf_pseudo_call(const struct bpf_insn *insn)
{ … }
static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{ … }
struct bpf_call_arg_meta { … };
struct bpf_kfunc_call_arg_meta { … };
struct btf *btf_vmlinux;
static const char *btf_type_name(const struct btf *btf, u32 id)
{ … }
static DEFINE_MUTEX(bpf_verifier_lock);
static DEFINE_MUTEX(bpf_percpu_ma_lock);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{ … }
static void verbose_invalid_scalar(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
struct bpf_retval_range range, const char *ctx,
const char *reg_name)
{ … }
static bool type_may_be_null(u32 type)
{ … }
static bool reg_not_null(const struct bpf_reg_state *reg)
{ … }
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{ … }
static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{ … }
static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
{ … }
static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
{ … }
static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
{ … }
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{ … }
static bool type_is_rdonly_mem(u32 type)
{ … }
static bool is_acquire_function(enum bpf_func_id func_id,
const struct bpf_map *map)
{ … }
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{ … }
static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{ … }
static bool is_sync_callback_calling_kfunc(u32 btf_id);
static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{ … }
static bool is_async_callback_calling_function(enum bpf_func_id func_id)
{ … }
static bool is_callback_calling_function(enum bpf_func_id func_id)
{ … }
static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
{ … }
static bool is_async_callback_calling_insn(struct bpf_insn *insn)
{ … }
static bool is_may_goto_insn(struct bpf_insn *insn)
{ … }
static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{ … }
static bool is_storage_get_function(enum bpf_func_id func_id)
{ … }
static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
const struct bpf_map *map)
{ … }
static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{ … }
static int __get_spi(s32 off)
{ … }
static struct bpf_func_state *func(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg)
{ … }
static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{ … }
static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
const char *obj_kind, int nr_slots)
{ … }
static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
{ … }
static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{ … }
static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
{ … }
static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{ … }
static void __mark_dynptr_reg(struct bpf_reg_state *reg,
enum bpf_dynptr_type type,
bool first_slot, int dynptr_id);
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
struct bpf_reg_state *sreg1,
struct bpf_reg_state *sreg2,
enum bpf_dynptr_type type)
{ … }
static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
enum bpf_dynptr_type type)
{ … }
static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
{ … }
static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
{ … }
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi)
{ … }
static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type)
{ … }
static void __mark_reg_known_zero(struct bpf_reg_state *reg);
static bool in_rcu_cs(struct bpf_verifier_env *env);
static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);
static int mark_stack_slots_iter(struct bpf_verifier_env *env,
struct bpf_kfunc_call_arg_meta *meta,
struct bpf_reg_state *reg, int insn_idx,
struct btf *btf, u32 btf_id, int nr_slots)
{ … }
static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, int nr_slots)
{ … }
static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, int nr_slots)
{ … }
static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
struct btf *btf, u32 btf_id, int nr_slots)
{ … }
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{ … }
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{ … }
static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{ … }
static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
{ … }
static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{ … }
static void scrub_spilled_slot(u8 *stype)
{ … }
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{ … }
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{ … }
static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{ … }
static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{ … }
static int resize_reference_state(struct bpf_func_state *state, size_t n)
{ … }
static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
{ … }
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{ … }
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{ … }
static void free_func_state(struct bpf_func_state *state)
{ … }
static void clear_jmp_history(struct bpf_verifier_state *state)
{ … }
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{ … }
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{ … }
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{ … }
static u32 state_htab_size(struct bpf_verifier_env *env)
{ … }
static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
{ … }
static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
{ … }
static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
{ … }
static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
{ … }
static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{ … }
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx, bool pop_log)
{ … }
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx,
bool speculative)
{ … }
#define CALLER_SAVED_REGS …
static const int caller_saved[CALLER_SAVED_REGS] = …;
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{ … }
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{ … }
static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{ … }
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{ … }
static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{ … }
static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
bool first_slot, int dynptr_id)
{ … }
static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{ … }
static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
struct btf_field_graph_root *ds_head)
{ … }
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{ … }
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{ … }
static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
{ … }
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{ … }
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{ … }
static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{ … }
static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{ … }
static void __update_reg32_bounds(struct bpf_reg_state *reg)
{ … }
static void __update_reg64_bounds(struct bpf_reg_state *reg)
{ … }
static void __update_reg_bounds(struct bpf_reg_state *reg)
{ … }
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{ … }
static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{ … }
static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
{ … }
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{ … }
static void __reg_bound_offset(struct bpf_reg_state *reg)
{ … }
static void reg_bounds_sync(struct bpf_reg_state *reg)
{ … }
static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, const char *ctx)
{ … }
static bool __reg32_bound_s64(s32 a)
{ … }
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{ … }
static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{ … }
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{ … }
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{ … }
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{ … }
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{ … }
static void mark_btf_ld_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno,
enum bpf_reg_type reg_type,
struct btf *btf, u32 btf_id,
enum bpf_type_flag flag)
{ … }
#define DEF_NOT_SUBREG …
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{ … }
static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
{ … }
#define BPF_MAIN_FUNC …
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int callsite, int frameno, int subprogno)
{ … }
static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx,
int subprog, bool is_sleepable)
{ … }
enum reg_arg_type { … };
static int cmp_subprogs(const void *a, const void *b)
{ … }
static int find_subprog(struct bpf_verifier_env *env, int off)
{ … }
static int add_subprog(struct bpf_verifier_env *env, int off)
{ … }
static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
{ … }
#define MAX_KFUNC_DESCS …
#define MAX_KFUNC_BTFS …
struct bpf_kfunc_desc { … };
struct bpf_kfunc_btf { … };
struct bpf_kfunc_desc_tab { … };
struct bpf_kfunc_btf_tab { … };
static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{ … }
static int kfunc_btf_cmp_by_off(const void *a, const void *b)
{ … }
static const struct bpf_kfunc_desc *
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
{ … }
int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
u16 btf_fd_idx, u8 **func_addr)
{ … }
static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
s16 offset)
{ … }
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
{ … }
static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
{ … }
static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
{ … }
static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
{ … }
static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog)
{ … }
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{ … }
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
const struct bpf_insn *insn)
{ … }
static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
{ … }
static int check_subprogs(struct bpf_verifier_env *env)
{ … }
static int mark_reg_read(struct bpf_verifier_env *env,
const struct bpf_reg_state *state,
struct bpf_reg_state *parent, u8 flag)
{ … }
static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
int spi, int nr_slots)
{ … }
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{ … }
static int insn_def_regno(const struct bpf_insn *insn)
{ … }
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{ … }
static void mark_insn_zext(struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{ … }
static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
enum reg_arg_type t)
{ … }
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{ … }
static int insn_stack_access_flags(int frameno, int spi)
{ … }
static int insn_stack_access_spi(int insn_flags)
{ … }
static int insn_stack_access_frameno(int insn_flags)
{ … }
static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
{ … }
static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
{ … }
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
int insn_flags)
{ … }
static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
u32 hist_end, int insn_idx)
{ … }
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
u32 *history)
{ … }
static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
{ … }
static inline void bt_init(struct backtrack_state *bt, u32 frame)
{ … }
static inline void bt_reset(struct backtrack_state *bt)
{ … }
static inline u32 bt_empty(struct backtrack_state *bt)
{ … }
static inline int bt_subprog_enter(struct backtrack_state *bt)
{ … }
static inline int bt_subprog_exit(struct backtrack_state *bt)
{ … }
static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{ … }
static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{ … }
static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
{ … }
static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
{ … }
static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{ … }
static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{ … }
static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
{ … }
static inline u32 bt_reg_mask(struct backtrack_state *bt)
{ … }
static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
{ … }
static inline u64 bt_stack_mask(struct backtrack_state *bt)
{ … }
static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
{ … }
static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{ … }
static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
{ … }
static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
{ … }
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
{ … }
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
struct bpf_verifier_state *st)
{ … }
static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{ … }
static bool idset_contains(struct bpf_idset *s, u32 id)
{ … }
static int idset_push(struct bpf_idset *s, u32 id)
{ … }
static void idset_reset(struct bpf_idset *s)
{ … }
static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{ … }
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
{ … }
int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{ … }
static int mark_chain_precision_batch(struct bpf_verifier_env *env)
{ … }
static bool is_spillable_regtype(enum bpf_reg_type type)
{ … }
static bool register_is_null(struct bpf_reg_state *reg)
{ … }
static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
{ … }
static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
{ … }
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{ … }
static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg)
{ … }
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{ … }
static void save_register_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg,
int size)
{ … }
static bool is_bpf_st_mem(struct bpf_insn *insn)
{ … }
static int get_reg_width(struct bpf_reg_state *reg)
{ … }
static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int off, int size, int value_regno,
int insn_idx)
{ … }
static int check_stack_write_var_off(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int ptr_regno, int off, int size,
int value_regno, int insn_idx)
{ … }
static void mark_reg_stack_read(struct bpf_verifier_env *env,
struct bpf_func_state *ptr_state,
int min_off, int max_off, int dst_regno)
{ … }
static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
struct bpf_func_state *reg_state,
int off, int size, int dst_regno)
{ … }
enum bpf_access_src { … };
static int check_stack_range_initialized(struct bpf_verifier_env *env,
int regno, int off, int access_size,
bool zero_size_allowed,
enum bpf_access_src type,
struct bpf_call_arg_meta *meta);
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{ … }
static int check_stack_read_var_off(struct bpf_verifier_env *env,
int ptr_regno, int off, int size, int dst_regno)
{ … }
static int check_stack_read(struct bpf_verifier_env *env,
int ptr_regno, int off, int size,
int dst_regno)
{ … }
static int check_stack_write(struct bpf_verifier_env *env,
int ptr_regno, int off, int size,
int value_regno, int insn_idx)
{ … }
static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
int off, int size, enum bpf_access_type type)
{ … }
static int __check_mem_access(struct bpf_verifier_env *env, int regno,
int off, int size, u32 mem_size,
bool zero_size_allowed)
{ … }
static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, u32 mem_size,
bool zero_size_allowed)
{ … }
static int __check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
bool fixed_off_ok)
{ … }
static int check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno)
{ … }
static int map_kptr_match_type(struct bpf_verifier_env *env,
struct btf_field *kptr_field,
struct bpf_reg_state *reg, u32 regno)
{ … }
static bool in_sleepable(struct bpf_verifier_env *env)
{ … }
static bool in_rcu_cs(struct bpf_verifier_env *env)
{ … }
BTF_SET_START(rcu_protected_types)
BTF_ID(…)
#ifdef CONFIG_CGROUPS
BTF_ID(…)
#endif
#ifdef CONFIG_BPF_JIT
BTF_ID(…)
#endif
BTF_ID(…)
BTF_ID(…)
BTF_SET_END(…)
static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
{ … }
static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
{ … }
static bool rcu_safe_kptr(const struct btf_field *field)
{ … }
static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
{ … }
static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
int value_regno, int insn_idx,
struct btf_field *kptr_field)
{ … }
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed,
enum bpf_access_src src)
{ … }
#define MAX_PACKET_OFF …
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{ … }
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{ … }
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
struct btf **btf, u32 *btf_id)
{ … }
static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
int size)
{ … }
static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
u32 regno, int off, int size,
enum bpf_access_type t)
{ … }
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{ … }
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{ … }
static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{ … }
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{ … }
static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{ … }
static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
{ … }
static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = …;
static bool is_trusted_reg(const struct bpf_reg_state *reg)
{ … }
static bool is_rcu_reg(const struct bpf_reg_state *reg)
{ … }
static void clear_trusted_flags(enum bpf_type_flag *flag)
{ … }
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
{ … }
static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char *pointer_desc,
int off, int size, bool strict)
{ … }
static int check_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int off,
int size, bool strict_alignment_once)
{ … }
static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
{ … }
static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
{ … }
static int check_max_stack_depth(struct bpf_verifier_env *env)
{ … }
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/* Return the recorded stack depth of the subprog called by @insn at @idx.
 * The callee's entry instruction is the insn following the call, offset by
 * the call's immediate. A failed lookup indicates a verifier bug (subprogs
 * were collected earlier), so warn once and report -EFAULT.
 */
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int target_insn = idx + insn->imm + 1;
	int subprog;

	subprog = find_subprog(env, target_insn);
	if (subprog < 0) {
		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
			  target_insn);
		return -EFAULT;
	}
	return env->subprog_info[subprog].stack_depth;
}
#endif
static int __check_buffer_access(struct bpf_verifier_env *env,
const char *buf_info,
const struct bpf_reg_state *reg,
int regno, int off, int size)
{ … }
static int check_tp_buffer_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int regno, int off, int size)
{ … }
static int check_buffer_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int regno, int off, int size,
bool zero_size_allowed,
u32 *max_access)
{ … }
static void zext_32_to_64(struct bpf_reg_state *reg)
{ … }
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{ … }
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
{ … }
static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
{ … }
static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
{ … }
static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
{ … }
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{ … }
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
bool is_ldsx)
{ … }
#define BTF_TYPE_SAFE_RCU(__type) …
#define BTF_TYPE_SAFE_RCU_OR_NULL(__type) …
#define BTF_TYPE_SAFE_TRUSTED(__type) …
#define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type) …
BTF_TYPE_SAFE_RCU(struct task_struct) { … };
BTF_TYPE_SAFE_RCU(struct cgroup) { … };
BTF_TYPE_SAFE_RCU(struct css_set) { … };
BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) { … };
BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) { … };
BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) { … };
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { … };
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { … };
BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { … };
BTF_TYPE_SAFE_TRUSTED(struct file) { … };
BTF_TYPE_SAFE_TRUSTED(struct dentry) { … };
BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) { … };
static bool type_is_rcu(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const char *field_name, u32 btf_id)
{ … }
static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const char *field_name, u32 btf_id)
{ … }
static bool type_is_trusted(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const char *field_name, u32 btf_id)
{ … }
static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const char *field_name, u32 btf_id)
{ … }
static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs,
int regno, int off, int size,
enum bpf_access_type atype,
int value_regno)
{ … }
static int check_ptr_to_map_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs,
int regno, int off, int size,
enum bpf_access_type atype,
int value_regno)
{ … }
static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
s64 off,
struct bpf_func_state *state,
enum bpf_access_type t)
{ … }
static int check_stack_access_within_bounds(
struct bpf_verifier_env *env,
int regno, int off, int access_size,
enum bpf_access_src src, enum bpf_access_type type)
{ … }
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
int off, int bpf_size, enum bpf_access_type t,
int value_regno, bool strict_alignment_once, bool is_ldsx)
{ … }
static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
bool allow_trust_mismatch);
static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{ … }
static int check_stack_range_initialized(
struct bpf_verifier_env *env, int regno, int off,
int access_size, bool zero_size_allowed,
enum bpf_access_src type, struct bpf_call_arg_meta *meta)
{ … }
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{ … }
static int check_mem_size_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{ … }
static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size)
{ … }
static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno)
{ … }
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
bool is_lock)
{ … }
static int process_timer_func(struct bpf_verifier_env *env, int regno,
struct bpf_call_arg_meta *meta)
{ … }
static int process_wq_func(struct bpf_verifier_env *env, int regno,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static int process_kptr_func(struct bpf_verifier_env *env, int regno,
struct bpf_call_arg_meta *meta)
{ … }
static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
enum bpf_arg_type arg_type, int clone_ref_obj_id)
{ … }
static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
{ … }
static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
{ … }
static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
struct bpf_verifier_state *cur,
int insn_idx)
{ … }
static void reset_idmap_scratch(struct bpf_verifier_env *env);
static bool regs_exact(const struct bpf_reg_state *rold,
const struct bpf_reg_state *rcur,
struct bpf_idmap *idmap);
static void maybe_widen_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct bpf_idmap *idmap)
{ … }
static int widen_imprecise_scalars(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{ … }
static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool arg_type_is_mem_size(enum bpf_arg_type type)
{ … }
static bool arg_type_is_release(enum bpf_arg_type type)
{ … }
static bool arg_type_is_dynptr(enum bpf_arg_type type)
{ … }
static int int_ptr_type_to_size(enum bpf_arg_type type)
{ … }
static int resolve_map_arg_type(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_arg_type *arg_type)
{ … }
struct bpf_reg_types { … };
static const struct bpf_reg_types sock_types = …;
#ifdef CONFIG_NET
static const struct bpf_reg_types btf_id_sock_common_types = …;
#endif
static const struct bpf_reg_types mem_types = …;
static const struct bpf_reg_types int_ptr_types = …;
static const struct bpf_reg_types spin_lock_types = …;
static const struct bpf_reg_types fullsock_types = …;
static const struct bpf_reg_types scalar_types = …;
static const struct bpf_reg_types context_types = …;
static const struct bpf_reg_types ringbuf_mem_types = …;
static const struct bpf_reg_types const_map_ptr_types = …;
static const struct bpf_reg_types btf_ptr_types = …;
static const struct bpf_reg_types percpu_btf_ptr_types = …;
static const struct bpf_reg_types func_ptr_types = …;
static const struct bpf_reg_types stack_ptr_types = …;
static const struct bpf_reg_types const_str_ptr_types = …;
static const struct bpf_reg_types timer_types = …;
static const struct bpf_reg_types kptr_types = …;
static const struct bpf_reg_types dynptr_types = …;
static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = …;
static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
const u32 *arg_btf_id,
struct bpf_call_arg_meta *meta)
{ … }
static struct btf_field *
reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
{ … }
static int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
enum bpf_arg_type arg_type)
{ … }
static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
const struct bpf_func_proto *fn,
struct bpf_reg_state *regs)
{ … }
static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{ … }
static int check_reg_const_str(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno)
{ … }
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn,
int insn_idx)
{ … }
static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
{ … }
static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
{ … }
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{ … }
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{ … }
static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
{ … }
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{ … }
static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{ … }
static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{ … }
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{ … }
enum { … };
static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
{ … }
static int release_reference(struct bpf_verifier_env *env,
int ref_obj_id)
{ … }
static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
{ … }
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{ … }
set_callee_state_fn;
static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
set_callee_state_fn set_callee_state_cb,
struct bpf_verifier_state *state)
{ … }
static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
const struct btf *btf,
struct bpf_reg_state *regs)
{ … }
static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs)
{ … }
static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int insn_idx, int subprog,
set_callee_state_fn set_callee_state_cb)
{ … }
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx)
{ … }
int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee)
{ … }
static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx)
{ … }
static int set_map_elem_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static int set_loop_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static int set_timer_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static int set_find_vma_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{ … }
static bool is_rbtree_lock_required_kfunc(u32 btf_id);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
{ … }
static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
{ … }
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{ … }
static int do_refine_retval_range(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, int ret_type,
int func_id,
struct bpf_call_arg_meta *meta)
{ … }
static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
int func_id, int insn_idx)
{ … }
static int
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
int func_id, int insn_idx)
{ … }
static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
{ … }
static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{ … }
static int check_get_func_ip(struct bpf_verifier_env *env)
{ … }
static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{ … }
static bool loop_flag_is_zero(struct bpf_verifier_env *env)
{ … }
static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
{ … }
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{ … }
static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
size_t reg_size)
{ … }
static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_arg_mem_size(const struct btf *btf,
const struct btf_param *arg,
const struct bpf_reg_state *reg)
{ … }
static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
const struct btf_param *arg,
const struct bpf_reg_state *reg)
{ … }
static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
const struct btf_param *arg,
const char *name)
{ … }
enum { … };
BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
const struct btf_param *arg, int type)
{ … }
static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg)
{ … }
static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
const struct btf_param *arg)
{ … }
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
const struct btf *btf,
const struct btf_type *t, int rec)
{ … }
enum kfunc_ptr_arg_type { … };
enum special_kfunc_type { … };
BTF_SET_START(special_kfunc_set)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
#ifdef CONFIG_CGROUPS
BTF_ID(…)
#endif
BTF_SET_END(…)
BTF_ID_LIST(special_kfunc_list)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
BTF_ID(…)
#ifdef CONFIG_CGROUPS
BTF_ID(…)
#else
BTF_ID_UNUSED
#endif
#ifdef CONFIG_BPF_EVENTS
BTF_ID(…)
#else
BTF_ID_UNUSED
#endif
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta)
{ … }
static enum kfunc_ptr_arg_type
get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
struct bpf_kfunc_call_arg_meta *meta,
const struct btf_type *t, const struct btf_type *ref_t,
const char *ref_tname, const struct btf_param *args,
int argno, int nargs)
{ … }
static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const struct btf_type *ref_t,
const char *ref_tname, u32 ref_id,
struct bpf_kfunc_call_arg_meta *meta,
int argno)
{ … }
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
{ … }
static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{ … }
static bool is_bpf_list_api_kfunc(u32 btf_id)
{ … }
static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{ … }
static bool is_bpf_graph_api_kfunc(u32 btf_id)
{ … }
static bool is_sync_callback_calling_kfunc(u32 btf_id)
{ … }
static bool is_async_callback_calling_kfunc(u32 btf_id)
{ … }
static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
{ … }
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
{ … }
static bool is_callback_calling_kfunc(u32 btf_id)
{ … }
static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{ … }
static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
enum btf_field_type head_field_type,
u32 kfunc_btf_id)
{ … }
static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
enum btf_field_type node_field_type,
u32 kfunc_btf_id)
{ … }
static int
__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta,
enum btf_field_type head_field_type,
struct btf_field **head_field)
{ … }
static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static int
__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta,
enum btf_field_type head_field_type,
enum btf_field_type node_field_type,
struct btf_field **node_field)
{ … }
static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
{ … }
static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
{ … }
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
int insn_idx)
{ … }
static int fetch_kfunc_meta(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_kfunc_call_arg_meta *meta,
const char **kfunc_name)
{ … }
static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);
static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{ … }
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
{ … }
enum { … };
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
u32 *alu_limit, bool mask_to_left)
{ … }
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
const struct bpf_insn *insn)
{ … }
static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
u32 alu_state, u32 alu_limit)
{ … }
static int sanitize_val_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{ … }
static bool sanitize_needed(u8 opcode)
{ … }
struct bpf_sanitize_info { … };
static struct bpf_verifier_state *
sanitize_speculative_path(struct bpf_verifier_env *env,
const struct bpf_insn *insn,
u32 next_idx, u32 curr_idx)
{ … }
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg,
struct bpf_reg_state *dst_reg,
struct bpf_sanitize_info *info,
const bool commit_window)
{ … }
static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
{ … }
static int sanitize_err(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int reason,
const struct bpf_reg_state *off_reg,
const struct bpf_reg_state *dst_reg)
{ … }
static int check_stack_access_for_ptr_arithmetic(
struct bpf_verifier_env *env,
int regno,
const struct bpf_reg_state *reg,
int off)
{ … }
static int sanitize_check_bounds(struct bpf_verifier_env *env,
const struct bpf_insn *insn,
const struct bpf_reg_state *dst_reg)
{ … }
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{ … }
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
u64 umin_val, u64 umax_val)
{ … }
static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
u64 umin_val, u64 umax_val)
{ … }
static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{ … }
static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
const struct bpf_reg_state *src_reg)
{ … }
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{ … }
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{ … }
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{ … }
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{ … }
static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
u8 opcode, bool is_jmp32)
{ … }
static int flip_opcode(u32 opcode)
{ … }
static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg,
u8 opcode)
{ … }
static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
u8 opcode, bool is_jmp32)
{ … }
static u8 rev_opcode(u8 opcode)
{ … }
static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
u8 opcode, bool is_jmp32)
{ … }
static int reg_set_min_max(struct bpf_verifier_env *env,
struct bpf_reg_state *true_reg1,
struct bpf_reg_state *true_reg2,
struct bpf_reg_state *false_reg1,
struct bpf_reg_state *false_reg2,
u8 opcode, bool is_jmp32)
{ … }
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
struct bpf_reg_state *reg, u32 id,
bool is_null)
{ … }
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{ … }
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg,
struct bpf_verifier_state *this_branch,
struct bpf_verifier_state *other_branch)
{ … }
static void find_equal_scalars(struct bpf_verifier_state *vstate,
struct bpf_reg_state *known_reg)
{ … }
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{ … }
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{ … }
static bool may_access_skb(enum bpf_prog_type type)
{ … }
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{ … }
static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
{ … }
enum { … };
static void mark_prune_point(struct bpf_verifier_env *env, int idx)
{ … }
static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
{ … }
static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
{ … }
static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
{ … }
static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
{ … }
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
{ … }
enum { … };
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{ … }
static int visit_func_call_insn(int t, struct bpf_insn *insns,
struct bpf_verifier_env *env,
bool visit_callee)
{ … }
static int visit_insn(int t, struct bpf_verifier_env *env)
{ … }
static int check_cfg(struct bpf_verifier_env *env)
{ … }
static int check_abnormal_return(struct bpf_verifier_env *env)
{ … }
#define MIN_BPF_FUNCINFO_SIZE …
#define MAX_FUNCINFO_REC_SIZE …
static int check_btf_func_early(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
static void adjust_btf_func(struct bpf_verifier_env *env)
{ … }
#define MIN_BPF_LINEINFO_SIZE …
#define MAX_LINEINFO_REC_SIZE …
static int check_btf_line(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
#define MIN_CORE_RELO_SIZE …
#define MAX_CORE_RELO_SIZE …
static int check_core_relo(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
static int check_btf_info_early(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
static int check_btf_info(struct bpf_verifier_env *env,
const union bpf_attr *attr,
bpfptr_t uattr)
{ … }
static bool range_within(const struct bpf_reg_state *old,
const struct bpf_reg_state *cur)
{ … }
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{ … }
static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{ … }
static void clean_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *st)
{ … }
static void clean_verifier_state(struct bpf_verifier_env *env,
struct bpf_verifier_state *st)
{ … }
static void clean_live_states(struct bpf_verifier_env *env, int insn,
struct bpf_verifier_state *cur)
{ … }
static bool regs_exact(const struct bpf_reg_state *rold,
const struct bpf_reg_state *rcur,
struct bpf_idmap *idmap)
{ … }
enum exact_level { … };
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
enum exact_level exact)
{ … }
static struct bpf_reg_state unbound_reg;
static __init int unbound_reg_init(void)
{ … }
late_initcall(unbound_reg_init);
static bool is_stack_all_misc(struct bpf_verifier_env *env,
struct bpf_stack_state *stack)
{ … }
static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
struct bpf_stack_state *stack)
{ … }
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, struct bpf_idmap *idmap,
enum exact_level exact)
{ … }
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
struct bpf_idmap *idmap)
{ … }
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, enum exact_level exact)
{ … }
static void reset_idmap_scratch(struct bpf_verifier_env *env)
{ … }
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur,
enum exact_level exact)
{ … }
static int propagate_liveness_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
struct bpf_reg_state *parent_reg)
{ … }
static int propagate_liveness(struct bpf_verifier_env *env,
const struct bpf_verifier_state *vstate,
struct bpf_verifier_state *vparent)
{ … }
static int propagate_precision(struct bpf_verifier_env *env,
const struct bpf_verifier_state *old)
{ … }
static bool states_maybe_looping(struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{ … }
static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{ … }
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
{ … }
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{ … }
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{ … }
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{ … }
static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
bool allow_trust_mismatch)
{ … }
static int do_check(struct bpf_verifier_env *env)
{ … }
static int find_btf_percpu_datasec(struct btf *btf)
{ … }
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_insn_aux_data *aux)
{ … }
static bool is_tracing_prog_type(enum bpf_prog_type type)
{ … }
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{ … }
static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{ … }
static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{ … }
static void release_maps(struct bpf_verifier_env *env)
{ … }
static void release_btfs(struct bpf_verifier_env *env)
{ … }
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{ … }
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
struct bpf_insn_aux_data *new_data,
struct bpf_prog *new_prog, u32 off, u32 cnt)
{ … }
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{ … }
static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{ … }
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{ … }
static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
{ … }
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
u32 off, u32 cnt)
{ … }
static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
u32 cnt)
{ … }
static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{ … }
static void sanitize_dead_code(struct bpf_verifier_env *env)
{ … }
static bool insn_is_cond_jump(u8 code)
{ … }
static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{ … }
static int opt_remove_dead_code(struct bpf_verifier_env *env)
{ … }
static int opt_remove_nops(struct bpf_verifier_env *env)
{ … }
static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
const union bpf_attr *attr)
{ … }
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{ … }
static int jit_subprogs(struct bpf_verifier_env *env)
{ … }
static int fixup_call_args(struct bpf_verifier_env *env)
{ … }
static void specialize_kfunc(struct bpf_verifier_env *env,
u32 func_id, u16 offset, unsigned long *addr)
{ … }
static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
u16 struct_meta_reg,
u16 node_offset_reg,
struct bpf_insn *insn,
struct bpf_insn *insn_buf,
int *cnt)
{ … }
static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{ … }
static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
{ … }
static int do_misc_fixups(struct bpf_verifier_env *env)
{ … }
static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
int position,
s32 stack_base,
u32 callback_subprogno,
u32 *cnt)
{ … }
static bool is_bpf_loop_call(struct bpf_insn *insn)
{ … }
static int optimize_bpf_loop(struct bpf_verifier_env *env)
{ … }
static void free_states(struct bpf_verifier_env *env)
{ … }
static int do_check_common(struct bpf_verifier_env *env, int subprog)
{ … }
static int do_check_subprogs(struct bpf_verifier_env *env)
{ … }
static int do_check_main(struct bpf_verifier_env *env)
{ … }
static void print_verification_stats(struct bpf_verifier_env *env)
{ … }
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{ … }
#define SECURITY_PREFIX …
static int check_attach_modify_return(unsigned long addr, const char *func_name)
{ … }
BTF_SET_START(btf_non_sleepable_error_inject)
BTF_ID(…)
#ifdef CONFIG_FAIL_PAGE_ALLOC
BTF_ID(…)
#endif
#ifdef CONFIG_FAILSLAB
BTF_ID(…)
#endif
BTF_SET_END(…)
static int check_non_sleepable_error_inject(u32 btf_id)
{ … }
int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *prog,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info)
{ … }
BTF_SET_START(btf_id_deny)
BTF_ID_UNUSED
#ifdef CONFIG_SMP
BTF_ID(…)
BTF_ID(…)
#endif
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
#endif
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
BTF_ID(…)
BTF_ID(…)
#endif
#ifdef CONFIG_PREEMPT_RCU
BTF_ID(…)
BTF_ID(…)
#endif
BTF_SET_END(…)
static bool can_be_sleepable(struct bpf_prog *prog)
{ … }
static int check_attach_btf_id(struct bpf_verifier_env *env)
{ … }
struct btf *bpf_get_btf_vmlinux(void)
{ … }
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{ … }