/* linux/kernel/bpf/btf.c */

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

#include <net/netfilter/nf_bpf_link.h>

#include <net/sock.h>
#include <net/xdp.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the meta data format which describes
 * the data types of BPF program/map.  Hence, it basically focus
 * on the C programming language which the modern BPF is primary
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  F.e.
 * To describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *".  A btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line started with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR has no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_type is there, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * In the first pass, it still does some verifications (e.g.
 * checking the name is a valid offset to the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedge.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

/* NOTE(review): every macro value, function body, struct body and
 * initializer in this file is elided — this is a skeleton view of the
 * kernel's btf.c, not compilable source.  Values/bodies must be taken
 * from the full kernel tree; comments below are inferred from names.
 */
#define BITS_PER_U128
#define BITS_PER_BYTE_MASK
#define BITS_PER_BYTE_MASKED(bits)
#define BITS_ROUNDDOWN_BYTES(bits)
#define BITS_ROUNDUP_BYTES(bits)

#define BTF_INFO_MASK
#define BTF_INT_MASK
#define BTF_TYPE_ID_VALID(type_id)
#define BTF_STR_OFFSET_VALID(name_off)

/* 16MB for 64k structs and each has 16 members and
 * a few MB spaces for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE

/* Iterate struct members starting at index @from (body elided). */
#define for_each_member_from(i, from, struct_type, member)

/* Iterate DATASEC var_secinfo entries starting at index @from (body elided). */
#define for_each_vsi_from(i, from, struct_type, member)

/* Presumably the global BTF-object IDR and its protecting spinlock;
 * the identifier arguments are elided in this view — confirm.
 */
DEFINE_IDR();
DEFINE_SPINLOCK();

/* Hook points at which kfunc ID sets may be registered (values elided). */
enum btf_kfunc_hook {};

enum {};

struct btf_kfunc_hook_filter {};

/* Per-BTF table of registered kfunc ID sets, keyed by hook (fields elided). */
struct btf_kfunc_set_tab {};

struct btf_id_dtor_kfunc_tab {};

struct btf_struct_ops_tab {};

/* In-kernel representation of one loaded BTF object (fields elided). */
struct btf {};

enum verifier_phase {};

/* One frame of the DFS used by pass #2 type resolution (fields elided). */
struct resolve_vertex {};

enum visit_state {};

enum resolve_mode {};

#define MAX_RESOLVE_DEPTH

struct btf_sec_info {};

/* State carried through both BTF verification passes (fields elided). */
struct btf_verifier_env {};

/* Printable name for each BTF_KIND_* value (initializer elided). */
static const char * const btf_kind_str[NR_BTF_KINDS] =;

/* Return the printable kind name for @t — presumably indexes
 * btf_kind_str[] by the type's kind (body elided; confirm).
 */
const char *btf_type_str(const struct btf_type *t)
{}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE

/* Type name size */
#define BTF_SHOW_NAME_SIZE

/*
 * The suffix of a type that indicates it cannot alias another type when
 * comparing BTF IDs for kfunc invocations.
 */
#define NOCAST_ALIAS_SUFFIX

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback.  See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it.  As a result, we need to make two passes
 * when handling structs, unions and arrays; the first path simply looks
 * for nonzero data, while the second actually does the display.  The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it.  When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth.  See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access.  To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it.  We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_COPY_LEN bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself).  btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data.  skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts.  obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies.  The logic in
 * btf_show_obj_safe() handles the logic that determines if a new
 * copy_from_kernel_nofault() is needed.
 */
struct btf_show {};

/* Per-kind callbacks (check_meta/resolve/show/...) — members elided. */
struct btf_kind_operations {};

/* Dispatch table mapping BTF_KIND_* to its operations (initializer elided). */
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
/* Singleton representing the "void" type (type_id 0, see header comment). */
static struct btf_type btf_void;

/* Forward declarations; both are defined later in this file. */
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

/* Kind predicates over struct btf_type (all bodies elided in this view). */
static bool btf_type_is_modifier(const struct btf_type *t)
{}

bool btf_type_is_void(const struct btf_type *t)
{}

static bool btf_type_is_fwd(const struct btf_type *t)
{}

static bool btf_type_is_datasec(const struct btf_type *t)
{}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{}

/* True for kinds whose size is not meaningful (e.g. void/fwd) — confirm. */
static bool btf_type_nosize(const struct btf_type *t)
{}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{}

bool btf_is_vmlinux(const struct btf *btf)
{}

/* Number of types in @btf, presumably including base BTF — confirm. */
u32 btf_nr_types(const struct btf *btf)
{}

/* Look up a type id by name and kind within @btf (body elided). */
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{}

/* Search all loaded BTF objects for @name/@kind; on success presumably
 * returns the id and a referenced btf in *btf_p — confirm ownership.
 */
s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{}

/* Walk past modifier kinds (const/volatile/typedef/...) starting at @id;
 * if @res_id is non-NULL it presumably receives the final id — confirm.
 */
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  Array can be thought of a
 * special case of struct while array just has the same
 * member-type repeated by array->nelems of times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{}

static const char *btf_int_encoding_str(u8 encoding)
{}

/* Accessors for the kind-specific data that follows struct btf_type
 * (see the "struct btf_type" section of the file header). Bodies elided.
 */
static u32 btf_type_int(const struct btf_type *t)
{}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{}

static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
{}

/* Fetch the kind_ops[] entry for @t's kind — confirm against full source. */
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{}

/* String-section helpers: offsets index the '\0'-terminated names in the
 * ".BTF" string section (see the "String section" header comment).
 */
static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{}

static bool __btf_name_char_ok(char c, bool first)
{}

const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{}

static bool __btf_name_valid(const struct btf *btf, u32 offset)
{}

static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{}

/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{}

/* Return the btf_type for @type_id; id 0 presumably maps to btf_void. */
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{}
EXPORT_SYMBOL_GPL();

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{}

/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{}

#define BTF_SHOW_MAX_ITER

#define BTF_KIND_BIT(kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{}

/* Indentation/newline/delimiter helpers for the show output (bodies elided). */
static const char *__btf_show_indent(struct btf_show *show)
{}

static const char *btf_show_indent(struct btf_show *show)
{}

static const char *btf_show_newline(struct btf_show *show)
{}

static const char *btf_show_delim(struct btf_show *show)
{}

/* Core printf-like emitter for show output (body elided). */
__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)

#define btf_show_type_values(show, fmt, ...)

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data.  Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{}

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{}

static void btf_show_end_type(struct btf_show *show)
{}

/* Paired start/end helpers bracketing aggregate (struct/union/array)
 * output, member output, and array/struct specifics.  Bodies elided.
 */
static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{}

static void btf_show_start_array_member(struct btf_show *show)
{}

static void btf_show_end_member(struct btf_show *show)
{}

static void btf_show_end_array_member(struct btf_show *show)
{}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{}

static void btf_show_end_array_type(struct btf_show *show)
{}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{}

static void btf_show_end_struct_type(struct btf_show *show)
{}

/* Verifier-log emitters used during BTF verification (bodies elided).
 * These presumably produce the "[?] KIND name ..." lines described in
 * the file header — confirm against full source.
 */
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{}

#define btf_verifier_log_type(env, t, ...)
#define btf_verifier_log_basic(env, t, ...)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{}

/* Pass #1 helper: append @t to btf->types[] (see "BTF verification"
 * header comment).  Body elided.
 */
static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{}

/* ID allocation — presumably against the file-scope IDR above; confirm. */
static int btf_alloc_id(struct btf *btf)
{}

static void btf_free_id(struct btf *btf)
{}

/* Teardown helpers for the per-BTF side tables (bodies elided). */
static void btf_free_kfunc_set_tab(struct btf *btf)
{}

static void btf_free_dtor_kfunc_tab(struct btf *btf)
{}

static void btf_struct_metas_free(struct btf_struct_metas *tab)
{}

static void btf_free_struct_meta_tab(struct btf *btf)
{}

static void btf_free_struct_ops_tab(struct btf *btf)
{}

static void btf_free(struct btf *btf)
{}

/* RCU callback form of btf_free() — confirm against full source. */
static void btf_free_rcu(struct rcu_head *rcu)
{}

const char *btf_get_name(const struct btf *btf)
{}

/* Reference counting for struct btf (bodies elided). */
void btf_get(struct btf *btf)
{}

void btf_put(struct btf *btf)
{}

struct btf *btf_base_btf(const struct btf *btf)
{}

const struct btf_header *btf_header(const struct btf *btf)
{}

void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{}

/* Pass #2 machinery: the explicit DFS stack used to resolve type
 * references and detect back-edges (see "Pass #2" header comment).
 * All bodies elided in this view.
 */
static int env_resolve_init(struct btf_verifier_env *env)
{}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{}

/* Peek the DFS stack top (note: kernel spells this "peak"). */
static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{}

/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y).  Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{}

/* Public wrapper around __btf_resolve_size() returning only *type_size. */
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size)
{}

static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
{}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{}

static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
{}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{}

/* "df" (default) kind-op stubs — presumably reject/ignore operations that
 * a given kind does not support; confirm against full source.
 */
static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{}

/* Used for ptr, array struct/union and float type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{}

static void btf_df_show(const struct btf *btf, const struct btf_type *t,
			u32 type_id, void *data, u8 bits_offsets,
			struct btf_show *show)
{}

/* BTF_KIND_INT callbacks: member layout checks, meta validation,
 * logging and value display (bodies elided).
 */
static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{}

static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{}

static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{}

static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{}

static void btf_int128_print(struct btf_show *show, void *data)
{}

static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
			     u16 right_shift_bits)
{}

static void btf_bitfield_show(void *data, u8 bits_offset,
			      u8 nr_bits, struct btf_show *show)
{}


static void btf_int_bits_show(const struct btf *btf,
			      const struct btf_type *t,
			      void *data, u8 bits_offset,
			      struct btf_show *show)
{}

static void btf_int_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{}

/* kind_ops entry for BTF_KIND_INT (initializer elided). */
static const struct btf_kind_operations int_ops =;

/* BTF_KIND_CONST/VOLATILE/RESTRICT/TYPEDEF (modifier), BTF_KIND_PTR and
 * BTF_KIND_FWD callbacks (bodies elided).  The resolve callbacks implement
 * the DFS rules described in the "Pass #2" header comment.
 */
static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{}

static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
					   const struct btf_type *struct_type,
					   const struct btf_member *member,
					   const struct btf_type *member_type)
{}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{}

static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{}

static void btf_modifier_show(const struct btf *btf,
			      const struct btf_type *t,
			      u32 type_id, void *data,
			      u8 bits_offset, struct btf_show *show)
{}

static void btf_var_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{}

static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{}

static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{}

static struct btf_kind_operations modifier_ops =;

static struct btf_kind_operations ptr_ops =;

static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{}

static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{}

static struct btf_kind_operations fwd_ops =;

/* BTF_KIND_ARRAY callbacks (bodies elided).  Per the header comment,
 * an array behaves like a struct with one member type repeated
 * array->nelems times.
 */
static int btf_array_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{}

static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{}

static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{}

static void btf_array_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{}

static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct btf_show *show)
{}

/* Two-pass display wrapper (zero-skip check then print) — see the
 * struct btf_show comment block; confirm against full source.
 */
static void btf_array_show(const struct btf *btf, const struct btf_type *t,
			   u32 type_id, void *data, u8 bits_offset,
			   struct btf_show *show)
{}

static struct btf_kind_operations array_ops =;

/* BTF_KIND_STRUCT/UNION callbacks (bodies elided).  Resolution must visit
 * every member (see the "Pass #2" header comment on struct resolution).
 */
static int btf_struct_check_member(struct btf_verifier_env *env,
				   const struct btf_type *struct_type,
				   const struct btf_member *member,
				   const struct btf_type *member_type)
{}

static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{}

static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{}

static void btf_struct_log(struct btf_verifier_env *env,
			   const struct btf_type *t)
{}

enum {};

/* Describes one special field found in a map value / struct (fields elided). */
struct btf_field_info {};

/* Finders for special BPF field types (spin_lock, timer, kptr, list/rbtree
 * roots, decl tags ...) inside struct/datasec layouts.  Bodies elided.
 */
static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
			   u32 off, int sz, enum btf_field_type field_type,
			   struct btf_field_info *info)
{}

static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
			 u32 off, int sz, struct btf_field_info *info)
{}

int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
			   int comp_idx, const char *tag_key, int last_id)
{}

const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
				    int comp_idx, const char *tag_key)
{}

static int
btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
		    const struct btf_type *t, int comp_idx, u32 off,
		    int sz, struct btf_field_info *info,
		    enum btf_field_type head_type)
{}

#define field_mask_test_name

static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
			      u32 field_mask, u32 *seen_mask,
			      int *align, int *sz)
{}

#undef field_mask_test_name

/* Repeat a number of fields for a specified number of times.
 *
 * Copy the fields starting from the first field and repeat them for
 * repeat_cnt times. The fields are repeated by adding the offset of each
 * field with
 *   (i + 1) * elem_size
 * where i is the repeat index and elem_size is the size of an element.
 */
static int btf_repeat_fields(struct btf_field_info *info,
			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
{}

/* Forward declaration: mutually recursive with btf_find_nested_struct(). */
static int btf_find_struct_field(const struct btf *btf,
				 const struct btf_type *t, u32 field_mask,
				 struct btf_field_info *info, int info_cnt,
				 u32 level);

/* Find special fields in the struct type of a field.
 *
 * This function is used to find fields of special types that is not a
 * global variable or a direct field of a struct type. It also handles the
 * repetition if it is the element type of an array.
 */
static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
				  u32 off, u32 nelems,
				  u32 field_mask, struct btf_field_info *info,
				  int info_cnt, u32 level)
{}

/* Examine one variable/member at @off for a special field type;
 * @level presumably bounds the nesting recursion — confirm.
 */
static int btf_find_field_one(const struct btf *btf,
			      const struct btf_type *var,
			      const struct btf_type *var_type,
			      int var_idx,
			      u32 off, u32 expected_size,
			      u32 field_mask, u32 *seen_mask,
			      struct btf_field_info *info, int info_cnt,
			      u32 level)
{}

static int btf_find_struct_field(const struct btf *btf,
				 const struct btf_type *t, u32 field_mask,
				 struct btf_field_info *info, int info_cnt,
				 u32 level)
{}

static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
				u32 field_mask, struct btf_field_info *info,
				int info_cnt, u32 level)
{}

/* Entry point: dispatch on @t's kind to the struct/datasec finders. */
static int btf_find_field(const struct btf *btf, const struct btf_type *t,
			  u32 field_mask, struct btf_field_info *info,
			  int info_cnt)
{}

/* Builders converting btf_field_info results into btf_field records
 * (kptr, bpf_list_head, bpf_rb_root).  Bodies elided.
 */
static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
			  struct btf_field_info *info)
{}

static int btf_parse_graph_root(const struct btf *btf,
				struct btf_field *field,
				struct btf_field_info *info,
				const char *node_type_name,
				size_t node_type_align)
{}

static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
			       struct btf_field_info *info)
{}

static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
			     struct btf_field_info *info)
{}

/* Comparator for sorting btf_field entries (used with sort_r?) — confirm. */
static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
{}

/* Build the btf_record describing all special fields of @t. */
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
				    u32 field_mask, u32 value_size)
{}

int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
{}

static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
			      u32 type_id, void *data, u8 bits_offset,
			      struct btf_show *show)
{}

/* Two-pass display wrapper for structs — see struct btf_show comment. */
static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offset,
			    struct btf_show *show)
{}

static struct btf_kind_operations struct_ops =;

/* BTF_KIND_ENUM and BTF_KIND_ENUM64 callbacks (bodies elided). */
static int btf_enum_check_member(struct btf_verifier_env *env,
				 const struct btf_type *struct_type,
				 const struct btf_member *member,
				 const struct btf_type *member_type)
{}

static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
				       const struct btf_type *struct_type,
				       const struct btf_member *member,
				       const struct btf_type *member_type)
{}

static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{}

static void btf_enum_log(struct btf_verifier_env *env,
			 const struct btf_type *t)
{}

static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
			  u32 type_id, void *data, u8 bits_offset,
			  struct btf_show *show)
{}

static struct btf_kind_operations enum_ops =;

static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{}

static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offset,
			    struct btf_show *show)
{}

static struct btf_kind_operations enum64_ops =;

static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{}

static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{}

static struct btf_kind_operations func_proto_ops =;

static s32 btf_func_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{}

static int btf_func_resolve(struct btf_verifier_env *env,
			    const struct resolve_vertex *v)
{}

static struct btf_kind_operations func_ops =;

static s32 btf_var_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{}

static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
{}

static const struct btf_kind_operations var_ops =;

static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
				  const struct btf_type *t,
				  u32 meta_left)
{}

static int btf_datasec_resolve(struct btf_verifier_env *env,
			       const struct resolve_vertex *v)
{}

static void btf_datasec_log(struct btf_verifier_env *env,
			    const struct btf_type *t)
{}

static void btf_datasec_show(const struct btf *btf,
			     const struct btf_type *t, u32 type_id,
			     void *data, u8 bits_offset,
			     struct btf_show *show)
{}

static const struct btf_kind_operations datasec_ops =;

static s32 btf_float_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{}

static int btf_float_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{}

static void btf_float_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{}

static const struct btf_kind_operations float_ops =;

static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{}

static int btf_decl_tag_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{}

static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
{}

static const struct btf_kind_operations decl_tag_ops =;

static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{}

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t)
{}

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] =;

static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{}

static int btf_check_all_metas(struct btf_verifier_env *env)
{}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{}

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{}

static int btf_check_all_types(struct btf_verifier_env *env)
{}

static int btf_parse_type_sec(struct btf_verifier_env *env)
{}

static int btf_parse_str_sec(struct btf_verifier_env *env)
{}

static const size_t btf_sec_info_offset[] =;

static int btf_sec_info_cmp(const void *a, const void *b)
{}

static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{}

static int btf_parse_hdr(struct btf_verifier_env *env)
{}

static const char *alloc_obj_fields[] =;

static struct btf_struct_metas *
btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
{}

struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
{}

static int btf_check_type_tags(struct btf_verifier_env *env,
			       struct btf *btf, int start_id)
{}

static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
{}

static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{}

extern char __start_BTF[];
extern char __stop_BTF[];
extern struct btf *btf_vmlinux;

#define BPF_MAP_TYPE
#define BPF_LINK_TYPE
static union {} bpf_ctx_convert;
enum {};
static u8 bpf_ctx_convert_map[] =;
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
{}

static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
{}

bool btf_is_projection_of(const char *pname, const char *tname)
{}

bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
			  const struct btf_type *t, enum bpf_prog_type prog_type,
			  int arg)
{}

/* forward declarations for arch-specific underlying types of
 * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
 * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
 * works correctly with __builtin_types_compatible_p() on respective
 * architectures
 */
struct user_regs_struct;
struct user_pt_regs;

static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
				      const struct btf_type *t, int arg,
				      enum bpf_prog_type prog_type,
				      enum bpf_attach_type attach_type)
{}

static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
				     struct btf *btf,
				     const struct btf_type *t,
				     enum bpf_prog_type prog_type,
				     int arg)
{}

int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
{}

BTF_ID_LIST(bpf_ctx_convert_btf_id)
BTF_ID()

static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
				  void *data, unsigned int data_size)
{}

struct btf *btf_parse_vmlinux(void)
{}

/* If .BTF_ids section was created with distilled base BTF, both base and
 * split BTF ids will need to be mapped to actual base/split ids for
 * BTF now that it has been relocated.
 */
static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
{}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES

static struct btf *btf_parse_module(const char *module_name, const void *data,
				    unsigned int data_size, void *base_data,
				    unsigned int base_data_size)
{
	struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	int err = 0;

	vmlinux_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(vmlinux_btf))
		return vmlinux_btf;
	if (!vmlinux_btf)
		return ERR_PTR(-EINVAL);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	log->level = BPF_LOG_KERNEL;

	if (base_data) {
		/* Module ships a distilled .BTF.base section; parse it now
		 * and relocate the split BTF against vmlinux BTF below.
		 */
		base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
		if (IS_ERR(base_btf)) {
			err = PTR_ERR(base_btf);
			goto errout;
		}
	} else {
		base_btf = vmlinux_btf;
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	/* Split BTF: type ids and string offsets continue where the base
	 * BTF's numbering ends.
	 */
	btf->base_btf = base_btf;
	btf->start_id = base_btf->nr_types;
	btf->start_str_off = base_btf->hdr.str_len;
	btf->kernel_btf = true;
	snprintf(btf->name, sizeof(btf->name), "%s", module_name);

	/* Keep a private copy of the raw BTF data; the module image may
	 * not outlive this btf object.
	 */
	btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!btf->data) {
		err = -ENOMEM;
		goto errout;
	}
	memcpy(btf->data, data, data_size);
	btf->data_size = data_size;

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_check_all_metas(env);
	if (err)
		goto errout;

	err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
	if (err)
		goto errout;

	if (base_btf != vmlinux_btf) {
		/* Map distilled-base ids to actual vmlinux ids, then drop
		 * the temporary distilled base and rebase on vmlinux BTF.
		 */
		err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
		if (err)
			goto errout;
		btf_free(base_btf);
		base_btf = vmlinux_btf;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	/* base_btf may be an ERR_PTR if btf_parse_base() failed above;
	 * never hand that to btf_free().
	 */
	if (!IS_ERR_OR_NULL(base_btf) && base_btf != vmlinux_btf)
		btf_free(base_btf);
	if (btf) {
		kvfree(btf->data);
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
}

#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */

struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{}

static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
{}

static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
			   int off)
{}

static bool prog_args_trusted(const struct bpf_prog *prog)
{}

int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
		       u32 arg_no)
{}

bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info)
{}
EXPORT_SYMBOL_GPL();

enum bpf_struct_walk_result {};

static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
			   const struct btf_type *t, int off, int size,
			   u32 *next_btf_id, enum bpf_type_flag *flag,
			   const char **field_name)
{}

int btf_struct_access(struct bpf_verifier_log *log,
		      const struct bpf_reg_state *reg,
		      int off, int size, enum bpf_access_type atype __maybe_unused,
		      u32 *next_btf_id, enum bpf_type_flag *flag,
		      const char **field_name)
{}

/* Check that two BTF types, each specified as an BTF object + id, are exactly
 * the same. Trivial ID check is not enough due to module BTFs, because we can
 * end up with two different module BTFs, but IDs point to the common type in
 * vmlinux BTF.
 */
bool btf_types_are_same(const struct btf *btf1, u32 id1,
			const struct btf *btf2, u32 id2)
{}

bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id,
			  bool strict)
{}

static int __get_type_size(struct btf *btf, u32 btf_id,
			   const struct btf_type **ret_type)
{}

static u8 __get_type_fmodel_flags(const struct btf_type *t)
{}

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func,
			   const char *tname,
			   struct btf_func_model *m)
{}

/* Compare BTFs of two functions assuming only scalars and pointers to context.
 * t1 points to BTF_KIND_FUNC in btf1
 * t2 points to BTF_KIND_FUNC in btf2
 * Returns:
 * EINVAL - function prototype mismatch
 * EFAULT - verifier bug
 * 0 - 99% match. The last 1% is validated by the verifier.
 */
static int btf_check_func_type_match(struct bpf_verifier_log *log,
				     struct btf *btf1, const struct btf_type *t1,
				     struct btf *btf2, const struct btf_type *t2)
{}

/* Compare BTFs of given program with BTF of target program */
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf2, const struct btf_type *t2)
{}

static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
{}

struct bpf_cand_cache {};

static DEFINE_MUTEX(cand_cache_mutex);

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);

static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
				 const struct btf *btf, const struct btf_type *t)
{}

enum btf_arg_tag {};

/* Process BTF of a function to produce high-level expectation of function
 * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
 * is cached in subprog info for reuse.
 * Returns:
 * EFAULT - there is a verifier bug. Abort verification.
 * EINVAL - cannot convert BTF.
 * 0 - Successfully processed BTF and constructed argument expectations.
 */
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
{}

static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
			  struct btf_show *show)
{}

__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
					va_list args)
{}

int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
			    void *obj, struct seq_file *m, u64 flags)
{}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{}

struct btf_show_snprintf {};

__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
					     va_list args)
{}

int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
			   char *buf, int len, u64 flags)
{}

#ifdef CONFIG_PROC_FS
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{}
#endif

static int btf_release(struct inode *inode, struct file *filp)
{}

const struct file_operations btf_fops =;

static int __btf_new_fd(struct btf *btf)
{}

int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{}

struct btf *btf_get_by_fd(int fd)
{}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{}

int btf_get_fd_by_id(u32 id)
{}

u32 btf_obj_id(const struct btf *btf)
{}

bool btf_is_kernel(const struct btf *btf)
{}

bool btf_is_module(const struct btf *btf)
{}

enum {};

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
struct btf_module {
	struct list_head list;
	struct module *module;
	struct btf *btf;
	struct bin_attribute *sysfs_attr;
	int flags;
};

static LIST_HEAD(btf_modules);
static DEFINE_MUTEX(btf_module_mutex);

static ssize_t
btf_module_read(struct file *file, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t off, size_t len)
{
	/* sysfs read callback for a module's raw BTF blob; the sysfs core
	 * clamps off/len to the attribute size declared at registration.
	 */
	const struct btf *mod_btf = bin_attr->private;

	memcpy(buf, mod_btf->data + off, len);

	return len;
}

static void purge_cand_cache(struct btf *btf);

/* Module lifecycle notifier: parse a module's BTF on load, publish it via
 * sysfs, mark it live, and tear everything down on unload.
 */
static int btf_module_notify(struct notifier_block *nb, unsigned long op,
			     void *module)
{
	struct btf_module *btf_mod, *tmp;
	struct module *mod = module;
	struct btf *btf;
	int err = 0;

	/* Ignore modules without BTF data and lifecycle events we don't track. */
	if (mod->btf_data_size == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
	     op != MODULE_STATE_GOING))
		goto out;

	switch (op) {
	case MODULE_STATE_COMING:
		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
		if (!btf_mod) {
			err = -ENOMEM;
			goto out;
		}
		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
				       mod->btf_base_data, mod->btf_base_data_size);
		if (IS_ERR(btf)) {
			kfree(btf_mod);
			/* With CONFIG_MODULE_ALLOW_BTF_MISMATCH, a bad module
			 * BTF is tolerated (err stays 0); otherwise the load
			 * is failed by propagating the parse error.
			 */
			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
				pr_warn("failed to validate module [%s] BTF: %ld\n",
					mod->name, PTR_ERR(btf));
				err = PTR_ERR(btf);
			} else {
				pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
			}
			goto out;
		}
		err = btf_alloc_id(btf);
		if (err) {
			btf_free(btf);
			kfree(btf_mod);
			goto out;
		}

		/* New module may add type candidates matching cached names,
		 * so invalidate the whole module candidate cache.
		 */
		purge_cand_cache(NULL);
		mutex_lock(&btf_module_mutex);
		btf_mod->module = module;
		btf_mod->btf = btf;
		list_add(&btf_mod->list, &btf_modules);
		mutex_unlock(&btf_module_mutex);

		if (IS_ENABLED(CONFIG_SYSFS)) {
			struct bin_attribute *attr;

			/* sysfs exposure is best effort: allocation failure
			 * here leaves err == 0 and skips the sysfs file.
			 */
			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
			if (!attr)
				goto out;

			sysfs_bin_attr_init(attr);
			attr->attr.name = btf->name;
			attr->attr.mode = 0444;
			attr->size = btf->data_size;
			attr->private = btf;
			attr->read = btf_module_read;

			err = sysfs_create_bin_file(btf_kobj, attr);
			if (err) {
				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
					mod->name, err);
				kfree(attr);
				/* Best effort again: don't fail module load. */
				err = 0;
				goto out;
			}

			btf_mod->sysfs_attr = attr;
		}

		break;
	case MODULE_STATE_LIVE:
		/* Module finished init: flag its BTF entry as live. */
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			btf_mod->flags |= BTF_MODULE_F_LIVE;
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	case MODULE_STATE_GOING:
		/* Unload: unlink the entry, remove the sysfs file, drop
		 * cached candidates referencing this BTF, then release it.
		 */
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			list_del(&btf_mod->list);
			if (btf_mod->sysfs_attr)
				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
			purge_cand_cache(btf_mod->btf);
			btf_put(btf_mod->btf);
			kfree(btf_mod->sysfs_attr);
			kfree(btf_mod);
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	}
out:
	return notifier_from_errno(err);
}

/* Hooks btf_module_notify() into the module load/unload notifier chain. */
static struct notifier_block btf_module_nb = {
	.notifier_call = btf_module_notify,
};

/* Register the module notifier at fs_initcall time so BTF of modules
 * loaded during boot is picked up.
 */
static int __init btf_module_init(void)
{
	register_module_notifier(&btf_module_nb);
	return 0;
}

fs_initcall(btf_module_init);
#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */

struct module *btf_try_get_module(const struct btf *btf)
{}

/* Returns struct btf corresponding to the struct module.
 * This function can return NULL or ERR_PTR.
 */
static struct btf *btf_get_module_btf(const struct module *module)
{}

static int check_btf_kconfigs(const struct module *module, const char *feature)
{}

BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{}

const struct bpf_func_proto bpf_btf_find_by_name_kind_proto =;

BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE

static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
				 const struct btf_type *func, u32 func_flags)
{}

static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
{}

/* Kernel Function (kfunc) BTF ID set registration API */

static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
				  const struct btf_kfunc_id_set *kset)
{}

static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
					enum btf_kfunc_hook hook,
					u32 kfunc_btf_id,
					const struct bpf_prog *prog)
{}

static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
{}

/* Caution:
 * Reference to the module (obtained using btf_try_get_module) corresponding to
 * the struct btf *MUST* be held when calling this function from verifier
 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
 * keeping the reference for the duration of the call provides the necessary
 * protection for looking up a well-formed btf->kfunc_set_tab.
 */
u32 *btf_kfunc_id_set_contains(const struct btf *btf,
			       u32 kfunc_btf_id,
			       const struct bpf_prog *prog)
{}

u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
				const struct bpf_prog *prog)
{}

static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
				       const struct btf_kfunc_id_set *kset)
{}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
			      const struct btf_kfunc_id_set *kset)
{}
EXPORT_SYMBOL_GPL();

/* This function must be invoked only from initcalls/module init functions */
int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
{}
EXPORT_SYMBOL_GPL();

s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
{}

static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
{}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
				struct module *owner)
{}
EXPORT_SYMBOL_GPL();

#define MAX_TYPES_ARE_COMPAT_DEPTH

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules from
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs/ENUM64s, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 *     number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{}

#define MAX_TYPES_MATCH_DEPTH

int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
			 const struct btf *targ_btf, u32 targ_id)
{}

static bool bpf_core_is_flavor_sep(const char *s)
{}

size_t bpf_core_essential_name_len(const char *name)
{}

static void bpf_free_cands(struct bpf_cand_cache *cands)
{}

static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
{}

#define VMLINUX_CAND_CACHE_SIZE
static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];

#define MODULE_CAND_CACHE_SIZE
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];

static void __print_cand_cache(struct bpf_verifier_log *log,
			       struct bpf_cand_cache **cache,
			       int cache_size)
{}

static void print_cand_cache(struct bpf_verifier_log *log)
{}

static u32 hash_cands(struct bpf_cand_cache *cands)
{}

static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
					       struct bpf_cand_cache **cache,
					       int cache_size)
{}

static size_t sizeof_cands(int cnt)
{}

static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
						  struct bpf_cand_cache **cache,
						  int cache_size)
{}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
/* Drop stale CO-RE candidate cache entries. With btf == NULL every slot is
 * flushed; otherwise only slots holding a candidate from @btf are flushed.
 * Caller must hold cand_cache_mutex.
 */
static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
			       int cache_size)
{
	int slot, idx;

	for (slot = 0; slot < cache_size; slot++) {
		struct bpf_cand_cache *cand = cache[slot];

		if (!cand)
			continue;

		if (!btf) {
			/* when new module is loaded purge all of module_cand_cache,
			 * since new module might have candidates with the name
			 * that matches cached cands.
			 */
			bpf_free_cands_from_cache(cand);
			cache[slot] = NULL;
			continue;
		}

		/* when module is unloaded purge cache entries
		 * that match module's btf
		 */
		for (idx = 0; idx < cand->cnt; idx++) {
			if (cand->cands[idx].btf == btf) {
				bpf_free_cands_from_cache(cand);
				cache[slot] = NULL;
				break;
			}
		}
	}
}

/* Locked wrapper: purge the module candidate cache (all entries when
 * btf == NULL, otherwise only entries referencing @btf).
 */
static void purge_cand_cache(struct btf *btf)
{
	mutex_lock(&cand_cache_mutex);
	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	mutex_unlock(&cand_cache_mutex);
}
#endif

static struct bpf_cand_cache *
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
		   int targ_start_id)
{}

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{}

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{}

bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
				const struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id, const char *suffix)
{}

bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
			       const struct btf *reg_btf, u32 reg_id,
			       const struct btf *arg_btf, u32 arg_id)
{}

#ifdef CONFIG_BPF_JIT
static int
btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
		   struct bpf_verifier_log *log)
{}

const struct bpf_struct_ops_desc *
bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
{}

const struct bpf_struct_ops_desc *
bpf_struct_ops_find(struct btf *btf, u32 type_id)
{}

int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
{}
EXPORT_SYMBOL_GPL();
#endif

bool btf_param_match_suffix(const struct btf *btf,
			    const struct btf_param *arg,
			    const char *suffix)
{}