linux/kernel/bpf/helpers.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback so that the verifier can properly check the arguments; a sketch of
 * such a callback follows bpf_map_lookup_elem_proto below.
 *
 * Different map implementations rely on RCU in the lookup/update/delete map
 * methods, therefore eBPF programs must run under an RCU lock whenever a
 * program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{}

const struct bpf_func_proto bpf_map_lookup_elem_proto =;
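
/* Illustrative sketch (not part of this file): a hypothetical subsystem's
 * get_func_proto() callback exposing this helper, assuming the program type
 * already runs under the required RCU protection:
 *
 * static const struct bpf_func_proto *
 * foo_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 * {
 *	switch (func_id) {
 *	case BPF_FUNC_map_lookup_elem:
 *		return &bpf_map_lookup_elem_proto;
 *	default:
 *		return bpf_base_func_proto(func_id, prog);
 *	}
 * }
 */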

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{}

const struct bpf_func_proto bpf_map_update_elem_proto =;

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{}

const struct bpf_func_proto bpf_map_delete_elem_proto =;

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{}

const struct bpf_func_proto bpf_map_push_elem_proto =;

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{}

const struct bpf_func_proto bpf_map_pop_elem_proto =;

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{}

const struct bpf_func_proto bpf_map_peek_elem_proto =;

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto =;

const struct bpf_func_proto bpf_get_prandom_u32_proto =;

BPF_CALL_0(bpf_get_smp_processor_id)
{}

const struct bpf_func_proto bpf_get_smp_processor_id_proto =;

BPF_CALL_0(bpf_get_numa_node_id)
{}

const struct bpf_func_proto bpf_get_numa_node_id_proto =;

BPF_CALL_0(bpf_ktime_get_ns)
{}

const struct bpf_func_proto bpf_ktime_get_ns_proto =;

BPF_CALL_0(bpf_ktime_get_boot_ns)
{}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto =;

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto =;

BPF_CALL_0(bpf_ktime_get_tai_ns)
{}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto =;

BPF_CALL_0(bpf_get_current_pid_tgid)
{}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto =;

BPF_CALL_0(bpf_get_current_uid_gid)
{}

const struct bpf_func_proto bpf_get_current_uid_gid_proto =;

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{}

const struct bpf_func_proto bpf_get_current_comm_proto =;

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
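	/* Test-and-test-and-set: spin with a relaxed read until the lock word
	 * reads zero, then try to take the lock with an atomic xchg; a
	 * non-zero return means another CPU won the race, so spin again.
	 */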
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{}

const struct bpf_func_proto bpf_spin_lock_proto =;

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{}

const struct bpf_func_proto bpf_spin_unlock_proto =;

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{}

BPF_CALL_0(bpf_jiffies64)
{}

const struct bpf_func_proto bpf_jiffies64_proto =;

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto =;

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto =;
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{}

const struct bpf_func_proto bpf_strtol_proto =;

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{}

const struct bpf_func_proto bpf_strtoul_proto =;

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{}

static const struct bpf_func_proto bpf_strncmp_proto =;

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto =;

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto =;

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{}

const struct bpf_func_proto bpf_event_output_data_proto =;

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{}

const struct bpf_func_proto bpf_copy_from_user_proto =;

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{}

const struct bpf_func_proto bpf_copy_from_user_task_proto =;

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{}

const struct bpf_func_proto bpf_per_cpu_ptr_proto =;

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{}

const struct bpf_func_proto bpf_this_cpu_ptr_proto =;

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL
struct bpf_bprintf_buffers {};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{}
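
/* Illustrative sketch of the argument-preparation mode described above,
 * modelled on how the bpf_snprintf() helper below uses this API (the
 * struct bpf_bprintf_data field names and the local variables are assumptions
 * for the sketch, not definitions from this file):
 *
 * struct bpf_bprintf_data data = { .get_bin_args = true };
 * int err;
 *
 * err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 * if (err < 0)
 *	return err;
 * err = bstr_printf(str, str_size, fmt, data.bin_args);
 * bpf_bprintf_cleanup(&data);
 * return err;
 */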

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{}

const struct bpf_func_proto bpf_snprintf_proto =;

struct bpf_async_cb {};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
 * A program-side usage sketch follows the declarations below.
 */
struct bpf_hrtimer {};

struct bpf_work {};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {} __attribute__((aligned));

enum bpf_async_type {};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
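
/* Program-side usage sketch of the timer lifecycle described above
 * (illustrative BPF program fragment, not part of this file; the map name,
 * struct name, and attach point are hypothetical):
 *
 * struct elem { struct bpf_timer t; };
 *
 * struct {
 *	__uint(type, BPF_MAP_TYPE_ARRAY);
 *	__uint(max_entries, 1);
 *	__type(key, int);
 *	__type(value, struct elem);
 * } timer_map SEC(".maps");
 *
 * static int timer_cb(void *map, int *key, struct bpf_timer *timer)
 * {
 *	return 0;
 * }
 *
 * SEC("fentry/bpf_fentry_test1")
 * int start_timer(void *ctx)
 * {
 *	int key = 0;
 *	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);
 *
 *	if (!val)
 *		return 0;
 *	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000000, 0);
 *	return 0;
 * }
 */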

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{}

static void bpf_wq_work(struct work_struct *work)
{}

static void bpf_wq_delete_work(struct work_struct *work)
{}

static void bpf_timer_delete_work(struct work_struct *work)
{}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{}

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{}

static const struct bpf_func_proto bpf_timer_init_proto =;

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{}

static const struct bpf_func_proto bpf_timer_set_callback_proto =;

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{}

static const struct bpf_func_proto bpf_timer_start_proto =;

static void drop_prog_refcnt(struct bpf_async_cb *async)
{}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{}

static const struct bpf_func_proto bpf_timer_cancel_proto =;

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{}

/* This function is called by map_delete/update_elem for an individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{}

/* This function is called by map_delete/update_elem for an individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto =;
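
/* Program-side usage sketch (illustrative; the map value layout and the local
 * variables 'v' and 'acquired' are hypothetical):
 *
 * struct map_value {
 *	struct task_struct __kptr *task;
 * };
 *
 * struct task_struct *old;
 *
 * // Atomically store 'acquired' in the map value and take ownership of
 * // whatever pointer was there before (possibly NULL).
 * old = bpf_kptr_xchg(&v->task, acquired);
 * if (old)
 *	bpf_task_release(old);
 */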

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE
#define DYNPTR_TYPE_SHIFT
#define DYNPTR_SIZE_MASK
#define DYNPTR_RDONLY_BIT

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{}

int bpf_dynptr_check_size(u32 size)
{}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{}

static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto =;

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{}

static const struct bpf_func_proto bpf_dynptr_read_proto =;

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{}

static const struct bpf_func_proto bpf_dynptr_write_proto =;

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{}

static const struct bpf_func_proto bpf_dynptr_data_proto =;

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock)
{}

/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
 * 'rb_node *', so field name of rb_node within containing struct is not
 * needed.
 *
 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
 * graph_root.node_offset, it's not necessary to know field name
 * or type of node struct
 */
#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root)

void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock)
{}

__bpf_kfunc_start_defs();

__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{}

__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{}

/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
{}

__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{}

__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
{}

__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{}

static int __bpf_list_add(struct bpf_list_node_kern *node,
			  struct bpf_list_head *head,
			  bool tail, struct btf_record *rec, u64 off)
{}

__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
					 struct bpf_list_node *node,
					 void *meta__ign, u64 off)
{}

__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
					struct bpf_list_node *node,
					void *meta__ign, u64 off)
{}

static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
{}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
						  struct bpf_rb_node *node)
{}

/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
 * program
 */
static int __bpf_rbtree_add(struct bpf_rb_root *root,
			    struct bpf_rb_node_kern *node,
			    void *less, struct btf_record *rec, u64 off)
{}

__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
				    void *meta__ign, u64 off)
{}
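
/* Program-side usage sketch (illustrative; node_data, groot and glock are
 * hypothetical, and bpf_rbtree_add() is the convenience wrapper around this
 * kfunc provided by the selftests' bpf_experimental.h):
 *
 * struct node_data {
 *	long key;
 *	struct bpf_rb_node node;
 * };
 *
 * static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 * {
 *	struct node_data *na = container_of(a, struct node_data, node);
 *	struct node_data *nb = container_of(b, struct node_data, node);
 *
 *	return na->key < nb->key;
 * }
 *
 * n = bpf_obj_new(struct node_data);
 * if (!n)
 *	return 0;
 * n->key = 42;
 * bpf_spin_lock(&glock);
 * bpf_rbtree_add(&groot, &n->node, less);
 * bpf_spin_unlock(&glock);
 */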

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{}

/**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc that is not stored in a map as a kptr must be released by calling
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{}

/**
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{}
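
/* Program-side usage sketch of the acquire/release pairing (illustrative;
 * the tracepoint attach point is one example of where 'task' is trusted):
 *
 * SEC("tp_btf/task_newtask")
 * int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
 * {
 *	struct task_struct *acquired;
 *
 *	acquired = bpf_task_acquire(task);
 *	if (!acquired)
 *		return 0;
 *	// ... use acquired, or store it in a map as a kptr ...
 *	bpf_task_release(acquired);
 *	return 0;
 * }
 */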

__bpf_kfunc void bpf_task_release_dtor(void *p)
{}
CFI_NOSEAL();

#ifdef CONFIG_CGROUPS
/**
 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
 * this kfunc that is not stored in a map as a kptr must be released by
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{}

/**
 * bpf_cgroup_release - Release the reference acquired on a cgroup.
 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
 * not be freed until the current grace period has ended, even if its refcount
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{}

__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
{}
CFI_NOSEAL();

/**
 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
 * array. A cgroup returned by this kfunc that is not subsequently stored in a
 * map must be released by calling bpf_cgroup_release().
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{}

/**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc that is not subsequently stored in a map must be released by calling
 * bpf_cgroup_release().
 * @cgid: cgroup id.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{}

/**
 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc and
 * test the task's membership of a cgroup ancestry.
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{}

/**
 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @task: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned. On failure, NULL is returned.
 */
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{}
#endif /* CONFIG_CGROUPS */

/**
 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
 * in the root pid namespace idr. If a task is returned, it must either be
 * stored in a map or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{}

/**
 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
 *
 * If the intention is to write to the data slice, please use
 * bpf_dynptr_slice_rdwr.
 *
 * The user must check that the returned pointer is not null before using it.
 *
 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
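 * A typical read-only usage looks like this (illustrative; "buffer" is a
 * program-provided local array):
 *
 * struct eth_hdr *eth = bpf_dynptr_slice(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // inspect, but do not modify, the header through eth //
 *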
 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
				   void *buffer__opt, u32 buffer__szk)
{}

/**
 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
 *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset or to the buffer if a direct data pointer
 * cannot be obtained (example: the requested slice is to the paged area of an
 * skb packet). In the case where the returned pointer is to the buffer, the
 * user is responsible for persisting writes through calling bpf_dynptr_write().
 * This usually looks something like this pattern:
 *
 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // mutate eth header //
 *
 * if (eth == buffer)
 *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
 *
 * Please note that, as in the example above, the user must check that the
 * returned pointer is not null before using it.
 *
 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (eg invalid dynptr), pointer to a
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
					void *buffer__opt, u32 buffer__szk)
{}

__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
{}

__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
{}

__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
{}

__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
{}

__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
				 struct bpf_dynptr *clone__uninit)
{}

__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{}

__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{}

__bpf_kfunc void bpf_rcu_read_lock(void)
{}

__bpf_kfunc void bpf_rcu_read_unlock(void)
{}

struct bpf_throw_ctx {};

static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{}

__bpf_kfunc void bpf_throw(u64 cookie)
{}

__bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
{}

__bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
{}

__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
					 int (callback_fn)(void *map, int *key, void *value),
					 unsigned int flags,
					 void *aux__ign)
{}

__bpf_kfunc void bpf_preempt_disable(void)
{}

__bpf_kfunc void bpf_preempt_enable(void)
{}

struct bpf_iter_bits {} __aligned();

struct bpf_iter_bits_kern {} __aligned();

/**
 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
 * @it: The new bpf_iter_bits to be created
 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
 * @nr_words: The size of the specified memory area, measured in 8-byte units.
 * Due to the limitation of the bpf memory allocator, it can't be greater than 512.
 *
 * This function initializes a new bpf_iter_bits structure for iterating over
 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
 * copies the data of the memory area to the newly created bpf_iter_bits @it for
 * subsequent iteration operations.
 *
 * On success, 0 is returned. On failure, a negative error code is returned.
 */
__bpf_kfunc int
bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
{}

/**
 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
 * @it: The bpf_iter_bits to be checked
 *
 * This function returns a pointer to a number representing the index of the
 * next set bit in the bit area.
 *
 * If there are no further set bits, it returns NULL.
 */
__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
{}

/**
 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
 * @it: The bpf_iter_bits to be destroyed
 *
 * Destroy the resource associated with the bpf_iter_bits.
 */
__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
{}
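
/* Open-coded usage sketch from a BPF program (illustrative; "mask" is a
 * hypothetical u64 bitmap readable by the program):
 *
 * struct bpf_iter_bits it;
 * int *bit;
 *
 * bpf_iter_bits_new(&it, &mask, 1);
 * while ((bit = bpf_iter_bits_next(&it)))
 *	bpf_printk("set bit: %d", *bit);
 * bpf_iter_bits_destroy(&it);
 */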

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS()
#endif
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()

#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
#endif
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_KFUNCS_END()

static const struct btf_kfunc_id_set generic_kfunc_set =;


BTF_ID_LIST(generic_dtor_ids)
BTF_ID()
BTF_ID()
#ifdef CONFIG_CGROUPS
BTF_ID()
BTF_ID()
#endif

BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
#endif
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_ID_FLAGS()
BTF_KFUNCS_END()

static const struct btf_kfunc_id_set common_kfunc_set =;

static int __init kfunc_init(void)
{}

late_initcall(kfunc_init);

/* Get a pointer to dynptr data up to len bytes for read only access. If
 * the dynptr doesn't have continuous data up to len bytes, return NULL.
 */
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{}

/* Get a pointer to dynptr data up to len bytes for read write access. If
 * the dynptr doesn't have continuous data up to len bytes, or the dynptr
 * is read only, return NULL.
 */
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
{}