#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>
#include "../../lib/kstrtox.h"
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{ … }
const struct bpf_func_proto bpf_map_lookup_elem_proto = …;
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
void *, value, u64, flags)
{ … }
const struct bpf_func_proto bpf_map_update_elem_proto = …;
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{ … }
const struct bpf_func_proto bpf_map_delete_elem_proto = …;
BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{ … }
const struct bpf_func_proto bpf_map_push_elem_proto = …;
BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{ … }
const struct bpf_func_proto bpf_map_pop_elem_proto = …;
BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{ … }
const struct bpf_func_proto bpf_map_peek_elem_proto = …;
BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{ … }
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = …;
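/*
 * Usage sketch (illustrative only, not part of this file): BPF programs
 * reach the map helpers above through the UAPI wrappers declared in
 * <bpf/bpf_helpers.h>.  The map, section and function names below are
 * made up.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} counts SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int count_execve(void *ctx)
 *	{
 *		u32 key = 0;
 *		u64 one = 1, *val;
 *
 *		val = bpf_map_lookup_elem(&counts, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		else
 *			bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
 *		return 0;
 *	}
 */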
const struct bpf_func_proto bpf_get_prandom_u32_proto = …;
BPF_CALL_0(bpf_get_smp_processor_id)
{ … }
const struct bpf_func_proto bpf_get_smp_processor_id_proto = …;
BPF_CALL_0(bpf_get_numa_node_id)
{ … }
const struct bpf_func_proto bpf_get_numa_node_id_proto = …;
BPF_CALL_0(bpf_ktime_get_ns)
{ … }
const struct bpf_func_proto bpf_ktime_get_ns_proto = …;
BPF_CALL_0(bpf_ktime_get_boot_ns)
{ … }
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = …;
BPF_CALL_0(bpf_ktime_get_coarse_ns)
{ … }
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = …;
BPF_CALL_0(bpf_ktime_get_tai_ns)
{ … }
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = …;
BPF_CALL_0(bpf_get_current_pid_tgid)
{ … }
const struct bpf_func_proto bpf_get_current_pid_tgid_proto = …;
BPF_CALL_0(bpf_get_current_uid_gid)
{ … }
const struct bpf_func_proto bpf_get_current_uid_gid_proto = …;
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{ … }
const struct bpf_func_proto bpf_get_current_comm_proto = …;
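/*
 * Usage sketch (illustrative): the three helpers above identify the
 * current task.  bpf_get_current_pid_tgid() packs the tgid in the upper
 * 32 bits and the pid (kernel-side tid) in the lower 32;
 * bpf_get_current_uid_gid() packs gid above uid the same way.
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32, pid = (u32)id;
 *	char comm[16];	// TASK_COMM_LEN
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 */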
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{ … }
static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{ … }
#else
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	/*
	 * Generic test-and-test-and-set fallback: spin with plain reads
	 * until the lock word is observed as 0, then try to claim it with
	 * an atomic exchange.  A non-zero xchg() return means another CPU
	 * won the race, so go back to spinning.
	 */
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}
static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	/* Store-release pairs with the lock acquisition above. */
	atomic_set_release(l, 0);
}
#endif
static DEFINE_PER_CPU(unsigned long, irqsave_flags);
static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{ … }
NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{ … }
const struct bpf_func_proto bpf_spin_lock_proto = …;
static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{ … }
NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{ … }
const struct bpf_func_proto bpf_spin_unlock_proto = …;
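/*
 * Usage sketch (illustrative): a bpf_spin_lock must be embedded in a
 * map value and only protects fields of that same value; no helper
 * calls or sleeping are allowed while it is held.  The value type and
 * map below are made up.
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&vals, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */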
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src)
{ … }
BPF_CALL_0(bpf_jiffies64)
{ … }
const struct bpf_func_proto bpf_jiffies64_proto = …;
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{ … }
const struct bpf_func_proto bpf_get_current_cgroup_id_proto = …;
BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{ … }
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = …;
#endif
#define BPF_STRTOX_BASE_MASK …
static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
unsigned long long *res, bool *is_negative)
{ … }
static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
long long *res)
{ … }
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
long *, res)
{ … }
const struct bpf_func_proto bpf_strtol_proto = …;
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
unsigned long *, res)
{ … }
const struct bpf_func_proto bpf_strtoul_proto = …;
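/*
 * Usage sketch (illustrative): bpf_strtol()/bpf_strtoul() parse an
 * integer from a buffer that need not be NUL-terminated.  The numeric
 * base sits in the low bits of @flags (0 selects auto-detection of
 * "0x"/"0" prefixes); the return value is the number of characters
 * consumed, or a negative error.
 *
 *	long val;
 *	long n = bpf_strtol(buf, buf_len, 0, &val);
 *
 *	if (n < 0)
 *		return n;	// nothing parseable
 */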
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{ … }
static const struct bpf_func_proto bpf_strncmp_proto = …;
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
struct bpf_pidns_info *, nsdata, u32, size)
{ … }
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = …;
static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = …;
BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{ … }
const struct bpf_func_proto bpf_event_output_data_proto = …;
BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
const void __user *, user_ptr)
{ … }
const struct bpf_func_proto bpf_copy_from_user_proto = …;
BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{ … }
const struct bpf_func_proto bpf_copy_from_user_task_proto = …;
BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{ … }
const struct bpf_func_proto bpf_per_cpu_ptr_proto = …;
BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{ … }
const struct bpf_func_proto bpf_this_cpu_ptr_proto = …;
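/*
 * Usage sketch (illustrative): these two take a pointer to a percpu
 * kernel symbol, declared as a ksym on the program side, and return the
 * address of one CPU's instance.  bpf_per_cpu_ptr() may return NULL for
 * an out-of-range CPU; bpf_this_cpu_ptr() never returns NULL.
 *
 *	extern const struct rq runqueues __ksym;	// percpu variable
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)
 *		nr = rq->nr_running;
 */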
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
size_t bufsz)
{ … }
#define MAX_BPRINTF_BIN_ARGS …
#define MAX_BPRINTF_NEST_LEVEL …
struct bpf_bprintf_buffers { … };
static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{ … }
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{ … }
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 num_args, struct bpf_bprintf_data *data)
{ … }
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
const void *, args, u32, data_len)
{ … }
const struct bpf_func_proto bpf_snprintf_proto = …;
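/*
 * Usage sketch (illustrative): unlike kernel snprintf(), the format
 * arguments are passed as an array of u64, with data_len equal to the
 * size of that array in bytes.
 *
 *	char out[64];
 *	u64 args[] = { pid, addr };
 *
 *	bpf_snprintf(out, sizeof(out), "pid %d addr %llx", args, sizeof(args));
 */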
struct bpf_async_cb { … };
struct bpf_hrtimer { … };
struct bpf_work { … };
struct bpf_async_kern { … } __attribute__((aligned …));
enum bpf_async_type { … };
static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{ … }
static void bpf_wq_work(struct work_struct *work)
{ … }
static void bpf_wq_delete_work(struct work_struct *work)
{ … }
static void bpf_timer_delete_work(struct work_struct *work)
{ … }
static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
enum bpf_async_type type)
{ … }
BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
u64, flags)
{ … }
static const struct bpf_func_proto bpf_timer_init_proto = …;
static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
struct bpf_prog_aux *aux, unsigned int flags,
enum bpf_async_type type)
{ … }
BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
struct bpf_prog_aux *, aux)
{ … }
static const struct bpf_func_proto bpf_timer_set_callback_proto = …;
BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{ … }
static const struct bpf_func_proto bpf_timer_start_proto = …;
static void drop_prog_refcnt(struct bpf_async_cb *async)
{ … }
BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{ … }
static const struct bpf_func_proto bpf_timer_cancel_proto = …;
static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{ … }
void bpf_timer_cancel_and_free(void *val)
{ … }
void bpf_wq_cancel_and_free(void *val)
{ … }
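/*
 * Usage sketch (illustrative): a struct bpf_timer lives in a map value.
 * A program initializes it against its map, attaches a callback, then
 * arms it; the callback later fires from hrtimer context.  The value
 * type, map and callback names below are made up.
 *
 *	static int timer_cb(void *map, int *key, struct val *v)
 *	{
 *		return 0;
 *	}
 *
 *	struct val *v = bpf_map_lookup_elem(&vals, &key);
 *	if (v) {
 *		bpf_timer_init(&v->t, &vals, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&v->t, timer_cb);
 *		bpf_timer_start(&v->t, 1000000, 0);	// 1 ms, in ns
 *	}
 */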
BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{ … }
static const struct bpf_func_proto bpf_kptr_xchg_proto = …;
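/*
 * Usage sketch (illustrative): bpf_kptr_xchg() atomically swaps the
 * referenced kptr stored in a map value field with the new pointer,
 * transferring ownership both ways; a non-NULL old pointer comes back
 * with its reference and must be released by the program.
 *
 *	struct task_struct __kptr *old;
 *
 *	old = bpf_kptr_xchg(&v->task, acquired);
 *	if (old)
 *		bpf_task_release(old);
 */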
#define DYNPTR_MAX_SIZE …
#define DYNPTR_TYPE_SHIFT …
#define DYNPTR_SIZE_MASK …
#define DYNPTR_RDONLY_BIT …
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{ … }
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{ … }
static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{ … }
static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{ … }
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{ … }
static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{ … }
int bpf_dynptr_check_size(u32 size)
{ … }
void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size)
{ … }
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{ … }
static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{ … }
BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{ … }
static const struct bpf_func_proto bpf_dynptr_from_mem_proto = …;
BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
u32, offset, u64, flags)
{ … }
static const struct bpf_func_proto bpf_dynptr_read_proto = …;
BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_dynptr_write_proto = …;
BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{ … }
static const struct bpf_func_proto bpf_dynptr_data_proto = …;
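/*
 * Usage sketch (illustrative): a dynptr couples a memory region with
 * its size so that accesses are bounds-checked at run time rather than
 * at verification time.
 *
 *	struct bpf_dynptr ptr;
 *	char buf[16];
 *
 *	if (bpf_dynptr_from_mem(data, data_sz, 0, &ptr))
 *		return 0;
 *	bpf_dynptr_read(buf, sizeof(buf), &ptr, 0, 0);
 *	bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0);
 */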
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock)
{ … }
#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) …
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
struct bpf_spin_lock *spin_lock)
{ … }
__bpf_kfunc_start_defs();
__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{ … }
__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{ … }
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
{ … }
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{ … }
__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
{ … }
__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{ … }
static int __bpf_list_add(struct bpf_list_node_kern *node,
struct bpf_list_head *head,
bool tail, struct btf_record *rec, u64 off)
{ … }
__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
{ … }
__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
{ … }
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
{ … }
__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{ … }
__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{ … }
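/*
 * Usage sketch (illustrative): list nodes are embedded in objects from
 * bpf_obj_new() and pushed/popped under the bpf_spin_lock paired with
 * the list head.  bpf_obj_new() and bpf_list_push_back() are the
 * convenience macros from the selftests' bpf_experimental.h wrapping
 * the *_impl kfuncs above; the globals below are made up.
 *
 *	struct elem {
 *		struct bpf_list_node node;
 *		u64 data;
 *	};
 *
 *	struct elem *e = bpf_obj_new(struct elem);
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_back(&ghead, &e->node);
 *	bpf_spin_unlock(&glock);
 */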
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
struct bpf_rb_node *node)
{ … }
static int __bpf_rbtree_add(struct bpf_rb_root *root,
struct bpf_rb_node_kern *node,
void *less, struct btf_record *rec, u64 off)
{ … }
__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
void *meta__ign, u64 off)
{ … }
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{ … }
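/*
 * Usage sketch (illustrative): insertion orders nodes with a less()
 * callback, and all rbtree operations run under the lock paired with
 * the root.  bpf_rbtree_add() is the bpf_experimental.h wrapper for
 * bpf_rbtree_add_impl(); struct elem and the globals are made up.
 *
 *	static bool elem_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct elem *ea = container_of(a, struct elem, rbnode);
 *		struct elem *eb = container_of(b, struct elem, rbnode);
 *
 *		return ea->key < eb->key;
 *	}
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &e->rbnode, elem_less);
 *	bpf_spin_unlock(&glock);
 */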
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{ … }
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{ … }
__bpf_kfunc void bpf_task_release_dtor(void *p)
{ … }
CFI_NOSEAL(…);
#ifdef CONFIG_CGROUPS
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{ … }
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{ … }
__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
{ … }
CFI_NOSEAL(…);
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{ … }
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{ … }
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
struct cgroup *ancestor)
{ … }
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{ … }
#endif
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{ … }
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
void *buffer__opt, u32 buffer__szk)
{ … }
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
void *buffer__opt, u32 buffer__szk)
{ … }
__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
{ … }
__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
{ … }
__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
{ … }
__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
{ … }
__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
struct bpf_dynptr *clone__uninit)
{ … }
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{ … }
__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{ … }
__bpf_kfunc void bpf_rcu_read_lock(void)
{ … }
__bpf_kfunc void bpf_rcu_read_unlock(void)
{ … }
struct bpf_throw_ctx { … };
static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{ … }
__bpf_kfunc void bpf_throw(u64 cookie)
{ … }
__bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
{ … }
__bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
{ … }
__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
int (callback_fn)(void *map, int *key, void *value),
unsigned int flags,
void *aux__ign)
{ … }
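/*
 * Usage sketch (illustrative): like a bpf_timer, a struct bpf_wq is
 * embedded in a map value, but its callback runs from workqueue
 * (process) context.  bpf_wq_set_callback() is assumed to be the
 * bpf_experimental.h wrapper for the _impl kfunc above; names below
 * are made up.
 *
 *	bpf_wq_init(&v->w, &vals, 0);
 *	bpf_wq_set_callback(&v->w, wq_cb, 0);
 *	bpf_wq_start(&v->w, 0);
 */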
__bpf_kfunc void bpf_preempt_disable(void)
{ … }
__bpf_kfunc void bpf_preempt_enable(void)
{ … }
struct bpf_iter_bits { … } __aligned(…);
struct bpf_iter_bits_kern { … } __aligned(…);
__bpf_kfunc int
bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
{ … }
__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
{ … }
__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
{ … }
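/*
 * Usage sketch (illustrative): this is an open-coded iterator; BPF
 * programs usually drive it through the selftests' bpf_for_each()
 * macro (an assumption about the helper headers), which expands to the
 * _new()/_next()/_destroy() calls above.
 *
 *	int *bit;
 *
 *	bpf_for_each(bits, bit, words, nr_words) {
 *		// *bit is the index of the next set bit
 *	}
 */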
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS(…)
#endif
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
#endif
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
static const struct btf_kfunc_id_set generic_kfunc_set = …;
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(…)
BTF_ID(…)
#ifdef CONFIG_CGROUPS
BTF_ID(…)
BTF_ID(…)
#endif
BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
#endif
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
static const struct btf_kfunc_id_set common_kfunc_set = …;
static int __init kfunc_init(void)
{ … }
late_initcall(kfunc_init);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{ … }
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
{ … }