linux/kernel/bpf/syscall.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map)
#define IS_FD_PROG_ARRAY(map)
#define IS_FD_HASH(map)
#define IS_FD_MAP(map)

#define BPF_OBJ_FLAG_MASK

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =;

static const struct bpf_map_ops * const bpf_map_types[] =;

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU window between this function call and the following
 * copy_from_user() call. However, this is not a concern since the check is
 * only meant to future-proof against unknown trailing bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{}
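
/* Illustrative sketch (not the in-tree implementation) of the trailing-zero
 * check described above, assuming a plain user pointer for simplicity; the
 * real helper also accepts kernel pointers via bpfptr_t. The "_sketch" name
 * is hypothetical.
 */
static __maybe_unused int check_uarg_tail_zero_sketch(const char __user *uaddr,
						       size_t expected_size,
						       size_t actual_size)
{
	int res;

	if (actual_size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	/* check_zeroed_user() returns 1 if the tail is all zeroes, 0 if a
	 * non-zero byte is found, and a negative errno on fault.
	 */
	res = check_zeroed_user(uaddr + expected_size,
				actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}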

const struct bpf_map_ops bpf_map_offload_ops =;

static void bpf_map_write_active_inc(struct bpf_map *map)
{}

static void bpf_map_write_active_dec(struct bpf_map *map)
{}

bool bpf_map_write_active(const struct bpf_map *map)
{}

static u32 bpf_map_value_size(const struct bpf_map *map)
{}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{}

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care to set the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example, and the
 * illustrative sketch after it below).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{}

void *bpf_map_area_alloc(u64 size, int numa_node)
{}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{}

void bpf_map_area_free(void *area)
{}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{}

static int bpf_map_alloc_id(struct bpf_map *map)
{}

void bpf_map_free_id(struct bpf_map *map)
{}

#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{}

static void bpf_map_release_memcg(struct bpf_map *map)
{}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{}
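
/* Illustrative sketch of the charging pattern referred to in the comment above
 * __bpf_map_area_alloc(): allocations made outside of the map creation path
 * are charged to the map's memory cgroup by temporarily making it the active
 * memcg. The "_sketch" name is hypothetical; the in-tree helpers above follow
 * this shape.
 */
static __maybe_unused void *map_charged_kmalloc_sketch(const struct bpf_map *map,
							size_t size, gfp_t flags,
							int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}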

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{}


static int btf_field_cmp(const void *a, const void *b)
{}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{}

void btf_record_free(struct btf_record *rec)
{}

void bpf_map_free_record(struct bpf_map *map)
{}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{}

static void bpf_map_put_uref(struct bpf_map *map)
{}

static void bpf_map_free_in_work(struct bpf_map *map)
{}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{}
EXPORT_SYMBOL_GPL();
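
/* Illustrative sketch of the put/deferred-free pattern described above
 * bpf_map_put(): dropping the last reference schedules the actual teardown on
 * a workqueue because ops->map_free() may sleep. The "_sketch" name is
 * hypothetical and the body is a simplification (the real helper also frees
 * the ID, drops the BTF reference, and may go through an RCU grace period).
 */
static __maybe_unused void map_put_sketch(struct bpf_map *map)
{
	if (!atomic64_dec_and_test(&map->refcnt))
		return;

	/* final teardown (ops->map_free() etc.) runs from process context */
	INIT_WORK(&map->work, bpf_map_free_deferred);
	queue_work(system_unbound_wq, &map->work);
}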

void bpf_map_put_with_uref(struct bpf_map *map)
{}

static int bpf_map_release(struct inode *inode, struct file *filp)
{}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{}

/* called for any extra memory-mapped region (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{}

/* called for all memory regions being unmapped (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{}

static const struct vm_operations_struct bpf_map_default_vmops =;

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{}

const struct file_operations bpf_map_fops =;

int bpf_map_new_fd(struct bpf_map *map, int flags)
{}

int bpf_get_file_flag(int flags)
{}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD)
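
/* Illustrative sketch of what such a check boils down to: every byte of
 * 'union bpf_attr' past the last field used by command CMD must be zero, so
 * that fields set by newer user-space are not silently ignored. Like the
 * in-tree macro, this relies on a local 'union bpf_attr *attr' being in scope
 * in the calling handler and on the per-command *_LAST_FIELD defines below.
 * CHECK_ATTR_SKETCH is a hypothetical name.
 */
#define CHECK_ATTR_SKETCH(CMD) \
	(memchr_inv((void *)&attr->CMD##_LAST_FIELD + \
		    sizeof(attr->CMD##_LAST_FIELD), 0, \
		    sizeof(*attr) - \
		    offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		    sizeof(attr->CMD##_LAST_FIELD)) != NULL)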

/* dst and src must each provide at least "size" bytes.
 * Returns the string length on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{}
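
/* Illustrative sketch of the copy described above: only alphanumeric
 * characters, '_' and '.' are accepted, and src must be NUL-terminated within
 * "size" bytes. The "_sketch" name is hypothetical.
 */
static __maybe_unused int obj_name_cpy_sketch(char *dst, const char *src,
					      unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* no NUL terminator found within "size" bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;	/* length of the copied name */
}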

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{}

static bool bpf_net_capable(void)
{}

#define BPF_MAP_CREATE_LAST_FIELD
/* called via syscall */
static int map_create(union bpf_attr *attr)
{}

/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{}
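
/* Illustrative sketch of the calling convention described above: the caller
 * owns the struct fd and must fdput() it when done with the map, except when
 * __bpf_map_get() returns an error, in which case the fd has already been
 * released. The "_sketch" name and the empty "use" section are hypothetical.
 */
static __maybe_unused int map_fd_usage_sketch(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (IS_ERR(map))
		return PTR_ERR(map);	/* fd already released on error */

	/* ... use map while holding the fd reference ... */

	fdput(f);
	return 0;
}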

void bpf_map_inc(struct bpf_map *map)
{}
EXPORT_SYMBOL_GPL();

void bpf_map_inc_with_uref(struct bpf_map *map)
{}
EXPORT_SYMBOL_GPL();

struct bpf_map *bpf_map_get(u32 ufd)
{}
EXPORT_SYMBOL();

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{}

/* map_idr_lock should have been held, or the map should have been
 * protected by the RCU read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{}
EXPORT_SYMBOL_GPL();

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD

static int map_lookup_elem(union bpf_attr *attr)
{}


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD

static int map_get_next_key(union bpf_attr *attr)
{}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{}

#define MAP_LOOKUP_RETRIES

int generic_map_lookup_batch(struct bpf_map *map,
				    const union bpf_attr *attr,
				    union bpf_attr __user *uattr)
{}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{}

#define BPF_MAP_FREEZE_LAST_FIELD

static int map_freeze(const union bpf_attr *attr)
{}

static const struct bpf_prog_ops * const bpf_prog_types[] =;

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{}

enum bpf_audit {};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] =;

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{}

void bpf_prog_free_id(struct bpf_prog *prog)
{}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{}

static void bpf_prog_put_deferred(struct work_struct *work)
{}

static void __bpf_prog_put(struct bpf_prog *prog)
{}

void bpf_prog_put(struct bpf_prog *prog)
{}
EXPORT_SYMBOL_GPL();

static int bpf_prog_release(struct inode *inode, struct file *filp)
{}

struct bpf_prog_kstats {};

void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{}

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{}
#endif

const struct file_operations bpf_prog_fops =;

int bpf_prog_new_fd(struct bpf_prog *prog)
{}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{}

void bpf_prog_add(struct bpf_prog *prog, int i)
{}
EXPORT_SYMBOL_GPL();

void bpf_prog_sub(struct bpf_prog *prog, int i)
{}
EXPORT_SYMBOL_GPL();

void bpf_prog_inc(struct bpf_prog *prog)
{}
EXPORT_SYMBOL_GPL();

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{}
EXPORT_SYMBOL_GPL();

bool bpf_prog_get_ok(struct bpf_prog *prog,
			    enum bpf_prog_type *attach_type, bool attach_drv)
{}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{}

struct bpf_prog *bpf_prog_get(u32 ufd)
{}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{}
EXPORT_SYMBOL_GPL();

/* Initially all BPF programs could be loaded without specifying
 * expected_attach_type. Later, for some of them, specifying
 * expected_attach_type at load time became required so that the program
 * could be validated properly. Program types that are allowed to be loaded
 * both with and without (for backward compatibility) an expected_attach_type
 * should have a default attach type assigned to expected_attach_type in the
 * latter case, so that it can be validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if the
 * prog type requires it but has some attach types that have to remain
 * backward compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{}
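
/* Illustrative sketch of such a fixup; the "_sketch" name is hypothetical and
 * the exact set of program types handled in-tree may differ. Program types
 * that predate expected_attach_type get a default value assigned when
 * user-space left the field at zero, so attach-time validation has something
 * to check against.
 */
static __maybe_unused void prog_load_fixup_attach_type_sketch(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		if (!attr->expected_attach_type)
			attr->expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
		break;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		if (!attr->expected_attach_type)
			attr->expected_attach_type = BPF_SK_REUSEPORT_SELECT;
		break;
	default:
		break;
	}
}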

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{}

static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
{}

static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
{}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD

static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{}

#define BPF_OBJ_LAST_FIELD

static int bpf_obj_pin(const union bpf_attr *attr)
{}

static int bpf_obj_get(const union bpf_attr *attr)
{}

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
{}

static void bpf_link_free_id(int id)
{}

/* Clean up the bpf_link and the corresponding anon_inode file and FD. After
 * the anon_inode is created, the bpf_link can't simply be kfree()'d due to the
 * deferred anon_inode release() call. This helper marks the bpf_link as
 * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
 * refcnt is not decremented; that is the responsibility of the calling code
 * that failed to complete bpf_link initialization.
 * This helper eventually calls the link's dealloc callback, but does not call
 * the link's release callback.
 */
void bpf_link_cleanup(struct bpf_link_primer *primer)
{}

void bpf_link_inc(struct bpf_link *link)
{}

static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
{}

static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
{}

/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{}

static void bpf_link_put_deferred(struct work_struct *work)
{}

/* bpf_link_put() might be called from atomic context. Since freeing the link
 * may need to acquire sleeping locks, the actual teardown is deferred to
 * process (sleepable) context.
 */
void bpf_link_put(struct bpf_link *link)
{}
EXPORT_SYMBOL();

static void bpf_link_put_direct(struct bpf_link *link)
{}

static int bpf_link_release(struct inode *inode, struct file *filp)
{}

#ifdef CONFIG_PROC_FS
#define BPF_PROG_TYPE
#define BPF_MAP_TYPE
#define BPF_LINK_TYPE
static const char *bpf_link_type_strs[] =;
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{}
#endif

static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
{}

static const struct file_operations bpf_link_fops =;

static const struct file_operations bpf_link_fops_poll =;

static int bpf_link_alloc_id(struct bpf_link *link)
{}

/* Prepare a bpf_link to be exposed to user-space by allocating an anon_inode
 * file, reserving an unused FD and allocating an ID from link_idr. This is to
 * be paired with bpf_link_settle() to install the FD and ID and expose the
 * bpf_link to user-space, if the bpf_link is successfully attached. If not,
 * the bpf_link and pre-allocated resources are to be freed with a
 * bpf_link_cleanup() call. All the transient state is passed around in
 * struct bpf_link_primer.
 * This is the preferred way to create and initialize a bpf_link, especially
 * when there are complicated and expensive operations in between creating the
 * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
 * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) rollback operations in the rare case
 * that the file, FD, or ID can't be allocated. See the usage sketch after
 * bpf_link_settle() below.
 */
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{}

int bpf_link_settle(struct bpf_link_primer *primer)
{}
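
/* Illustrative usage sketch of the prime/settle protocol described above
 * bpf_link_prime(). "foo_link_lops_sketch", "foo_hook_attach_sketch" and the
 * use of BPF_LINK_TYPE_UNSPEC are hypothetical placeholders, not in-tree
 * identifiers.
 */
static const struct bpf_link_ops foo_link_lops_sketch = {
	/* a real link type would provide .release and .dealloc callbacks */
};

static int foo_hook_attach_sketch(struct bpf_link *link)
{
	/* stand-in for the expensive, failure-prone attach step */
	return 0;
}

static __maybe_unused int foo_link_attach_sketch(struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	bpf_link_init(link, BPF_LINK_TYPE_UNSPEC, &foo_link_lops_sketch, prog);

	err = bpf_link_prime(link, &link_primer);
	if (err) {
		/* not primed yet: a plain kfree() is fine */
		kfree(link);
		return err;
	}

	err = foo_hook_attach_sketch(link);
	if (err) {
		/* primed: must go through bpf_link_cleanup(), not kfree() */
		bpf_link_cleanup(&link_primer);
		return err;
	}

	/* success: install FD and ID and hand the fd back to user-space */
	return bpf_link_settle(&link_primer);
}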

int bpf_link_new_fd(struct bpf_link *link)
{}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{}
EXPORT_SYMBOL();

static void bpf_tracing_link_release(struct bpf_link *link)
{}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
					 struct seq_file *seq)
{}

static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
					   struct bpf_link_info *info)
{}

static const struct bpf_link_ops bpf_tracing_link_lops =;

static int bpf_tracing_prog_attach(struct bpf_prog *prog,
				   int tgt_prog_fd,
				   u32 btf_id,
				   u64 bpf_cookie)
{}

static void bpf_raw_tp_link_release(struct bpf_link *link)
{}

static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
{}

static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{}

static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
			    u32 len)
{}

static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{}

static const struct bpf_link_ops bpf_raw_tp_link_lops =;

#ifdef CONFIG_PERF_EVENTS
struct bpf_perf_link {};

static void bpf_perf_link_release(struct bpf_link *link)
{}

static void bpf_perf_link_dealloc(struct bpf_link *link)
{}

static int bpf_perf_link_fill_common(const struct perf_event *event,
				     char __user *uname, u32 ulen,
				     u64 *probe_offset, u64 *probe_addr,
				     u32 *fd_type, unsigned long *missed)
{}

#ifdef CONFIG_KPROBE_EVENTS
static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
				     struct bpf_link_info *info)
{}
#endif

#ifdef CONFIG_UPROBE_EVENTS
static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
				     struct bpf_link_info *info)
{}
#endif

static int bpf_perf_link_fill_probe(const struct perf_event *event,
				    struct bpf_link_info *info)
{}

static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
					 struct bpf_link_info *info)
{}

static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
					 struct bpf_link_info *info)
{}

static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
					struct bpf_link_info *info)
{}

static const struct bpf_link_ops bpf_perf_link_lops =;

static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{}
#else
static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_PERF_EVENTS */

static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
				  const char __user *user_tp_name, u64 cookie)
{}

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{}

static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{}

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{}

#define BPF_PROG_ATTACH_LAST_FIELD

#define BPF_F_ATTACH_MASK_BASE

#define BPF_F_ATTACH_MASK_MPROG

static int bpf_prog_attach(const union bpf_attr *attr)
{}

#define BPF_PROG_DETACH_LAST_FIELD

static int bpf_prog_detach(const union bpf_attr *attr)
{}

#define BPF_PROG_QUERY_LAST_FIELD

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{}

#define BPF_PROG_TEST_RUN_LAST_FIELD

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{}

struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
{}

struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
{}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD

struct bpf_prog *bpf_prog_by_id(u32 id)
{}

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{}

static int set_info_rec_size(struct bpf_prog_info *info)
{}

static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{}

static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{}

static int bpf_link_get_info_by_fd(struct file *file,
				  struct bpf_link *link,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{}


#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{}

#define BPF_BTF_LOAD_LAST_FIELD

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				    union bpf_attr __user *uattr,
				    u32 prog_id, u32 fd_type,
				    const char *buf, u64 probe_offset,
				    u64 probe_addr)
{}

#define BPF_TASK_FD_QUERY_LAST_FIELD

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{}

#define BPF_MAP_BATCH_LAST_FIELD

#define BPF_DO_BATCH(fn, ...)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{}

#define BPF_LINK_CREATE_LAST_FIELD
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{}

static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
{}

#define BPF_LINK_UPDATE_LAST_FIELD

static int link_update(union bpf_attr *attr)
{}

#define BPF_LINK_DETACH_LAST_FIELD

static int link_detach(union bpf_attr *attr)
{}

struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{}
EXPORT_SYMBOL();

struct bpf_link *bpf_link_by_id(u32 id)
{}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{}

DEFINE_MUTEX();

static int bpf_stats_release(struct inode *inode, struct file *file)
{}

static const struct file_operations bpf_stats_fops =;

static int bpf_enable_runtime_stats(void)
{}

#define BPF_ENABLE_STATS_LAST_FIELD

static int bpf_enable_stats(union bpf_attr *attr)
{}

#define BPF_ITER_CREATE_LAST_FIELD

static int bpf_iter_create(union bpf_attr *attr)
{}

#define BPF_PROG_BIND_MAP_LAST_FIELD

static int bpf_prog_bind_map(union bpf_attr *attr)
{}

#define BPF_TOKEN_CREATE_LAST_FIELD

static int token_create(union bpf_attr *attr)
{}

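/* bpf(2) entry point: __sys_bpf() sanity-checks and copies 'union bpf_attr'
 * from the caller (user-space, or the kernel when invoked on behalf of
 * BPF_PROG_TYPE_SYSCALL programs) and dispatches 'cmd' to the handlers above.
 */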
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{}

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{}


/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{}
EXPORT_SYMBOL();

static const struct bpf_func_proto bpf_sys_bpf_proto =;

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

BPF_CALL_1(bpf_sys_close, u32, fd)
{}

static const struct bpf_func_proto bpf_sys_close_proto =;

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto =;

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{}

const struct bpf_verifier_ops bpf_syscall_verifier_ops =;

const struct bpf_prog_ops bpf_syscall_prog_ops =;

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{}

void __weak unpriv_ebpf_notify(int new_state)
{}

static int bpf_unpriv_handler(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{}

static struct ctl_table bpf_syscall_table[] =;

static int __init bpf_syscall_sysctl_init(void)
{}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */