#include <linux/atomic.h>
#include <linux/bpf_verifier.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>
#include <net/mptcp.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netkit.h>
#include <linux/un.h>
#include <net/xdp_sock_drv.h>
#include "dev.h"
static_assert(…);
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
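/*
 * Classic socket filter entry points: copy_bpf_fprog_from_user() copies a
 * struct sock_fprog in from user space for the SO_ATTACH_FILTER style
 * setsockopt paths, and sk_filter_trim_cap() runs the filter attached to
 * the socket over the skb, trimming the skb to the filter's return value
 * but never below the cap argument.
 */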
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{ … }
EXPORT_SYMBOL(…);
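/*
 * Helpers backing the classic BPF ancillary loads (SKF_AD_*) and the
 * LD_ABS/LD_IND packet loads after conversion to eBPF. The four-argument
 * load helpers take the caller's cached skb->data/headlen, while the
 * *_no_cache variants fetch them from the skb on every call.
 */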
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{ … }
BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{ … }
BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{ … }
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{ … }
BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
int, offset)
{ … }
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{ … }
BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
int, offset)
{ … }
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{ … }
BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
int, offset)
{ … }
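/*
 * Classic BPF to eBPF translation: bpf_convert_filter() rewrites a
 * struct sock_filter program into eBPF instructions, with
 * convert_bpf_extensions() handling the SKF_AD_* ancillary loads and
 * convert_bpf_ld_abs() the packet loads. bpf_check_classic() validates
 * the classic program (size limits, allowed opcodes, sane scratch memory
 * loads/stores, terminating return) before any translation happens.
 */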
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
struct bpf_insn *insn_buf)
{ … }
static bool convert_bpf_extensions(struct sock_filter *fp,
struct bpf_insn **insnp)
{ … }
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{ … }
static int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_prog *new_prog, int *new_len,
bool *seen_ld_abs)
{ … }
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{ … }
static bool chk_code_allowed(u16 code_to_probe)
{ … }
static bool bpf_check_basics_ok(const struct sock_filter *filter,
unsigned int flen)
{ … }
static int bpf_check_classic(const struct sock_filter *filter,
unsigned int flen)
{ … }
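/*
 * Filter lifecycle: the original classic image is preserved so that
 * sk_get_filter() can hand it back to user space, and sk_filter
 * instances are reference counted and charged against the socket's
 * optmem budget via sk_filter_charge()/sk_filter_uncharge().
 */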
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
const struct sock_fprog *fprog)
{ … }
static void bpf_release_orig_filter(struct bpf_prog *fp)
{ … }
static void __bpf_prog_release(struct bpf_prog *prog)
{ … }
static void __sk_filter_release(struct sk_filter *fp)
{ … }
static void sk_filter_release_rcu(struct rcu_head *rcu)
{ … }
static void sk_filter_release(struct sk_filter *fp)
{ … }
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{ … }
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{ … }
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{ … }
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{ … }
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
bpf_aux_classic_check_t trans)
{ … }
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{ … }
EXPORT_SYMBOL_GPL(…);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig)
{ … }
EXPORT_SYMBOL_GPL(…);
void bpf_prog_destroy(struct bpf_prog *fp)
{ … }
EXPORT_SYMBOL_GPL(…);
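/*
 * Attach paths: sk_attach_filter() and sk_reuseport_attach_filter() take
 * a classic struct sock_fprog (SO_ATTACH_FILTER / SO_ATTACH_REUSEPORT_CBPF),
 * while sk_attach_bpf() and sk_reuseport_attach_bpf() take an eBPF
 * program fd (SO_ATTACH_BPF / SO_ATTACH_REUSEPORT_EBPF).
 *
 * Illustrative user-space sketch (not part of this file) attaching a
 * minimal accept-everything classic filter:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 1,
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */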
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{ … }
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{ … }
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{ … }
EXPORT_SYMBOL_GPL(…);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{ … }
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{ … }
int sk_attach_bpf(u32 ufd, struct sock *sk)
{ … }
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{ … }
void sk_reuseport_prog_free(struct bpf_prog *prog)
{ … }
struct bpf_scratchpad { … };
static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = …;
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{ … }
static inline int bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{ … }
static int bpf_try_make_head_writable(struct sk_buff *skb)
{ … }
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{ … }
static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{ … }
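/*
 * skb data access for tc/cgroup programs: bpf_skb_store_bytes() and
 * bpf_skb_load_bytes() copy packet bytes at an skb offset, with
 * bpf_try_make_writable() unsharing/pulling the skb first so the target
 * region is writable. bpf_skb_load_bytes_relative() takes the offset
 * relative to the MAC or network header instead of the start of the skb.
 *
 * Illustrative BPF program sketch (not part of this file):
 *
 *	char dmac[ETH_ALEN];
 *
 *	if (bpf_skb_load_bytes(skb, 0, dmac, sizeof(dmac)) < 0)
 *		return TC_ACT_SHOT;
 */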
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
const void *, from, u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_store_bytes_proto = …;
int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
u32 len, u64 flags)
{ … }
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
void *, to, u32, len)
{ … }
static const struct bpf_func_proto bpf_skb_load_bytes_proto = …;
int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
{ … }
BPF_CALL_4(bpf_flow_dissector_load_bytes,
const struct bpf_flow_dissector *, ctx, u32, offset,
void *, to, u32, len)
{ … }
static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = …;
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
u32, offset, void *, to, u32, len, u32, start_header)
{ … }
static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = …;
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{ … }
static const struct bpf_func_proto bpf_skb_pull_data_proto = …;
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{ … }
static const struct bpf_func_proto bpf_sk_fullsock_proto = …;
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{ … }
BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{ … }
static const struct bpf_func_proto sk_skb_pull_data_proto = …;
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
u64, from, u64, to, u64, flags)
{ … }
static const struct bpf_func_proto bpf_l3_csum_replace_proto = …;
BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
u64, from, u64, to, u64, flags)
{ … }
static const struct bpf_func_proto bpf_l4_csum_replace_proto = …;
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
__be32 *, to, u32, to_size, __wsum, seed)
{ … }
static const struct bpf_func_proto bpf_csum_diff_proto = …;
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{ … }
static const struct bpf_func_proto bpf_csum_update_proto = …;
BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
{ … }
static const struct bpf_func_proto bpf_csum_level_proto = …;
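/*
 * skb redirect machinery: __bpf_redirect() transmits the skb on another
 * device (or receives it there with BPF_F_INGRESS), and the *_neigh()
 * variants additionally resolve the L2 neighbour so the program does not
 * have to build the Ethernet header itself. These back
 * bpf_clone_redirect(), bpf_redirect(), bpf_redirect_peer() and
 * bpf_redirect_neigh(); skb_do_redirect() consumes the per-CPU redirect
 * info those helpers record once the program returns TC_ACT_REDIRECT.
 */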
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{ … }
static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
struct sk_buff *skb)
{ … }
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{ … }
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{ … }
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{ … }
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{ … }
#if IS_ENABLED(CONFIG_IPV6)
static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
struct net_device *dev, struct bpf_nh_params *nh)
{ … }
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{ … }
#else
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
}
#endif
#if IS_ENABLED(CONFIG_INET)
static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
struct net_device *dev, struct bpf_nh_params *nh)
{ … }
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{ … }
#else
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
}
#endif
static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{ … }
enum { … };
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{ … }
static const struct bpf_func_proto bpf_clone_redirect_proto = …;
static struct net_device *skb_get_peer_dev(struct net_device *dev)
{ … }
int skb_do_redirect(struct sk_buff *skb)
{ … }
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{ … }
static const struct bpf_func_proto bpf_redirect_proto = …;
BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{ … }
static const struct bpf_func_proto bpf_redirect_peer_proto = …;
BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
int, plen, u64, flags)
{ … }
static const struct bpf_func_proto bpf_redirect_neigh_proto = …;
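/*
 * sk_msg (BPF_PROG_TYPE_SK_MSG) helpers: apply/cork control how many
 * bytes the verdict covers or are buffered before the program runs
 * again, while pull/push/pop edit the scatter-gather list backing the
 * message so a byte range becomes directly accessible, is inserted, or
 * is removed.
 */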
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{ … }
static const struct bpf_func_proto bpf_msg_apply_bytes_proto = …;
BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{ … }
static void sk_msg_reset_curr(struct sk_msg *msg)
{ … }
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = …;
BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
u32, end, u64, flags)
{ … }
static const struct bpf_func_proto bpf_msg_pull_data_proto = …;
BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_msg_push_data_proto = …;
static void sk_msg_shift_left(struct sk_msg *msg, int i)
{ … }
static void sk_msg_shift_right(struct sk_msg *msg, int i)
{ … }
BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_msg_pop_data_proto = …;
#ifdef CONFIG_CGROUP_NET_CLASSID
BPF_CALL_0(bpf_get_cgroup_classid_curr)
{ … }
const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = …;
BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = …;
#endif
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_get_cgroup_classid_proto = …;
BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_get_route_realm_proto = …;
BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_get_hash_recalc_proto = …;
BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_set_hash_invalid_proto = …;
BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{ … }
static const struct bpf_func_proto bpf_set_hash_proto = …;
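/*
 * Packet reshaping helpers: VLAN push/pop, IPv4<->IPv6 protocol
 * translation (bpf_skb_change_proto), header room adjustment
 * (bpf_skb_adjust_room) and tail/head growth or trimming
 * (bpf_skb_change_tail/head). They share the generic and net-header
 * push/pop primitives below and use the *_rcsum() helpers to keep
 * skb->csum valid for CHECKSUM_COMPLETE skbs.
 */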
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
u16, vlan_tci)
{ … }
static const struct bpf_func_proto bpf_skb_vlan_push_proto = …;
BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_skb_vlan_pop_proto = …;
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{ … }
static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{ … }
static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{ … }
static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{ … }
static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{ … }
static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{ … }
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{ … }
BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_change_proto_proto = …;
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{ … }
static const struct bpf_func_proto bpf_skb_change_type_proto = …;
static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{ … }
#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK …
#define BPF_F_ADJ_ROOM_DECAP_L3_MASK …
#define BPF_F_ADJ_ROOM_MASK …
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{ … }
static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{ … }
#define BPF_SKB_MAX_LEN …
BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{ … }
static const struct bpf_func_proto sk_skb_adjust_room_proto = …;
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_adjust_room_proto = …;
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{ … }
static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{ … }
static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{ … }
static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
u64 flags)
{ … }
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_change_tail_proto = …;
BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{ … }
static const struct bpf_func_proto sk_skb_change_tail_proto = …;
static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
u64 flags)
{ … }
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_change_head_proto = …;
BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
u64, flags)
{ … }
static const struct bpf_func_proto sk_skb_change_head_proto = …;
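/*
 * XDP data helpers: buffer length (frags included for multi-buff),
 * head/meta/tail adjustment, and byte load/store that may span
 * fragments; bpf_xdp_pointer() returns a linear pointer when the
 * requested range sits in one buffer and NULL otherwise, in which case
 * bpf_xdp_copy_buf() does a scattered copy.
 *
 * Illustrative XDP program sketch (not part of this file; struct my_hdr
 * is a placeholder for an encapsulation header) reserving headroom:
 *
 *	if (bpf_xdp_adjust_head(xdp, -(int)sizeof(struct my_hdr)))
 *		return XDP_DROP;
 */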
BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp)
{ … }
static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = …;
BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = …;
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{ … }
BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{ … }
static const struct bpf_func_proto bpf_xdp_adjust_head_proto = …;
void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
void *buf, unsigned long len, bool flush)
{ … }
void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
{ … }
BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset,
void *, buf, u32, len)
{ … }
static const struct bpf_func_proto bpf_xdp_load_bytes_proto = …;
int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
{ … }
BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset,
void *, buf, u32, len)
{ … }
static const struct bpf_func_proto bpf_xdp_store_bytes_proto = …;
int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
{ … }
static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
{ … }
static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
struct xdp_mem_info *mem_info, bool release)
{ … }
static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
int shrink)
{ … }
static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
{ … }
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{ … }
static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = …;
BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{ … }
static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = …;
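/*
 * XDP redirect: bpf_xdp_redirect() and bpf_xdp_redirect_map() only
 * record the target in the per-CPU bpf_redirect_info; the actual
 * transmission happens in xdp_do_redirect()/xdp_do_redirect_frame()
 * (or xdp_do_generic_redirect() on the generic/skb path) after the
 * program returns XDP_REDIRECT, with xdp_do_flush() kicking maps that
 * batch their flushes.
 */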
void xdp_do_flush(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi)
{ … }
#endif
DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
EXPORT_SYMBOL_GPL(…);
u32 xdp_master_redirect(struct xdp_buff *xdp)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{ … }
static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
struct net_device *dev,
struct xdp_frame *xdpf,
struct bpf_prog *xdp_prog)
{ … }
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{ … }
EXPORT_SYMBOL_GPL(…);
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
{ … }
EXPORT_SYMBOL_GPL(…);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, void *fwd,
enum bpf_map_type map_type, u32 map_id,
u32 flags)
{ … }
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{ … }
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_redirect_proto = …;
BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u64, key,
u64, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = …;
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
unsigned long off, unsigned long len)
{ … }
BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
u64, flags, void *, meta, u64, meta_size)
{ … }
static const struct bpf_func_proto bpf_skb_event_output_proto = …;
BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
const struct bpf_func_proto bpf_skb_output_proto = …;
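/*
 * Tunnel metadata helpers: the get variants read the collect_md tunnel
 * info attached to the skb, and the set variants install a per-CPU
 * metadata dst (md_dst below) so a subsequent transmit through a
 * collect_md tunnel device uses the key/options the program supplied.
 */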
static unsigned short bpf_tunnel_key_af(u64 flags)
{ … }
BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
u32, size, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = …;
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{ … }
static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = …;
static struct metadata_dst __percpu *md_dst;
BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = …;
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
const u8 *, from, u32, size)
{ … }
static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = …;
static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{ … }
BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
u32, idx)
{ … }
static const struct bpf_func_proto bpf_skb_under_cgroup_proto = …;
#ifdef CONFIG_SOCK_CGROUP_DATA
static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
{ … }
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = …;
static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
int ancestor_level)
{ … }
BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
ancestor_level)
{ … }
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = …;
BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
{ … }
static const struct bpf_func_proto bpf_sk_cgroup_id_proto = …;
BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
{ … }
static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = …;
#endif
static unsigned long bpf_xdp_copy(void *dst, const void *ctx,
unsigned long off, unsigned long len)
{ … }
BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
u64, flags, void *, meta, u64, meta_size)
{ … }
static const struct bpf_func_proto bpf_xdp_event_output_proto = …;
BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_output_proto = …;
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_get_socket_cookie_proto = …;
BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = …;
BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = …;
BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = …;
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = …;
static u64 __bpf_get_netns_cookie(struct sock *sk)
{ … }
BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = …;
BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = …;
BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = …;
BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx)
{ … }
static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = …;
BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{ … }
static const struct bpf_func_proto bpf_get_socket_uid_proto = …;
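/*
 * bpf_setsockopt()/bpf_getsockopt() backends: the sol_*_sockopt()
 * helpers implement the subset of SOL_SOCKET/SOL_TCP/SOL_IP/SOL_IPV6
 * options that BPF programs may touch, without the sockptr_t user-copy
 * paths. _bpf_setsockopt() asserts the caller owns the socket lock; the
 * *_unlocked_* variants skip that check.
 *
 * Illustrative sock_ops sketch (not part of this file) switching the
 * congestion control of a connection:
 *
 *	char cc[] = "reno";
 *
 *	bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc));
 */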
static int sol_socket_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{ … }
static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
char *optval, int optlen)
{ … }
static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval,
int *optlen, bool getopt)
{ … }
static int sol_tcp_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{ … }
static int sol_ip_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{ … }
static int sol_ipv6_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{ … }
static int __bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{ … }
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{ … }
static int __bpf_getsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{ … }
static int _bpf_getsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{ … }
BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{ … }
const struct bpf_func_proto bpf_sk_setsockopt_proto = …;
BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{ … }
const struct bpf_func_proto bpf_sk_getsockopt_proto = …;
BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{ … }
const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = …;
BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{ … }
const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = …;
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{ … }
static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = …;
BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{ … }
static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = …;
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{ … }
static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = …;
static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
int optname, const u8 **start)
{ … }
BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{ … }
static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = …;
BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
int, argval)
{ … }
static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = …;
const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
EXPORT_SYMBOL_GPL(…);
BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
int, addr_len)
{ … }
static const struct bpf_func_proto bpf_bind_proto = …;
#ifdef CONFIG_XFRM
#if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
(IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
struct metadata_dst __percpu *xfrm_bpf_md_dst;
EXPORT_SYMBOL_GPL(xfrm_bpf_md_dst);
#endif
BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
struct bpf_xfrm_state *, to, u32, size, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = …;
#endif
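/*
 * bpf_fib_lookup(): forwarding lookups for XDP and tc programs. The
 * IPv4/IPv6 backends below walk the FIB (optionally checking the MTU
 * and neighbour state) and fill struct bpf_fib_lookup with the nexthop
 * result so the program can rewrite the Ethernet header and redirect.
 */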
#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu)
{ … }
#endif
#if IS_ENABLED(CONFIG_INET)
static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
u32 flags, bool check_mtu)
{ … }
#endif
#if IS_ENABLED(CONFIG_IPV6)
static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
u32 flags, bool check_mtu)
{ … }
#endif
#define BPF_FIB_LOOKUP_MASK …
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = …;
BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{ … }
static const struct bpf_func_proto bpf_skb_fib_lookup_proto = …;
static struct net_device *__dev_via_ifindex(struct net_device *dev_curr,
u32 ifindex)
{ … }
BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
{ … }
BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skb_check_mtu_proto = …;
static const struct bpf_func_proto bpf_xdp_check_mtu_proto = …;
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
{ … }
#endif
#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
bool ingress)
{ … }
#endif
BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
u32, len)
{ … }
BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
void *, hdr, u32, len)
{ … }
static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = …;
static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = …;
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
const void *, from, u32, len)
{ … }
static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = …;
static void bpf_update_srh_state(struct sk_buff *skb)
{ … }
BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
u32, action, void *, param, u32, param_len)
{ … }
static const struct bpf_func_proto bpf_lwt_seg6_action_proto = …;
BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
s32, len)
{ … }
static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = …;
#endif
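/*
 * Socket lookups: __bpf_skc_lookup()/__bpf_sk_lookup() resolve a
 * struct bpf_sock_tuple to a TCP or UDP socket in the caller's (or an
 * explicitly selected) network namespace. Sockets returned by the
 * sk_lookup variants are reference counted and must be released with
 * bpf_sk_release().
 *
 * Illustrative tc program sketch (not part of this file; assumes a
 * struct bpf_sock_tuple "tuple" already filled from the packet):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 */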
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
int dif, int sdif, u8 family, u8 proto)
{ … }
static struct sock *
__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
u64 flags, int sdif)
{ … }
static struct sock *
__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
u64 flags, int sdif)
{ … }
static struct sock *
bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
u8 proto, u64 netns_id, u64 flags)
{ … }
static struct sock *
bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
u8 proto, u64 netns_id, u64 flags)
{ … }
BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = …;
BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = …;
BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sk_lookup_udp_proto = …;
BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = …;
BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = …;
BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = …;
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{ … }
static const struct bpf_func_proto bpf_sk_release_proto = …;
BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = …;
BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = …;
BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = …;
BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = …;
BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = …;
BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = …;
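/*
 * TCP-oriented program support: read-only struct bpf_tcp_sock access,
 * SYN cookie generation and validation, bpf_sk_assign() for steering an
 * skb to a looked-up socket from tc, and the sock_ops helpers for
 * loading, storing and reserving TCP header options.
 */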
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{ … }
u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_tcp_sock_proto = …;
BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
{ … }
static const struct bpf_func_proto bpf_get_listener_sock_proto = …;
BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
{ … }
bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{ … }
u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = …;
BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
struct tcphdr *, th, u32, th_len)
{ … }
static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = …;
BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
struct tcphdr *, th, u32, th_len)
{ … }
static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = …;
BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sk_assign_proto = …;
static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
u8 search_kind, const u8 *magic,
u8 magic_len, bool *eol)
{ … }
BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
void *, search_res, u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = …;
BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
const void *, from, u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = …;
BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
u32, len, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = …;
BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
u64, tstamp, u32, tstamp_type)
{ … }
static const struct bpf_func_proto bpf_skb_set_tstamp_proto = …;
#ifdef CONFIG_SYN_COOKIES
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv4, struct iphdr *, iph,
struct tcphdr *, th, u32, th_len)
{ … }
static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = …;
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph,
struct tcphdr *, th, u32, th_len)
{ … }
static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = …;
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph,
struct tcphdr *, th)
{ … }
static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv4_proto = …;
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv6, struct ipv6hdr *, iph,
struct tcphdr *, th)
{ … }
static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = …;
#endif
#endif
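/*
 * Per-program-type helper dispatch: bpf_helper_changes_pkt_data() tells
 * the verifier which helpers invalidate packet pointers, and the
 * *_func_proto() functions below map a helper id to its prototype for
 * each networking program type, falling back to bpf_sk_base_func_proto()
 * and the generic base prototypes.
 */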
bool bpf_helper_changes_pkt_data(void *func)
{ … }
const struct bpf_func_proto bpf_event_output_data_proto __weak;
const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak;
static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
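/*
 * Context access validation and rewriting: the *_is_valid_access()
 * callbacks tell the verifier which offsets and sizes of the
 * user-visible context (__sk_buff, xdp_md, bpf_sock, ...) may be read
 * or written, and the *_convert_ctx_access() callbacks rewrite those
 * accesses into loads/stores on the underlying kernel objects
 * (sk_buff, xdp_buff, sock, ...).
 */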
static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool sk_filter_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool cg_skb_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool lwt_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool __sock_filter_check_attach_type(int off,
enum bpf_access_type access_type,
enum bpf_attach_type attach_type)
{ … }
bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{ … }
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{ … }
static bool sock_filter_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
{ … }
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog, int drop_verdict)
{ … }
static int bpf_gen_ld_abs(const struct bpf_insn *orig,
struct bpf_insn *insn_buf)
{ … }
static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
{ … }
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
DEFINE_MUTEX(…);
EXPORT_SYMBOL_GPL(…);
int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size);
EXPORT_SYMBOL_GPL(…);
static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size)
{ … }
static bool __is_valid_xdp_access(int off, int size)
{ … }
static bool xdp_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act)
{ … }
EXPORT_SYMBOL_GPL(…);
static int xdp_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size)
{ … }
static bool sock_addr_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool sock_ops_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
{ … }
static bool sk_skb_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool sk_msg_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static bool flow_dissector_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{ … }
static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si,
struct bpf_insn *insn)
{ … }
static struct bpf_insn *bpf_convert_shinfo_access(__u8 dst_reg, __u8 skb_reg,
struct bpf_insn *insn)
{ … }
static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
const struct bpf_insn *si,
struct bpf_insn *insn)
{ … }
static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
const struct bpf_insn *si,
struct bpf_insn *insn)
{ … }
#define BPF_EMIT_STORE(size, si, off) …
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
static u32 xdp_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) …
#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) …
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) …
#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
TF) …
#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) …
static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{ … }
static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si,
struct bpf_insn *insn)
{ … }
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{ … }
const struct bpf_verifier_ops sk_filter_verifier_ops = …;
const struct bpf_prog_ops sk_filter_prog_ops = …;
const struct bpf_verifier_ops tc_cls_act_verifier_ops = …;
const struct bpf_prog_ops tc_cls_act_prog_ops = …;
const struct bpf_verifier_ops xdp_verifier_ops = …;
const struct bpf_prog_ops xdp_prog_ops = …;
const struct bpf_verifier_ops cg_skb_verifier_ops = …;
const struct bpf_prog_ops cg_skb_prog_ops = …;
const struct bpf_verifier_ops lwt_in_verifier_ops = …;
const struct bpf_prog_ops lwt_in_prog_ops = …;
const struct bpf_verifier_ops lwt_out_verifier_ops = …;
const struct bpf_prog_ops lwt_out_prog_ops = …;
const struct bpf_verifier_ops lwt_xmit_verifier_ops = …;
const struct bpf_prog_ops lwt_xmit_prog_ops = …;
const struct bpf_verifier_ops lwt_seg6local_verifier_ops = …;
const struct bpf_prog_ops lwt_seg6local_prog_ops = …;
const struct bpf_verifier_ops cg_sock_verifier_ops = …;
const struct bpf_prog_ops cg_sock_prog_ops = …;
const struct bpf_verifier_ops cg_sock_addr_verifier_ops = …;
const struct bpf_prog_ops cg_sock_addr_prog_ops = …;
const struct bpf_verifier_ops sock_ops_verifier_ops = …;
const struct bpf_prog_ops sock_ops_prog_ops = …;
const struct bpf_verifier_ops sk_skb_verifier_ops = …;
const struct bpf_prog_ops sk_skb_prog_ops = …;
const struct bpf_verifier_ops sk_msg_verifier_ops = …;
const struct bpf_prog_ops sk_msg_prog_ops = …;
const struct bpf_verifier_ops flow_dissector_verifier_ops = …;
const struct bpf_prog_ops flow_dissector_prog_ops = …;
int sk_detach_filter(struct sock *sk)
{ … }
EXPORT_SYMBOL_GPL(…);
int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len)
{ … }
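/*
 * SO_REUSEPORT selection (BPF_PROG_TYPE_SK_REUSEPORT): the program runs
 * from bpf_run_sk_reuseport() with a struct sk_reuseport_kern context
 * and can pick the receiving socket out of a reuseport sockarray (or
 * sockmap/sockhash) via bpf_sk_select_reuseport().
 */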
#ifdef CONFIG_INET
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
struct sock_reuseport *reuse,
struct sock *sk, struct sk_buff *skb,
struct sock *migrating_sk,
u32 hash)
{ … }
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
struct sock *migrating_sk,
u32 hash)
{ … }
BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
struct bpf_map *, map, void *, key, u32, flags)
{ … }
static const struct bpf_func_proto sk_select_reuseport_proto = …;
BPF_CALL_4(sk_reuseport_load_bytes,
const struct sk_reuseport_kern *, reuse_kern, u32, offset,
void *, to, u32, len)
{ … }
static const struct bpf_func_proto sk_reuseport_load_bytes_proto = …;
BPF_CALL_5(sk_reuseport_load_bytes_relative,
const struct sk_reuseport_kern *, reuse_kern, u32, offset,
void *, to, u32, len, u32, start_header)
{ … }
static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = …;
static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
const struct bpf_prog *prog)
{ … }
static bool
sk_reuseport_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
#define SK_REUSEPORT_LOAD_FIELD(F) …
#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) …
#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) …
static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{ … }
const struct bpf_verifier_ops sk_reuseport_verifier_ops = …;
const struct bpf_prog_ops sk_reuseport_prog_ops = …;
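/*
 * BPF_PROG_TYPE_SK_LOOKUP: programs attached to a network namespace run
 * on the TCP/UDP listener lookup path and can pick the receiving socket
 * with the sk_lookup flavour of bpf_sk_assign(); the static key below
 * keeps the hook cost-free while no program is attached.
 */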
DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
EXPORT_SYMBOL(…);
BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
struct sock *, sk, u64, flags)
{ … }
static const struct bpf_func_proto bpf_sk_lookup_assign_proto = …;
static const struct bpf_func_proto *
sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
static bool sk_lookup_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{ … }
static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{ … }
const struct bpf_prog_ops sk_lookup_prog_ops = …;
const struct bpf_verifier_ops sk_lookup_verifier_ops = …;
#endif
DEFINE_BPF_DISPATCHER(…)
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
{ … }
BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
#define BTF_SOCK_TYPE …
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
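/*
 * BTF-typed socket casts: the bpf_skc_to_*() helpers downcast a
 * struct sock_common pointer to a more specific socket type (tcp6, tcp,
 * timewait, request, udp6, unix, mptcp), returning NULL when the socket
 * is not of the requested kind, so BTF-aware programs can read
 * type-specific fields.
 */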
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = …;
BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = …;
BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = …;
BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = …;
BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = …;
BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_unix_sock_proto = …;
BPF_CALL_1(bpf_skc_to_mptcp_sock, struct sock *, sk)
{ … }
const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto = …;
BPF_CALL_1(bpf_sock_from_file, struct file *, file)
{ … }
BTF_ID_LIST(bpf_sock_from_file_btf_ids)
BTF_ID(…)
BTF_ID(…)
const struct bpf_func_proto bpf_sock_from_file_proto = …;
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ … }
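/*
 * kfuncs: bpf_dynptr_from_skb()/bpf_dynptr_from_xdp() expose packet data
 * through the dynptr API, bpf_sock_addr_set_sun_path() rewrites an
 * AF_UNIX address from a sock_addr program, and bpf_sk_assign_tcp_reqsk()
 * attaches a listener's request socket for BPF-driven SYN cookies.
 *
 * Illustrative sketch (not part of this file) reading the Ethernet
 * header through a dynptr:
 *
 *	struct bpf_dynptr ptr;
 *	struct ethhdr buf, *eth;
 *
 *	if (bpf_dynptr_from_skb(skb, 0, &ptr))
 *		return TC_ACT_OK;
 *	eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *	if (!eth)
 *		return TC_ACT_OK;
 */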
__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
struct bpf_dynptr *ptr__uninit)
{ … }
__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags,
struct bpf_dynptr *ptr__uninit)
{ … }
__bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
const u8 *sun_path, u32 sun_path__sz)
{ … }
__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk,
struct bpf_tcp_req_attrs *attrs, int attrs__sz)
{ … }
__bpf_kfunc_end_defs();
int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
struct bpf_dynptr *ptr__uninit)
{ … }
BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
static const struct btf_kfunc_id_set bpf_kfunc_set_skb = …;
static const struct btf_kfunc_id_set bpf_kfunc_set_xdp = …;
static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = …;
static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = …;
static int __init bpf_kfunc_init(void)
{ … }
late_initcall(bpf_kfunc_init);
__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
{ … }
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(…)
BTF_KFUNCS_END(…)
static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
{ … }
static const struct btf_kfunc_id_set bpf_sk_iter_kfunc_set = …;
static int init_subsystem(void)
{ … }
late_initcall(init_subsystem);