#define pr_fmt(fmt) …
#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
#define TX_BATCH_SIZE …
#define MAX_PER_SOCKET_BUDGET …
/* need_wakeup flag accessors, exported for use by zero-copy drivers.
 * NOTE(review): function bodies are elided ("…") in this view; the
 * one-line summaries below are inferred from the exported names —
 * confirm against the full source.
 */
/* Set the "user space must be woken" flag on the Rx side of @pool. */
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Set the need-wakeup flag on the Tx side of @pool. */
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Clear the Rx need-wakeup flag of @pool. */
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Clear the Tx need-wakeup flag of @pool. */
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Return whether sockets on @pool negotiated the need_wakeup feature. */
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Registry mapping a (net_device, queue_id) pair to its xsk_buff_pool.
 * NOTE(review): bodies elided in this view — summaries inferred from names.
 */
/* Return the pool registered on @dev for @queue_id (presumably NULL if none). */
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
u16 queue_id)
{ … }
EXPORT_SYMBOL(…);
/* Remove whatever pool is registered at @queue_id on @dev. */
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{ … }
/* Register @pool at @queue_id on @dev; returns 0 or a negative errno. */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id)
{ … }
/* Rx ingest helpers: place a received XDP frame on the socket's Rx ring,
 * either zero-copy (descriptors already in umem) or by copying.
 * NOTE(review): bodies elided — descriptions inferred from names/signatures.
 */
/* Zero-copy receive of one buffer (@xskb, @len bytes) into @xs's Rx ring. */
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
u32 flags)
{ … }
/* Zero-copy receive entry point taking a generic xdp_buff. */
static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{ … }
/* Return the start address to copy from in @from (likely data_meta/data). */
static void *xsk_copy_xdp_start(struct xdp_buff *from)
{ … }
/* Copy up to @to_len bytes from a possibly multi-frag xdp_buff; advances
 * the (*from, *from_len, *frag) cursor and returns the amount copied —
 * TODO confirm return semantics against the full source. */
static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
u32 *from_len, skb_frag_t **frag, u32 rem)
{ … }
/* Copy-mode receive: allocate from the pool and copy @xdp into it. */
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{ … }
/* Small state predicates and the pre-receive sanity check.
 * NOTE(review): bodies elided — summaries inferred from names. */
/* True if the Tx ring has room for user space to produce more descriptors. */
static bool xsk_tx_writeable(struct xdp_sock *xs)
{ … }
/* True if @xs has completed bind() to a device/queue. */
static bool xsk_is_bound(struct xdp_sock *xs)
{ … }
/* Validate that @xdp may be delivered to @xs (bound socket, matching
 * queue, acceptable length — confirm exact checks in full source). */
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{ … }
/* Rx delivery paths: direct generic receive and XSKMAP redirect + flush.
 * NOTE(review): bodies elided — summaries inferred from names. */
/* Publish produced Rx descriptors and wake any waiter on @xs. */
static void xsk_flush(struct xdp_sock *xs)
{ … }
/* Generic (copy-mode) receive entry: validate, copy in and flush. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{ … }
/* Receive one frame on @xs, choosing zero-copy or copy path. */
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{ … }
/* bpf_redirect_map() backend: queue @xdp on @xs for a later flush. */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{ … }
/* Flush every socket accumulated on @flush_list during NAPI. */
void __xsk_map_flush(struct list_head *flush_list)
{ … }
/* Driver-facing Tx API: completion reporting and descriptor peeking.
 * NOTE(review): bodies elided — summaries inferred from exported names. */
/* Report @nb_entries finished Tx buffers back on the completion ring. */
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{ … }
EXPORT_SYMBOL(…);
/* Release consumed Tx descriptors and wake Tx-blocked sockets on @pool. */
void xsk_tx_release(struct xsk_buff_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/* Peek one Tx descriptor from @pool into @desc; false when none ready. */
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{ … }
EXPORT_SYMBOL(…);
/* Slow path for batched peek when the fast batch path cannot be used. */
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{ … }
/* Peek and release up to @nb_pkts Tx descriptors in one batch; returns
 * the number obtained. */
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{ … }
EXPORT_SYMBOL(…);
/* Driver wakeup and completion-queue producer helpers.
 * NOTE(review): bodies elided; the "_locked" suffix suggests the caller
 * holds the relevant cq lock — confirm in full source. */
/* Ask the driver to process rings for @xs (ndo_xsk_wakeup, presumably). */
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{ … }
/* Reserve one completion-ring slot for @addr. */
static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{ … }
/* Submit @n previously reserved completion-ring entries. */
static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{ … }
/* Cancel @n reserved-but-unsubmitted completion-ring entries. */
static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{ … }
/* skb bookkeeping for the copy-mode Tx path.
 * NOTE(review): bodies elided — summaries inferred from names; the
 * descriptor count is presumably stashed in skb destructor_arg. */
/* Number of Tx descriptors this @skb accounts for. */
static u32 xsk_get_num_desc(struct sk_buff *skb)
{ … }
/* skb destructor: complete the descriptors carried by @skb. */
static void xsk_destruct_skb(struct sk_buff *skb)
{ … }
/* Record per-skb descriptor bookkeeping in the destructor argument. */
static void xsk_set_destructor_arg(struct sk_buff *skb)
{ … }
/* Consume @skb without completing its descriptors (ownership retained). */
static void xsk_consume_skb(struct sk_buff *skb)
{ … }
/* Drop @skb, rolling back / accounting its Tx descriptors. */
static void xsk_drop_skb(struct sk_buff *skb)
{ … }
/* Build an sk_buff for one Tx descriptor in copy mode.
 * NOTE(review): bodies elided — the zerocopy variant presumably maps umem
 * pages as frags instead of copying; confirm in full source. */
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
struct xdp_desc *desc)
{ … }
/* Generic builder: allocate an skb and copy @desc's payload into it. */
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
struct xdp_desc *desc)
{ … }
/* Copy-mode transmit path driven from sendmsg()/poll().
 * NOTE(review): bodies elided — summaries inferred from names and the
 * TX_BATCH_SIZE/MAX_PER_SOCKET_BUDGET defines above. */
/* Drain the Tx ring of @sk: build skbs and hand them to the stack. */
static int __xsk_generic_xmit(struct sock *sk)
{ … }
/* Wrapper around __xsk_generic_xmit (presumably adds locking/rechecks). */
static int xsk_generic_xmit(struct sock *sk)
{ … }
/* True when a wakeup/transmit kick can be skipped for @sk. */
static bool xsk_no_wakeup(struct sock *sk)
{ … }
/* sendmsg()/recvmsg() socket entry points.
 * NOTE(review): bodies elided — the __-prefixed variants are presumably
 * the lockless/checked cores; confirm in full source. */
/* Shared validity checks (socket bound, rings present) for send/recv. */
static int xsk_check_common(struct xdp_sock *xs)
{ … }
/* Core of sendmsg: kick the Tx path per msg flags and socket state. */
static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{ … }
/* proto_ops sendmsg handler for AF_XDP sockets. */
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{ … }
/* Core of recvmsg: flush/wake the Rx path as needed. */
static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{ … }
/* proto_ops recvmsg handler for AF_XDP sockets. */
static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{ … }
/* poll() handler: report EPOLLIN/EPOLLOUT based on ring state.
 * NOTE(review): body elided — summary inferred from the signature. */
static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{ … }
/* Allocate a ring of @entries slots into *@queue; @umem_queue selects
 * fill/completion vs rx/tx ring layout — confirm in full source. */
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
bool umem_queue)
{ … }
/* Teardown and bookkeeping: device unbind, XSKMAP membership, release.
 * NOTE(review): bodies elided — summaries inferred from names. */
/* Detach @xs from its bound device/queue. */
static void xsk_unbind_dev(struct xdp_sock *xs)
{ … }
/* Pop one XSKMAP the socket is a member of, returning its entry slot
 * through @map_entry. */
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
struct xdp_sock __rcu ***map_entry)
{ … }
/* Remove @xs from every XSKMAP that references it. */
static void xsk_delete_from_maps(struct xdp_sock *xs)
{ … }
/* proto_ops release handler: full socket teardown. */
static int xsk_release(struct socket *sock)
{ … }
/* Resolve @fd to an AF_XDP socket (presumably validating the type). */
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{ … }
/* Check the socket's rings are in a bindable configuration. */
static bool xsk_validate_queues(struct xdp_sock *xs)
{ … }
/* bind() handler: attach the socket to a device/queue per sockaddr_xdp.
 * NOTE(review): body elided — summary inferred from the signature. */
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{ … }
/* Older uapi layouts of xdp_umem_reg, kept for setsockopt ABI compat. */
struct xdp_umem_reg_v1 { … };
struct xdp_umem_reg_v2 { … };
/* setsockopt() handler (ring sizes, umem registration, etc.).
 * NOTE(review): bodies elided — summaries inferred from names. */
static int xsk_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{ … }
/* Fill v1-layout ring offsets for the rx/tx rings (getsockopt compat). */
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{ … }
/* Fill v1-layout ring offsets for the fill/completion rings. */
static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{ … }
/* Older uapi layout of xdp_statistics, kept for getsockopt ABI compat. */
struct xdp_statistics_v1 { … };
/* getsockopt(), mmap() and netdev-event handling.
 * NOTE(review): bodies elided — summaries inferred from names. */
/* getsockopt handler (statistics, ring offsets, options). */
static int xsk_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{ … }
/* mmap handler: map the ring selected by the vma offset into user space. */
static int xsk_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{ … }
/* Netdevice notifier: react to device events (e.g. unregister) by
 * tearing down affected sockets — confirm exact events in full source. */
static int xsk_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{ … }
/* Protocol glue: static tables wiring the handlers above into the socket
 * layer, plus socket construction.
 * NOTE(review): initializer contents elided in this view. */
static struct proto xsk_proto = …;
static const struct proto_ops xsk_proto_ops = …;
/* sk_destruct callback: final per-sock cleanup. */
static void xsk_destruct(struct sock *sk)
{ … }
/* net_proto_family create handler: allocate and init an AF_XDP socket. */
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{ … }
static const struct net_proto_family xsk_family_ops = …;
static struct notifier_block xsk_netdev_notifier = …;
/* Per-netns and module initialization.
 * NOTE(review): bodies elided — summaries inferred from names. */
/* Per-network-namespace setup. */
static int __net_init xsk_net_init(struct net *net)
{ … }
/* Per-network-namespace teardown. */
static void __net_exit xsk_net_exit(struct net *net)
{ … }
static struct pernet_operations xsk_net_ops = …;
/* Module init: register proto, family, notifier and pernet ops
 * (presumably — confirm order/rollback in full source). */
static int __init xsk_init(void)
{ … }
fs_initcall(xsk_init);