// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <[email protected]> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG		BIT(0)
#define VIRTIO_ORPHAN_FLAG	BIT(1)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
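
/*
 * DECLARE_EWMA(pkt_len, 0, 64) generates the ewma_pkt_len_init()/_add()/
 * _read() helpers (see <linux/average.h>) with a weight reciprocal of 64.
 * A minimal usage sketch, assuming the mrg_avg_pkt_len field of the (elided)
 * struct receive_queue; not verbatim upstream code:
 *
 *	ewma_pkt_len_init(&rq->mrg_avg_pkt_len);
 *	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 *	avg = ewma_pkt_len_read(&rq->mrg_avg_pkt_len);
 */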

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] =;

#define GUEST_OFFLOAD_GRO_HW_MASK

struct virtnet_stat_desc {};

struct virtnet_sq_free_stats {};

struct virtnet_sq_stats {};

struct virtnet_rq_stats {};

#define VIRTNET_SQ_STAT(name, m)
#define VIRTNET_RQ_STAT(name, m)

#define VIRTNET_SQ_STAT_QSTAT(name, m)

#define VIRTNET_RQ_STAT_QSTAT(name, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] =;

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] =;

static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] =;

#define VIRTNET_STATS_DESC_CQ(name)

#define VIRTNET_STATS_DESC_RX(class, name)

#define VIRTNET_STATS_DESC_TX(class, name)


static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] =;

#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)

#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)

static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] =;

static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] =;

#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
#define VIRTNET_Q_TYPE_CQ 2

struct virtnet_interrupt_coalesce {};

/* The DMA information for the pages allocated in one batch. */
struct virtnet_rq_dma {};

/* Internal representation of a send virtqueue */
struct send_queue {};

/* Internal representation of a receive virtqueue */
struct receive_queue {};

/* This structure can contain an RSS message with the maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration
 * (virtio_net_rss_config) contains the same info but can't handle the table
 * values.
 * In any case, the structure is passed to the virtio hw through sg_buf, split
 * into parts, because the table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE
#define VIRTIO_NET_RSS_MAX_TABLE_LEN
struct virtio_net_ctrl_rss {};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {};

struct virtnet_info {};

struct padded_vnet_hdr {};

struct virtio_net_common_hdr {};

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats);
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
				 struct sk_buff *skb, u8 flags);
static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
					       struct sk_buff *curr_skb,
					       struct page *page, void *buf,
					       int len, int truesize);

static bool is_xdp_frame(void *ptr)
{}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{}

static bool is_orphan_skb(void *ptr)
{}

static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
{}

static struct sk_buff *ptr_to_skb(void *ptr)
{}
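
/*
 * Illustrative sketch of the pointer-tagging helpers above (not verbatim
 * upstream code): the completion path gets back a bare void * from the
 * virtqueue, and because xdp_frame/sk_buff pointers are at least
 * word-aligned, their low bits are free to carry the type/orphan tags:
 *
 *	static bool is_xdp_frame(void *ptr)
 *	{
 *		return (unsigned long)ptr & VIRTIO_XDP_FLAG;
 *	}
 *
 *	static void *xdp_to_ptr(struct xdp_frame *ptr)
 *	{
 *		return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 *	}
 *
 *	static struct xdp_frame *ptr_to_xdp(void *ptr)
 *	{
 *		return (struct xdp_frame *)((unsigned long)ptr &
 *					    ~VIRTIO_XDP_FLAG);
 *	}
 *
 * The skb variants work the same way with VIRTIO_ORPHAN_FLAG.
 */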

static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			    bool in_napi, struct virtnet_sq_free_stats *stats)
{}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{}

static int txq2vq(int txq)
{}

static int vq2rxq(struct virtqueue *vq)
{}

static int rxq2vq(int rxq)
{}
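
/*
 * With the interleaved layout above these conversions are pure index
 * arithmetic. A minimal sketch (derived from the 0:rx0 1:tx0 ... mapping;
 * not verbatim upstream code):
 *
 *	vq2txq: return (vq->index - 1) / 2;
 *	txq2vq: return txq * 2 + 1;
 *	vq2rxq: return vq->index / 2;
 *	rxq2vq: return rxq * 2;
 */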

static int vq_type(struct virtnet_info *vi, int qid)
{}

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{}

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{}

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{}

static void enable_delayed_refill(struct virtnet_info *vi)
{}

static void disable_delayed_refill(struct virtnet_info *vi)
{}

static void enable_rx_mode_work(struct virtnet_info *vi)
{}

static void disable_rx_mode_work(struct virtnet_info *vi)
{}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{}

static bool virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{}

static void skb_xmit_done(struct virtqueue *vq)
{}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{}
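
/*
 * The mergeable "ctx" is not a real pointer: truesize and headroom are
 * packed into a single unsigned long, with the headroom stored above
 * MRG_CTX_HEADER_SHIFT. A minimal sketch of the round trip (not verbatim
 * upstream code):
 *
 *	ctx = (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) |
 *				      truesize);
 *	headroom = (unsigned long)ctx >> MRG_CTX_HEADER_SHIFT;
 *	truesize = (unsigned long)ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 */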

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{}

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{}

static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			  bool in_napi)
{}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{}

static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
{}

static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
				   struct receive_queue *rq, void *buf, u32 len)
{}

static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
					 struct xdp_buff *xdp)
{}

static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
						 struct receive_queue *rq, struct xdp_buff *xdp,
						 unsigned int *xdp_xmit,
						 struct virtnet_rq_stats *stats)
{}

static void xsk_drop_follow_bufs(struct net_device *dev,
				 struct receive_queue *rq,
				 u32 num_buf,
				 struct virtnet_rq_stats *stats)
{}

static int xsk_append_merge_buffer(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct sk_buff *head_skb,
				   u32 num_buf,
				   struct virtio_net_hdr_mrg_rxbuf *hdr,
				   struct virtnet_rq_stats *stats)
{}

static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
						 struct receive_queue *rq, struct xdp_buff *xdp,
						 unsigned int *xdp_xmit,
						 struct virtnet_rq_stats *stats)
{}

static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
				    void *buf, u32 len,
				    unsigned int *xdp_xmit,
				    struct virtnet_rq_stats *stats)
{}

static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
				   struct xsk_buff_pool *pool, gfp_t gfp)
{}

static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				   struct send_queue *sq,
				   struct xdp_frame *xdpf)
{}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. deciding whether to
 * lock/unlock the txq and doing so. 3. making sparse happy. It is difficult
 * for two inline functions to solve all three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi)

#define virtnet_xdp_put_sq(vi, q)
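
/*
 * A simplified sketch of what virtnet_xdp_get_sq() expands to (illustrative
 * only; the real macro also carries the annotations that keep sparse happy):
 *
 *	int cpu = smp_processor_id();
 *	struct netdev_queue *txq;
 *	unsigned int qp;
 *
 *	if (vi->curr_queue_pairs > nr_cpu_ids) {
 *		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + cpu;
 *		txq = netdev_get_tx_queue(vi->dev, qp);
 *		__netif_tx_acquire(txq);
 *	} else {
 *		qp = cpu % vi->curr_queue_pairs;
 *		txq = netdev_get_tx_queue(vi->dev, qp);
 *		__netif_tx_lock(txq, cpu);
 *	}
 *	sq = vi->sq + qp;
 *
 * virtnet_xdp_put_sq() then does the matching __netif_tx_release() or
 * __netif_tx_unlock().
 */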

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{}

static void put_xdp_frags(struct xdp_buff *xdp)
{}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large
 * buffers with sufficient headroom - so it should affect at most
 * queue size packets. Afterwards, the conditions to enable XDP should
 * preclude the underlying device from sending packets across multiple
 * buffers (num_buf > 1), and we make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       int *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
					       unsigned int xdp_headroom,
					       void *buf,
					       unsigned int len)
{}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
			       struct net_device *dev,
			       struct virtnet_rq_stats *stats)
{}

/* Why not use xdp_build_skb_from_frame()?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 *  1. The size of the prefilled buffer is not fixed before xdp is set.
 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
 *     like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
					       struct virtnet_info *vi,
					       struct xdp_buff *xdp,
					       unsigned int xdp_frags_truesz)
{}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
				      struct virtnet_info *vi,
				      struct receive_queue *rq,
				      struct xdp_buff *xdp,
				      void *buf,
				      unsigned int len,
				      unsigned int frame_sz,
				      int *num_buf,
				      unsigned int *xdp_frags_truesize,
				      struct virtnet_rq_stats *stats)
{}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct bpf_prog *xdp_prog,
				   void *ctx,
				   unsigned int *frame_sz,
				   int *num_buf,
				   struct page **page,
				   int offset,
				   unsigned int *len,
				   struct virtio_net_hdr_mrg_rxbuf *hdr)
{}

static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
					     struct virtnet_info *vi,
					     struct receive_queue *rq,
					     struct bpf_prog *xdp_prog,
					     void *buf,
					     void *ctx,
					     unsigned int len,
					     unsigned int *xdp_xmit,
					     struct virtnet_rq_stats *stats)
{}

static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
					       struct sk_buff *curr_skb,
					       struct page *page, void *buf,
					       int len, int truesize)
{}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{}

static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
				 struct sk_buff *skb, u8 flags)
{}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{}

/* Unlike mergeable buffers, all buffers are allocated with the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context, ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{}
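
/*
 * A minimal sketch of the context trick described above (assuming the
 * rq->sg scatterlist of the elided struct receive_queue; not verbatim
 * upstream code): the headroom value itself is smuggled through the opaque
 * ctx argument instead of a packed truesize/headroom pair:
 *
 *	ctx = (void *)(unsigned long)xdp_headroom;
 *	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 */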

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{}
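
/*
 * Shape of the refill loop (a sketch assuming the add_recvbuf_* helpers
 * above; not verbatim upstream code): keep adding buffers until the ring
 * is full or allocation fails, then kick the device once:
 *
 *	do {
 *		if (vi->mergeable_rx_bufs)
 *			err = add_recvbuf_mergeable(vi, rq, gfp);
 *		else if (vi->big_packets)
 *			err = add_recvbuf_big(vi, rq, gfp);
 *		else
 *			err = add_recvbuf_small(vi, rq, gfp);
 *		if (err)
 *			break;
 *	} while (rq->vq->num_free);
 *	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq))
 *		... bump rq->stats.kicks under the u64_stats seqcount ...
 *	return err != -ENOMEM;
 */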

static void skb_recv_done(struct virtqueue *rvq)
{}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{}

static void refill_work(struct work_struct *work)
{}

static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
				    struct receive_queue *rq,
				    int budget,
				    unsigned int *xdp_xmit,
				    struct virtnet_rq_stats *stats)
{}

static int virtnet_receive_packets(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   int budget,
				   unsigned int *xdp_xmit,
				   struct virtnet_rq_stats *stats)
{}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{}

static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
{}

static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
{}

static int virtnet_poll(struct napi_struct *napi, int budget)
{}

static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{}

static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
{}

static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
{}

static int virtnet_open(struct net_device *dev)
{}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
{}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{}

static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
{}

static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
{}

static int virtnet_rx_resize(struct virtnet_info *vi,
			     struct receive_queue *rq, u32 ring_num)
{}

static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
{}

static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
{}

static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
			     u32 ring_num)
{}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
				       struct scatterlist *out,
				       struct scatterlist *in)
{}

static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{}
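
/*
 * Typical caller pattern, as in virtnet_set_queues() (a sketch assuming
 * an mq member in the elided struct control_buf; cvq buffers cannot live
 * on a vmap'd stack, hence the dedicated buffer): marshal the payload
 * into a scatterlist and let the helper do the control-vq round trip:
 *
 *	struct scatterlist sg;
 *
 *	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		return -EINVAL;
 */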

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{}

static int virtnet_close(struct net_device *dev)
{}

static void virtnet_rx_mode_work(struct work_struct *work)
{}

static void virtnet_set_rx_mode(struct net_device *dev)
{}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{}

static void virtnet_clean_affinity(struct virtnet_info *vi)
{}

static void virtnet_set_affinity(struct virtnet_info *vi)
{}

static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{}

static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
					 u16 vqn, u32 max_usecs, u32 max_packets)
{}

static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
					    u16 queue, u32 max_usecs,
					    u32 max_packets)
{}

static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
					    u16 queue, u32 max_usecs,
					    u32 max_packets)
{}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{}

static int virtnet_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{}

static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{}

static void virtnet_init_default_rss(struct virtnet_info *vi)
{}

static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
{}

static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
{}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{}

static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
				  int num, int qid, const struct virtnet_stat_desc *desc)
{}

/* qid == -1: for rx/tx queue total field */
static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
{}

struct virtnet_stats_ctx {};

static void virtnet_stats_ctx_init(struct virtnet_info *vi,
				   struct virtnet_stats_ctx *ctx,
				   u64 *data, bool to_qstat)
{}

/* stats_sum_queue - Calculate the sum of the same fields across sq or rq.
 * @sum: the position to store the sum values
 * @num: number of fields per queue
 * @q_value: the fields of the first queue
 * @q_num: number of queues
 */
static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
{}
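
/*
 * Sketch (not verbatim upstream code): the per-queue stat blocks are laid
 * out back to back, so field i of queue j lives at q_value[j * num + i]:
 *
 *	for (i = 0; i < num; i++) {
 *		sum[i] = 0;
 *		for (j = 0; j < q_num; j++)
 *			sum[i] += q_value[j * num + i];
 *	}
 */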

static void virtnet_fill_total_fields(struct virtnet_info *vi,
				      struct virtnet_stats_ctx *ctx)
{}

static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
				     struct virtnet_stats_ctx *ctx,
				     const u8 *base, bool drv_stats, u8 reply_type)
{}

/* virtnet_fill_stats - copy the stats to qstats or ethtool -S
 * The stats source is the device or the driver.
 *
 * @vi: virtio net info
 * @qid: the vq id
 * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
 * @base: pointer to the device reply or the driver stats structure
 * @drv_stats: indicates the base type (device reply or driver stats)
 * @reply_type: the type of the device reply (if drv_stats is true, this must be zero)
 */
static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
			       struct virtnet_stats_ctx *ctx,
			       const u8 *base, bool drv_stats, u8 reply_type)
{}

static int __virtnet_get_hw_stats(struct virtnet_info *vi,
				  struct virtnet_stats_ctx *ctx,
				  struct virtio_net_ctrl_queue_stats *req,
				  int req_size, void *reply, int res_size)
{}

static void virtnet_make_stat_req(struct virtnet_info *vi,
				  struct virtnet_stats_ctx *ctx,
				  struct virtio_net_ctrl_queue_stats *req,
				  int qid, int *idx)
{}

/* qid: -1: get stats of all vqs.
 *     > 0: get the stats for the specified vq. This must not be cvq.
 */
static int virtnet_get_hw_stats(struct virtnet_info *vi,
				struct virtnet_stats_ctx *ctx, int qid)
{}

static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{}

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{}

static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
					  struct ethtool_coalesce *ec)
{}

static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
					  struct ethtool_coalesce *ec)
{}

static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
				       struct ethtool_coalesce *ec)
{}

static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
					     struct ethtool_coalesce *ec,
					     u16 queue)
{}

static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
					  struct ethtool_coalesce *ec,
					  u16 queue)
{}

static void virtnet_rx_dim_work(struct work_struct *work)
{}

static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{}

static int virtnet_should_update_vq_weight(int dev_flags, int weight,
					   int vq_weight, bool *should_update)
{}

static int virtnet_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{}

static int virtnet_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{}

static int virtnet_set_per_queue_coalesce(struct net_device *dev,
					  u32 queue,
					  struct ethtool_coalesce *ec)
{}

static int virtnet_get_per_queue_coalesce(struct net_device *dev,
					  u32 queue,
					  struct ethtool_coalesce *ec)
{}

static void virtnet_init_settings(struct net_device *dev)
{}

static void virtnet_update_settings(struct virtnet_info *vi)
{}

static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{}

static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
{}

static int virtnet_get_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh)
{}

static int virtnet_set_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh,
			    struct netlink_ext_ack *extack)
{}

static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{}

static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{}

static const struct ethtool_ops virtnet_ethtool_ops =;

static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
				       struct netdev_queue_stats_rx *stats)
{}

static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
				       struct netdev_queue_stats_tx *stats)
{}

static void virtnet_get_base_stats(struct net_device *dev,
				   struct netdev_queue_stats_rx *rx,
				   struct netdev_queue_stats_tx *tx)
{}

static const struct netdev_stat_ops virtnet_stat_ops =;

static void virtnet_freeze_down(struct virtio_device *vdev)
{}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{}

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{}

static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
				    struct xsk_buff_pool *pool)
{}

static int virtnet_xsk_pool_enable(struct net_device *dev,
				   struct xsk_buff_pool *pool,
				   u16 qid)
{}

static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
{}

static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
{}

static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
				      size_t len)
{}

static int virtnet_set_features(struct net_device *dev,
				netdev_features_t features)
{}

static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{}

static int virtnet_init_irq_moder(struct virtnet_info *vi)
{}

static void virtnet_free_irq_moder(struct virtnet_info *vi)
{}

static const struct net_device_ops virtnet_netdev =;

static void virtnet_config_changed_work(struct work_struct *work)
{}

static void virtnet_config_changed(struct virtio_device *vdev)
{}

static void virtnet_free_queues(struct virtnet_info *vi)
{}

static void _free_receive_bufs(struct virtnet_info *vi)
{}

static void free_receive_bufs(struct virtnet_info *vi)
{}

static void free_receive_page_frags(struct virtnet_info *vi)
{}

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
{}

static void free_unused_bufs(struct virtnet_info *vi)
{}

static void virtnet_del_vqs(struct virtnet_info *vi)
{}

/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{}
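
/*
 * Sketch of the sizing math (assuming IP_MAX_MTU as the worst case for
 * big packets; not verbatim upstream code): spread one maximum-size packet
 * plus headers across the whole ring, and never go below GOOD_PACKET_LEN:
 *
 *	buf_len = vi->hdr_len + ETH_HLEN + VLAN_HLEN +
 *		  (vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu);
 *	min_buf_len = DIV_ROUND_UP(buf_len, virtqueue_get_vring_size(vq));
 *	return max(min_buf_len, (unsigned int)GOOD_PACKET_LEN);
 */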

static int virtnet_find_vqs(struct virtnet_info *vi)
{}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{}

static int init_vqs(struct virtnet_info *vi)
{}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
		char *buf)
{}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =;

static struct attribute *virtio_net_mrg_rx_attrs[] =;

static const struct attribute_group virtio_net_mrg_rx_group =;
#endif

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

static int virtnet_validate(struct virtio_device *vdev)
{}

static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
{}

static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
{}

#define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10
static enum xdp_rss_hash_type
virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] =;

static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
			       enum xdp_rss_hash_type *rss_type)
{}

static const struct xdp_metadata_ops virtnet_xdp_metadata_ops =;

static int virtnet_probe(struct virtio_device *vdev)
{}

static void remove_vq_common(struct virtnet_info *vi)
{}

static void virtnet_remove(struct virtio_device *vdev)
{}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{}

static struct virtio_device_id id_table[] =;

#define VIRTNET_FEATURES

static unsigned int features[] =;

static unsigned int features_legacy[] =;

static struct virtio_driver virtio_net_driver =;

static __init int virtio_net_driver_init(void)
{}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");