// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX; 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256
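
/*
 * For illustration only (not a definition used by this file): both weight
 * limits are normally enforced together at the bottom of the TX/RX handler
 * loops via the vhost core helper vhost_exceeds_weight(), roughly
 *
 *	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 *
 * so that one busy virtqueue hands the vhost worker back to its peers.
 */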

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
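
/*
 * Illustrative helper, not part of the upstream file (the function name is
 * made up): a zerocopy TX entry is finished once the status stashed in its
 * used length reaches DONE or FAILED, which is exactly what
 * VHOST_DMA_IS_DONE() tests.
 */
static inline bool vhost_net_zc_entry_finished(__virtio32 len)
{
	return VHOST_DMA_IS_DONE(len);
}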

enum {};

enum {};

enum {};

struct vhost_net_ubuf_ref {};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {};

struct vhost_net_virtqueue {};

struct vhost_net {};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{}

static int vhost_net_buf_peek_len(void *ptr)
{}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{}

static void vhost_net_enable_zcopy(int vq)
{}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{}

static void vhost_net_vq_reset(struct vhost_net *n)
{}

static void vhost_net_tx_packet(struct vhost_net *net)
{}

static void vhost_net_tx_err(struct vhost_net *net)
{}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{}

static bool vhost_sock_zcopy(struct socket *sock)
{}

static bool vhost_sock_xdp(struct socket *sock)
{}

/* DMA completions from the lower device driver may arrive out of order.
 * upend_idx tracks the tail of the outstanding zerocopy entries and done_idx
 * tracks the head. Once a contiguous run of entries starting at done_idx has
 * completed, we signal the used idx to the KVM guest. A simplified sketch
 * follows vhost_zerocopy_signal_used() below.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{}
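
/*
 * Simplified sketch of the scan described above (illustrative only, not the
 * real body; wrap-around of the UIO_MAXIOV ring is glossed over when
 * flushing):
 *
 *	done = 0;
 *	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
 *		if (!VHOST_DMA_IS_DONE(vq->heads[i].len))
 *			break;			// first entry still in flight
 *		++done;
 *	}
 *	// flush the contiguous completed run [done_idx, i) to the used ring
 *	vhost_add_used_and_signal_n(vq->dev, vq, &vq->heads[nvq->done_idx], done);
 *	nvq->done_idx = i;
 */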

static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};

static inline unsigned long busy_clock(void)
{}

static bool vhost_can_busy_poll(unsigned long endtime)
{}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{}

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{}

static int sock_has_rx_data(struct socket *sock)
{}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{}

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{}

static bool vhost_exceeds_maxpend(struct vhost_net *net)
{}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{}

static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{}

static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{}

/* Expects to always be run from a workqueue - which acts as
 * the read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{}

/* This is a multi-buffer version of vhost_get_vq_desc(), which works when
 *	the vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- returned number of log entries filled
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 *	(an example call is sketched after this function)
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{}
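
/*
 * Example call, paraphrased from the RX path rather than quoted verbatim:
 * with mergeable RX buffers negotiated the headcount quota is effectively
 * unbounded (UIO_MAXIOV); otherwise a single "big" buffer is requested:
 *
 *	headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, vhost_len,
 *				&in, vq_log, &log,
 *				likely(mergeable) ? UIO_MAXIOV : 1);
 */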

/* Expects to always be run from a workqueue - which acts as
 * the read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{}

static void handle_tx_kick(struct vhost_work *work)
{}

static void handle_rx_kick(struct vhost_work *work)
{}

static void handle_tx_net(struct vhost_work *work)
{}

static void handle_rx_net(struct vhost_work *work)
{}

static int vhost_net_open(struct inode *inode, struct file *f)
{}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{}

static void vhost_net_flush(struct vhost_net *n)
{}

static int vhost_net_release(struct inode *inode, struct file *f)
{}

static struct socket *get_raw_socket(int fd)
{}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{}

static struct socket *get_tap_socket(int fd)
{}

static struct socket *get_socket(int fd)
{}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{}

static long vhost_net_reset_owner(struct vhost_net *n)
{}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{}

static long vhost_net_set_owner(struct vhost_net *n)
{}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{}

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int __init vhost_net_init(void)
{}
module_init(vhost_net_init);

static void __exit vhost_net_exit(void)
{}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");