linux/drivers/net/xen-netback/netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>
#include <linux/skbuff_ref.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
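
/* Sketch for illustration (paraphrased, not verbatim): the flag is
 * advertised to the frontend through xenstore (roughly as done in
 * xenbus.c), and the frontend then supplies one or two event channels:
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename,
 *			    "feature-split-event-channels", "%u",
 *			    separate_tx_rx_irq);
 */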

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
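
/* Sketch for illustration (paraphrased, not verbatim): the Rx side
 * (rx.c) stamps each queued skb with an expiry time derived from this
 * timeout and drops whatever outlives it, roughly:
 *
 *	if (time_is_before_eq_jiffies(XENVIF_RX_CB(skb)->expires)) {
 *		xenvif_rx_dequeue(queue);
 *		kfree_skb(skb);
 *		queue->vif->dev->stats.rx_dropped++;
 *	}
 */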

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
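
/* Sketch for illustration (paraphrased, not verbatim): a queue counts
 * as stalled when the frontend has left the shared ring short of Rx
 * slots for longer than this timeout, roughly:
 *
 *	stalled = !xenvif_rx_ring_slots_available(queue) &&
 *		  time_after(jiffies, queue->last_rx_time +
 *				      queue->vif->stall_timeout);
 */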

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots an skb can have. If a guest sends
 * an skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
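
/* Sketch for illustration (paraphrased, not verbatim):
 * xenvif_count_requests() applies the threshold while walking a
 * packet's slots, roughly:
 *
 *	if (slots >= fatal_skb_slots) {
 *		netdev_err(queue->vif->dev,
 *			   "Malicious frontend using %d slots, threshold %u\n",
 *			   slots, fatal_skb_slots);
 *		xenvif_fatal_tx_err(queue->vif);
 *		return -E2BIG;
 *	}
 */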

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area.  If the first slot has more data, the remainder will be
 * mapped and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
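
/* Sketch for illustration (paraphrased, not verbatim): the Tx path
 * bounds the linear copy when building grant operations, roughly:
 *
 *	data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
 *		XEN_NETBACK_TX_COPY_LEN : txreq.size;
 */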

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

/* This module parameter indicates that data for xen-netfront must be
 * placed at the XDP_PACKET_HEADROOM offset required for XDP processing.
 */
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);
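
/* Sketch for illustration (paraphrased, not verbatim): like the other
 * features, this is advertised to the frontend through xenstore
 * (roughly as done in xenbus.c):
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename,
 *			    "feature-xdp-headroom", "%d",
 *			    provides_xdp_headroom);
 */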

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       s8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     const struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 status);

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing queue structure from a pointer into the
 * pending_tx_info array.
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{}
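
/* Sketch for illustration (paraphrased, not verbatim): the lookup above
 * is a double container_of() walk. ubuf->desc holds the pending index,
 * so stepping back that many entries lands on pending_tx_info[0], whose
 * enclosing structure is the queue:
 *
 *	u16 pending_idx = ubuf->desc;
 *	struct pending_tx_info *temp =
 *		container_of(ubuf, struct pending_tx_info, callback_struct);
 *	return container_of(temp - pending_idx,
 *			    struct xenvif_queue,
 *			    pending_tx_info[0]);
 */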

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)skb_frag_off(frag);
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	skb_frag_off_set(frag, pending_idx);
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS - 1);
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{}

static void tx_add_credit(struct xenvif_queue *queue)
{}

void xenvif_tx_credit_callback(struct timer_list *t)
{}

static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{}

static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{}


struct xenvif_tx_cb {
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
	u32 split_mask;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{}

static void xenvif_get_requests(struct xenvif_queue *queue,
				struct sk_buff *skb,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txfrags,
			        unsigned *copy_ops,
			        unsigned *map_ops,
				unsigned int frag_overflow,
				struct sk_buff *nskb,
				unsigned int extra_count,
				unsigned int data_len)
{}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{}

static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{}

static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{}

static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{}

static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{}
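
/* Sketch for illustration (paraphrased, not verbatim): the reader side
 * described above takes only the RCU read lock, roughly:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 *		if (ether_addr_equal(addr, mcast->addr)) {
 *			rcu_read_unlock();
 *			return true;
 *		}
 *	}
 *	rcu_read_unlock();
 *	return false;
 */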

static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				     int budget,
				     unsigned *copy_ops,
				     unsigned *map_ops)
{}

/* Consolidate an skb with a frag_list into a brand new one with local
 * pages on frags. Returns 0 on success or -ENOMEM if new pages cannot
 * be allocated.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{}
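
/* Sketch for illustration (paraphrased, not verbatim): the
 * consolidation copies the whole payload into freshly allocated local
 * pages so that no granted page outlives the skb, roughly:
 *
 *	for (offset = 0; offset < skb->len; offset += len) {
 *		struct page *page = alloc_page(GFP_ATOMIC);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		len = min_t(int, skb->len - offset, PAGE_SIZE);
 *		skb_copy_bits(skb, offset, page_address(page), len);
 *		... then install page as the next local frag ...
 *	}
 */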

static int xenvif_tx_submit(struct xenvif_queue *queue)
{}

static void xenvif_zerocopy_callback(struct sk_buff *skb,
				     struct ubuf_info *ubuf_base,
				     bool zerocopy_success)
{}

const struct ubuf_info_ops xenvif_ubuf_ops = {
	.complete = xenvif_zerocopy_callback,
};

static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{}


/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{}
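
/* Sketch for illustration (paraphrased, not verbatim): the action is a
 * build/copy/map/submit pipeline, roughly:
 *
 *	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
 *	if (nr_cops == 0)
 *		return 0;
 *	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
 *	if (nr_mops != 0)
 *		ret = gnttab_map_refs(queue->tx_map_ops, NULL,
 *			 	      queue->pages_to_map, nr_mops);
 *	work_done = xenvif_tx_submit(queue);
 */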

static void _make_tx_response(struct xenvif_queue *queue,
			     const struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 status)
{}

static void push_tx_responses(struct xenvif_queue *queue)
{}

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       s8 status)
{}

static void make_tx_response(struct xenvif_queue *queue,
			     const struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 status)
{}

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{}

int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{}

int xenvif_dealloc_kthread(void *data)
{}

static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{}

static void push_ctrl_response(struct xenvif *vif)
{}

static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{}

static void xenvif_ctrl_action(struct xenvif *vif)
{}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{}

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{}

static int __init netback_init(void)
{}

module_init(netback_init);

static void __exit netback_fini(void)
{}
module_exit(netback_fini);

MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");