/* linux/drivers/net/xen-netback/rx.c */

/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
/* Stub: presumably recomputes queue->rx_slots_needed for @skb — confirm.
 * NOTE(review): body is empty in this view; the implementation appears to
 * have been stripped. Restore from the driver before use.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
				       const struct sk_buff *skb)
{}

/* Stub: presumably reports whether the shared RX ring has enough free
 * request slots for the next queued SKB — confirm against the driver.
 *
 * NOTE(review): the implementation is missing in this view. The original
 * empty body of a non-void function is undefined behaviour when the
 * return value is used (C11 6.9.1p12) and fails the kernel's
 * -Werror=return-type; return an explicit placeholder instead.
 */
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	return false; /* TODO(review): restore real implementation */
}

/* Stub: presumably enqueues @skb at the tail of the guest RX queue and
 * returns whether it was accepted — confirm against the driver.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * an explicit placeholder ("not queued") instead.
 */
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	return false; /* TODO(review): restore real implementation */
}

/* Stub: presumably pops the next SKB from the guest RX queue, or NULL if
 * the queue is empty — confirm against the driver.
 *
 * NOTE(review): implementation missing in this view. Falling off the end
 * of a pointer-returning function is UB when the result is used
 * (C11 6.9.1p12); return NULL explicitly as the "queue empty" placeholder.
 */
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	return NULL; /* TODO(review): restore real implementation */
}

/* Stub: presumably drops every SKB still pending on the RX queue — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{}

/* Stub: presumably drops queued SKBs whose RX deadline has passed — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{}

/* Stub: presumably issues the batched grant-copy operations accumulated by
 * xenvif_rx_copy_add() and completes the affected SKBs — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{}

/* Stub: presumably queues one grant-copy of @len bytes from @data into the
 * guest buffer described by @req at @offset — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{}

/* Stub: presumably maps the SKB's GSO state to a XEN_NETIF_GSO_TYPE_*
 * value — confirm against the netif ABI (xen/include/public/io/netif.h).
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * 0, which corresponds to "no GSO" in the netif ABI — confirm.
 */
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	return 0; /* TODO(review): restore real implementation */
}

/* Per-SKB transmit-to-guest state shared by the xenvif_rx_* helpers below.
 * NOTE(review): the member list is empty in this view — an empty struct is
 * ill-formed ISO C (accepted only as a GNU extension); the fields appear to
 * have been stripped and must be restored before this compiles portably.
 */
struct xenvif_pkt_state {};

/* Stub: presumably dequeues the next SKB and initialises @pkt to describe
 * it for the slot-filling loop — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{}

/* Stub: presumably finishes processing of the SKB described by @pkt
 * (accounting / freeing) — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{}

/* Stub: presumably advances @pkt to the next fragment of the current SKB
 * — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{}

/* Stub: presumably yields, via *@data / *@len, the next contiguous chunk of
 * the current SKB to copy into a guest slot starting at @offset — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{}

/* Stub: presumably fills one data slot — copies packet chunks into the
 * guest buffer of @req and writes the matching response @rsp — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{}

/* Stub: presumably consumes @req to emit one extra-info segment (e.g. GSO
 * or hash metadata) into @rsp — confirm against the netif ABI.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{}

/* Stub: presumably transmits one complete SKB to the guest by walking its
 * slots via the helpers above — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_skb(struct xenvif_queue *queue)
{}

/* Maximum number of SKBs processed per invocation of xenvif_rx_action().
 * NOTE(review): the expansion was empty in this view, which would break any
 * use of the macro as an integer bound; 64 matches the upstream driver —
 * confirm against drivers/net/xen-netback history.
 */
#define RX_BATCH_SIZE 64

/* Stub: presumably the main RX work loop — transmits up to RX_BATCH_SIZE
 * queued SKBs to the guest and flushes the copy batch — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_rx_action(struct xenvif_queue *queue)
{}

/* Stub: presumably returns the number of unconsumed request slots on the
 * shared RX ring — confirm against the driver.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * 0 ("no slots") as an explicit placeholder.
 */
static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
	return 0; /* TODO(review): restore real implementation */
}

/* Stub: presumably detects that the frontend has stopped providing RX
 * slots for too long (stall detection) — confirm against the driver.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * false ("not stalled") as an explicit placeholder.
 */
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
	return false; /* TODO(review): restore real implementation */
}

/* Stub: presumably the counterpart of xenvif_rx_queue_stalled() — reports
 * that the frontend is providing slots again — confirm.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * false ("not ready") as an explicit placeholder.
 */
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	return false; /* TODO(review): restore real implementation */
}

/* Stub: presumably the wait condition for the RX kthread — true when there
 * is work to do (or, with @test_kthread, a stop/signal pending) — confirm.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * false ("no work") as an explicit placeholder.
 */
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
	return false; /* TODO(review): restore real implementation */
}

/* Stub: presumably computes the remaining sleep timeout (in jiffies) based
 * on the deadline of the SKB at the head of the RX queue — confirm; see
 * the comment above xenvif_wait_for_rx_work().
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * 0 as an explicit placeholder value.
 */
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	return 0; /* TODO(review): restore real implementation */
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
/* Stub — see the block comment above for the intended contract.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{}

/* Stub: presumably marks this queue stalled and drops the interface carrier
 * when the last healthy queue stalls — confirm against the driver.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{}

/* Stub: presumably clears this queue's stall state and restores the
 * interface carrier once all queues are healthy — confirm.
 * NOTE(review): body is empty in this view; implementation appears stripped.
 */
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{}

/* Stub: presumably the per-queue guest RX kernel thread entry point
 * (@data is the queue) — wait for work, run xenvif_rx_action(), repeat
 * until kthread_should_stop() — confirm against the driver.
 *
 * NOTE(review): implementation missing in this view. An empty body in a
 * non-void function is UB when the result is used (C11 6.9.1p12); return
 * 0, the conventional kthread success status.
 */
int xenvif_kthread_guest_rx(void *data)
{
	return 0; /* TODO(review): restore real implementation */
}