// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"

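/* Return a pointer into the current insertion slot's copy buffer,
 * allocating the backing page on first use; returns NULL if allocation
 * fails.  The body below is a sketch reconstructed from the shared sfc
 * TX path: cb_page, EF4_TX_CB_ORDER, ef4_tx_queue_get_insert_index() and
 * ef4_nic_alloc_buffer() are assumed to match the sibling sfc driver.
 */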
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
					 struct ef4_tx_buffer *buffer)
{
	unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
	struct ef4_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
	unsigned int offset =
		((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	/* Allocate the backing page lazily, in atomic context. */
	if (unlikely(!page_buf->addr) &&
	    ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

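/* As ef4_tx_get_copy_buffer(), but return NULL for requests that cannot
 * fit in a single copy buffer.  (Sketch; EF4_TX_CB_SIZE is assumed.)
 */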
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
				   struct ef4_tx_buffer *buffer, size_t len)
{
	if (len > EF4_TX_CB_SIZE)
		return NULL;
	return ef4_tx_get_copy_buffer(tx_queue, buffer);
}

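/* Release one ring buffer: unmap its DMA mapping (if it owns one), free
 * any attached skb and accumulate completion counts.  (Sketch; the
 * dma_offset/unmap_len fields and EF4_TX_BUF_* flags are assumed from
 * the sibling sfc driver.)
 */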
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
			       struct ef4_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr,
					 buffer->unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr,
				       buffer->unmap_len, DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EF4_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

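/* Upper bound on the number of descriptors one skb can consume; used to
 * size the queue-stop threshold.  (Hedged sketch: the exact bound is
 * hardware-specific.)
 */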
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
	/* Worst case: the linear area plus every fragment, each split once
	 * at a NIC DMA length boundary.  Hedged: the real driver may add
	 * further hardware-specific workarounds on top of this bound.
	 */
	return 2 * (MAX_SKB_FRAGS + 1);
}

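/* Stop the core netdev queue when descriptors run low.  The fill level
 * is rechecked after stopping, under a memory barrier, so that a racing
 * completion cannot leave the queue stopped with no event to wake it.
 * (Sketch; ef4_tx_queue_partner(), old_read_count and txq_stop_thresh
 * are assumed from the sibling sfc driver.)
 */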
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
	/* The net core sees the offload and non-offload queues as one. */
	struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
	struct ef4_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* old_read_count gives a pessimistic (stale) fill level.  Stop the
	 * queue first, then re-read read_count, then restart the queue if
	 * there is in fact still room.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

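/* Send a packet by copying it whole into a copy buffer instead of
 * DMA-mapping it; used for short, fragmented packets where one copy is
 * cheaper than mapping every fragment.  (Sketch following the sibling
 * sfc driver.)
 */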
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct ef4_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	/* ef4_tx_queue_get_insert_buffer() is assumed from the sibling
	 * sfc driver.
	 */
	buffer = ef4_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = ef4_tx_get_copy_buffer_limited(tx_queue, buffer,
						     copy_len);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	buffer->len = copy_len;
	buffer->skb = skb;
	buffer->flags = EF4_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

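/* Emit descriptors for one DMA-mapped chunk, splitting it wherever the
 * NIC imposes a descriptor length limit, and return the final buffer.
 * (Sketch; the per-NIC tx_limit_len() method is assumed.)
 */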
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct ef4_nic_type *nic_type = tx_queue->efx->type;
	struct ef4_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the chunk taking account of NIC-dependent DMA limits. */
	do {
		buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EF4_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	/* Sketch of the shared sfc mapping loop: map the linear area with
	 * dma_map_single(), each fragment with skb_frag_dma_map(), and let
	 * the final descriptor of each mapping carry the unmap state.
	 */
	struct ef4_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EF4_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	do {
		struct ef4_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EF4_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EF4_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue.  None of the buffers may have an
 * skb attached; only descriptors not yet handed to the hardware are
 * unwound.
 */
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value.
	 * (Sketch; __ef4_tx_queue_get_insert_buffer() is assumed.)
	 */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
		ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any fragments that were already mapped will
 * be unmapped and the queue's insert pointer will be restored to its
 * original value.
 *
 * This function is split out from ef4_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	/* Sketch of the shared sfc fast path: coalesce short fragmented
	 * packets through a copy buffer, otherwise DMA-map the skb, then
	 * push descriptors unless more packets are pending.  EF4_TX_CB_SIZE,
	 * xmit_more_available and ef4_nic_push_buffers() are assumed;
	 * netdev_xmit_more() is used in place of the older skb->xmit_more.
	 */
	unsigned int skb_len = skb->len;

	if (skb->data_len && skb_len <= EF4_TX_CB_SIZE) {
		if (ef4_enqueue_skb_copy(tx_queue, skb))
			goto err;
	} else if (ef4_tx_map_data(tx_queue, skb)) {
		goto err;
	}

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
		struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);

		/* Packets may have been left unpushed on the partner queue
		 * by xmit_more; push those too or they could sit there long
		 * enough to trip the netdev watchdog.
		 */
		if (txq2->xmit_more_available) {
			txq2->xmit_more_available = false;
			ef4_nic_push_buffers(txq2);
		}

		tx_queue->xmit_more_available = false;
		ef4_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = true;
	}

	tx_queue->tx_packets++;

	ef4_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

err:
	ef4_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	/* Sketch following the sibling sfc driver. */
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (unlikely(buffer->len == 0)) {
			/* A completion for a descriptor we never filled in
			 * indicates a driver or hardware bug; reset the TX
			 * path.  (RESET_TYPE_TX_SKIP is assumed.)
			 */
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	/* Sketch of the queue lookup: one pair of queues per channel, with
	 * the checksum-offload queue chosen for CHECKSUM_PARTIAL skbs.
	 * EF4_TXQ_TYPE_* and ef4_get_tx_queue() are assumed.
	 */
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_tx_queue *tx_queue;
	unsigned int index, type;

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EF4_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = ef4_get_tx_queue(efx, index, type);

	return ef4_enqueue_skb(tx_queue, skb);
}

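/* Bind the TX queue to its core netdev queue.  The mapping must be the
 * inverse of the queue lookup in ef4_hard_start_xmit().  (Sketch;
 * EF4_TXQ_TYPES and EF4_TXQ_TYPE_HIGHPRI are assumed.)
 */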
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EF4_TXQ_TYPES +
				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

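/* ndo_setup_tc() handler: map mqprio traffic classes onto the
 * high-priority TX queues.  (Hedged sketch: only TC_SETUP_QDISC_MQPRIO
 * is handled, EF4_MAX_TX_TC is assumed, and the real driver also probes
 * high-priority queues on demand.)
 */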
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	unsigned int tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;
	if (num_tc > EF4_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	net_dev->num_tc = num_tc;
	return 0;
}

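/* Handle a TX completion event: free buffers up to and including @index,
 * complete the BQL accounting, wake the core queue if there is now room,
 * and note when the hardware queue has fully drained.  (Sketch; the wake
 * threshold and empty_read_count handling follow the sibling sfc driver.)
 */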
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int fill_level;

	ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This barrier orders
	 * the write of read_count above against the read of the queue
	 * state below.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = ef4_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Note when the hardware queue is empty, for the next push.
	 * (EF4_EMPTY_COUNT_VALID is assumed.)
	 */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
		}
	}
}

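/* Number of pages needed to back one copy buffer per ring entry.
 * (Sketch; EF4_TX_CB_ORDER is assumed.)
 */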
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EF4_TX_CB_ORDER);
}

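/* Allocate the software ring, the copy buffer page array and the
 * hardware ring for a TX queue.  (Sketch; ef4_nic_probe_tx() and
 * EF4_MIN_DMAQ_SIZE are assumed from the sibling sfc driver.)
 */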
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two-sized ring that will hold the
	 * requested number of entries.
	 */
	entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = ef4_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

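/* Reset queue state and initialise the hardware ring; called when the
 * interface is brought up.  (Sketch; counter names follow the sibling
 * sfc driver.)
 */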
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Set up TX descriptor ring */
	ef4_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

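/* Free any buffers left on the ring when the interface is brought down,
 * and reset the core queue's BQL state.  (Sketch following the sibling
 * sfc driver.)
 */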
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count &
					   tx_queue->ptr_mask];
		ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl,
				   &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

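/* Free the software ring, copy buffers and hardware ring; must follow
 * ef4_fini_tx_queue().  (Sketch; ef4_nic_remove_tx() and
 * ef4_nic_free_buffer() are assumed.)
 */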
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
	unsigned int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	ef4_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
			ef4_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	tx_queue->buffer = NULL;
}