linux/drivers/net/ethernet/google/gve/gve_tx.c

// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{}

void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
{}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */
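
/* Illustrative sketch only (not part of the driver): a minimal model of the
 * FIFO contract described above. All names here are hypothetical. Space is
 * handed out from a write offset and reclaimed by byte count alone, so the
 * accounting is only correct if callers free in the order they allocated.
 */
struct sketch_tx_fifo {
	u32 size;	/* total bytes in the registered segment */
	u32 head;	/* next offset to hand out */
	u32 available;	/* bytes not currently handed out */
};

static inline u32 sketch_fifo_alloc(struct sketch_tx_fifo *f, u32 bytes)
{
	u32 offset = f->head;

	/* the caller is expected to have checked that @bytes fit */
	f->head = (f->head + bytes) % f->size;
	f->available -= bytes;
	return offset;
}

static inline void sketch_fifo_free(struct sketch_tx_fifo *f, u32 bytes)
{
	/* no offset argument: space is reclaimed strictly in FIFO order */
	f->available += bytes;
}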

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{}

static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocations and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{}
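
/* Illustrative sketch only: why gve_tx_alloc_fifo() takes iov[2]. When an
 * allocation would run past the end of the registered segment it is handed
 * back as two fragments, the tail of the FIFO followed by its start. The
 * struct and field names below are assumptions for the sketch, not the
 * driver's definitions.
 */
struct sketch_tx_iovec {
	u32 offset;
	u32 len;
};

static inline int sketch_fifo_alloc_iov(struct sketch_tx_fifo *f, u32 bytes,
					struct sketch_tx_iovec iov[2])
{
	u32 tail_room = f->size - f->head;
	int nfrags = 1;

	if (bytes > f->available)
		return -ENOMEM;

	iov[0].offset = f->head;
	iov[0].len = min(bytes, tail_room);
	if (iov[0].len < bytes) {
		/* wrapped: the remainder starts back at offset zero */
		iov[1].offset = 0;
		iov[1].len = bytes - iov[0].len;
		nfrags = 2;
	}
	f->head = (f->head + bytes) % f->size;
	f->available -= bytes;
	return nfrags;
}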

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{}

static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
{}

static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			      u32 to_do)
{}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{}

static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
				 struct gve_tx_alloc_rings_cfg *cfg)
{}

void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{}

static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{}

int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{}

void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 */
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{}
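
/* Illustrative sketch only: the arithmetic behind the kernel-doc above,
 * assuming hypothetical free-running 32-bit counters for descriptors posted
 * (req) and descriptors completed (done). Unsigned subtraction keeps the
 * in-use count correct across counter wrap-around, and the full mask + 1
 * capacity is usable because no slot is reserved.
 */
static inline u32 sketch_tx_avail(u32 mask, u32 req, u32 done)
{
	u32 in_use = req - done;	/* wrap-safe: both operands are u32 */

	return mask + 1 - in_use;
}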

static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{}

/* The most descriptors we could need is MAX_SKB_FRAGS + 4:
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
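
/* Illustrative sketch only: the worst case counted out for a given skb,
 * following the breakdown in the comment above (hypothetical helper, not
 * used by the driver).
 */
static inline int sketch_tx_descs_needed(struct sk_buff *skb)
{
	return 1 +				/* skb linear portion */
	       skb_shinfo(skb)->nr_frags +	/* one per page fragment */
	       1 +				/* TCP header in its own descriptor */
	       1 +				/* payload wrapping to FIFO start */
	       1;				/* metadata descriptor */
}
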
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{}
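
/* Illustrative sketch only: the shape of the check described above, with
 * hypothetical parameters. A packet is accepted only if the descriptor ring
 * can take a worst-case packet and, when payloads are bounce-buffered, the
 * FIFO can hold the bytes to be copied as well.
 */
static inline bool sketch_can_tx(struct sketch_tx_fifo *fifo, u32 ring_avail,
				 u32 bytes_required)
{
	if (ring_avail < MAX_TX_DESC_NEEDED)
		return false;

	/* bytes_required only matters when copying through the registered
	 * segment; with raw DMA addressing there is nothing to reserve here.
	 */
	return bytes_required <= fifo->available;
}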

/* NAPI_POLL_WEIGHT must be at least the worst-case descriptor count for a
 * single skb.
 */
static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{}
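
/* Illustrative sketch only: the usual stop-queue pattern behind a helper
 * like the one above. Everything except the netif_tx_* calls is
 * hypothetical; a real helper would typically also try to reclaim completed
 * descriptors before giving up, so a transient shortage does not leave the
 * queue stopped.
 */
static inline int sketch_maybe_stop_tx(struct netdev_queue *txq,
				       struct sketch_tx_fifo *fifo,
				       u32 ring_avail, u32 bytes_required)
{
	if (likely(sketch_can_tx(fifo, ring_avail, bytes_required)))
		return 0;

	/* Stop the queue now; the Tx completion path is expected to call
	 * netif_tx_wake_queue() once enough descriptors and FIFO bytes have
	 * been reclaimed.
	 */
	netif_tx_stop_queue(txq);
	return -EBUSY;
}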

static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 u16 csum_offset, u8 ip_summed, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr, u16 pkt_len)
{}

static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 u16 l3_offset, u16 gso_size,
				 bool is_gso_v6, bool is_gso,
				 u16 len, u64 addr)
{}

static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{}

static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
			       struct sk_buff *skb)
{}

static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{}

static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
			   void *data, int len, void *frame_p, bool is_xsk)
{}

int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)
{}

int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p)
{}

#define GVE_TX_START_THRESH	4096

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{}

u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{}

static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
		      int budget)
{}

bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{}

bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{}