// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * drivers/net/ethernet/google/gve/gve_tx_dqo.c
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

/* Returns true if at least 'count' TX QPL buffers are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
{
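	/* A minimal sketch of the availability check, assuming the DQO ring
	 * caches a free-buffer count that the completion path refills via
	 * tx->dqo_compl.free_tx_qpl_buf_cnt; the field names here are
	 * assumptions, not a verbatim copy of the upstream implementation.
	 */
	int num_avail;

	/* Rings without a QPL DMA-map skb data directly and never run out
	 * of bounce buffers.
	 */
	if (!tx->dqo.qpl)
		return true;

	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);
	if (count <= num_avail)
		return true;

	/* Refresh the cached free count from the completion path and retry. */
	tx->dqo_tx.free_tx_qpl_buf_cnt =
		atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt);
	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);
	return count <= num_avail;
}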

static s16
gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx)
{}

static void
gve_free_tx_qpl_bufs(struct gve_tx_ring *tx,
		     struct gve_tx_pending_packet_dqo *pkt)
{}

/* Returns true if a gve_tx_pending_packet_dqo object is available. */
static bool gve_has_pending_packet(struct gve_tx_ring *tx)
{
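	/* Sketch, assuming the free lists are singly linked s16 index lists
	 * with -1 as the empty sentinel: one list is owned by the TX path,
	 * the other is refilled by the completion path.
	 */
	if (tx->dqo_tx.free_pending_packets != -1)
		return true;

	if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
		return true;

	return false;
}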

static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{}

static void
gve_free_pending_packet(struct gve_tx_ring *tx,
			struct gve_tx_pending_packet_dqo *pending_packet)
{}

/* gve_tx_clean_pending_packets - Cleans up all pending tx requests and buffers.
 */
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
{
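	/* Sketch: walk every pending-packet slot, unmap any buffers still
	 * attached, and release the skb. Field names are assumptions, and a
	 * full implementation would unmap the first buffer with
	 * dma_unmap_single() when it was mapped from the skb head.
	 */
	int i;

	for (i = 0; i < tx->dqo.num_pending_packets; i++) {
		struct gve_tx_pending_packet_dqo *cur_state =
			&tx->dqo.pending_packets[i];
		int j;

		for (j = 0; j < cur_state->num_bufs; j++)
			dma_unmap_page(tx->dev,
				       dma_unmap_addr(cur_state, dma[j]),
				       dma_unmap_len(cur_state, len[j]),
				       DMA_TO_DEVICE);
		cur_state->num_bufs = 0;

		if (cur_state->skb) {
			dev_consume_skb_any(cur_state->skb);
			cur_state->skb = NULL;
		}
	}
}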

void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
{}

static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
				 struct gve_tx_alloc_rings_cfg *cfg)
{}

static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
{}

void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
{}

static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{}

int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{}

void gve_tx_free_rings_dqo(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{}

/* Returns the number of slots available in the ring */
static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
{
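	/* Sketch, assuming a power-of-two ring where tx->mask is size - 1.
	 * One slot is held back so that head == tail unambiguously means
	 * "empty".
	 */
	u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;

	return tx->mask - num_used;
}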

static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
				       int desc_count, int buf_count)
{}

/* Stops the queue if there are fewer than 'desc_count' descriptors or
 * 'buf_count' buffers available.
 * Return: 0 if stop is not required.
 */
static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
				 int desc_count, int buf_count)
{
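	/* Sketch: check the cached head first, refresh it from the
	 * completion path on failure, and stop the queue only as a last
	 * resort. The re-check after netif_tx_stop_queue() closes the race
	 * with the cleanup path waking the queue; the counter names are
	 * assumptions.
	 */
	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return 0;

	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
	if (gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))
		return 0;

	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);

	/* Pairs with a barrier in the completion path so a concurrent wake
	 * is not lost.
	 */
	mb();

	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
	if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}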

static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
					struct gve_tx_metadata_dqo *metadata)
{}

static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
				     struct sk_buff *skb, u32 len, u64 addr,
				     s16 compl_tag, bool eop, bool is_gso)
{}

/* Validates and prepares `skb` for TSO.
 *
 * Returns header length, or < 0 if invalid.
 */
static int gve_prep_tso(struct sk_buff *skb)
{
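	/* Sketch: seed the TCP checksum with the pseudo-header over the
	 * payload length (the device finishes the sum per segment) and
	 * bound the header size. GVE_TX_MAX_HDR_SIZE_DQO and
	 * GVE_TX_MIN_TSO_MSS_DQO are assumed device limits.
	 */
	struct tcphdr *tcp;
	int header_len;
	u32 paylen;
	int err;

	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
		return -1;

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -EINVAL;

	/* The TCP checksum field is about to be rewritten. */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	tcp = tcp_hdr(skb);
	paylen = skb->len - skb_transport_offset(skb);
	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));

	header_len = skb_tcp_all_headers(skb);
	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
		return -1;

	return header_len;
}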

static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
				     const struct sk_buff *skb,
				     const struct gve_tx_metadata_dqo *metadata,
				     int header_len)
{}

static void
gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
			     const struct gve_tx_metadata_dqo *metadata)
{}

static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
				      struct sk_buff *skb,
				      struct gve_tx_pending_packet_dqo *pkt,
				      s16 completion_tag,
				      u32 *desc_idx,
				      bool is_gso)
{}

/* Tx buffer i corresponds to
 * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
 * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
 */
static void gve_tx_buf_get_addr(struct gve_tx_ring *tx,
				s16 index,
				void **va, dma_addr_t *dma_addr)
{
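	/* Direct translation of the mapping above, assuming
	 * GVE_TX_BUF_SIZE_DQO == 1 << GVE_TX_BUF_SHIFT_DQO and that the QPL
	 * keeps per-page kernel addresses and bus addresses in pages[] and
	 * page_buses[].
	 */
	int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
	int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) <<
		     GVE_TX_BUF_SHIFT_DQO;

	*va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
	*dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
}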

static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
				   struct sk_buff *skb,
				   struct gve_tx_pending_packet_dqo *pkt,
				   s16 completion_tag,
				   u32 *desc_idx,
				   bool is_gso)
{}

/* Returns 0 on success, or < 0 on error.
 *
 * Before this function is called, the caller must ensure
 * gve_has_pending_packet(tx) returns true.
 */
static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
			      struct sk_buff *skb)
{
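	/* Sketch of the descriptor sequence: optional TSO context
	 * descriptor, general context descriptor, then data descriptors via
	 * the copy (QPL) or zero-copy path. The descriptor union fields and
	 * error unwinding are simplified assumptions.
	 */
	const bool is_gso = skb_is_gso(skb);
	u32 desc_idx = tx->dqo_tx.tail;
	struct gve_tx_pending_packet_dqo *pkt;
	struct gve_tx_metadata_dqo metadata;
	s16 completion_tag;

	pkt = gve_alloc_pending_packet(tx);
	pkt->skb = skb;
	completion_tag = pkt - tx->dqo.pending_packets;

	gve_extract_tx_metadata_dqo(skb, &metadata);
	if (is_gso) {
		int header_len = gve_prep_tso(skb);

		if (unlikely(header_len < 0))
			goto err;

		gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
					 skb, &metadata, header_len);
		desc_idx = (desc_idx + 1) & tx->mask;
	}

	gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
				     &metadata);
	desc_idx = (desc_idx + 1) & tx->mask;

	if (tx->dqo.qpl) {
		if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, completion_tag,
					    &desc_idx, is_gso))
			goto err;
	} else {
		if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, completion_tag,
					       &desc_idx, is_gso))
			goto err;
	}

	/* Publish the new tail only after every descriptor is written. */
	tx->dqo_tx.tail = desc_idx;
	return 0;

err:
	pkt->skb = NULL;
	gve_free_pending_packet(tx, pkt);
	return -1;
}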

static int gve_num_descs_per_buf(size_t size)
{}

static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
{}

/* Returns true if HW is capable of sending TSO represented by `skb`.
 *
 * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
 * - The header is counted as one buffer for every single segment.
 * - A buffer which is split between two segments is counted for both.
 * - If a buffer contains both header and payload, it is counted as two buffers.
 */
static bool gve_can_send_tso(const struct sk_buff *skb)
{
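	/* Simplified sketch of the walk described above: count how many
	 * buffers each TCP segment touches and fail if any segment would
	 * exceed the per-packet budget (one descriptor is reserved for the
	 * header). A complete check would additionally handle frags larger
	 * than the maximum buffer size; that refinement is omitted here.
	 */
	const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const int header_len = skb_tcp_all_headers(skb);
	const int gso_size = shinfo->gso_size;
	int cur_seg_num_bufs;
	int cur_seg_size;
	int i;

	cur_seg_size = skb_headlen(skb) - header_len;
	cur_seg_num_bufs = cur_seg_size > 0;

	for (i = 0; i < shinfo->nr_frags; i++) {
		if (cur_seg_size >= gso_size) {
			cur_seg_size %= gso_size;
			cur_seg_num_bufs = cur_seg_size > 0;
		}

		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
			return false;

		cur_seg_size += skb_frag_size(&shinfo->frags[i]);
	}

	return true;
}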

netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
					 struct net_device *dev,
					 netdev_features_t features)
{}

/* Attempt to transmit specified SKB.
 *
 * Returns 0 if the SKB was transmitted or dropped.
 * Returns -1 if there is not currently enough space to transmit the SKB.
 */
static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct sk_buff *skb)
{
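	/* Sketch: size the descriptor requirement, return -1 when the ring
	 * is full (the caller stops and retries), otherwise post the packet
	 * or drop it on a hard error.
	 */
	int num_buffer_descs;
	int total_num_descs;

	if (tx->dqo.qpl) {
		/* Copy mode chunks data into fixed-size QPL buffers. */
		num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
	} else {
		num_buffer_descs = gve_num_buffer_descs_needed(skb);
		if (!skb_is_gso(skb) &&
		    unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
			if (unlikely(skb_linearize(skb) < 0))
				goto drop;
			num_buffer_descs = 1;
		}
	}

	/* General context descriptor + optional TSO context descriptor +
	 * data descriptors.
	 */
	total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
					   num_buffer_descs)))
		return -1;

	if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0))
		goto drop;

	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	skb_tx_timestamp(skb);
	return 0;

drop:
	tx->dropped_pkt++;
	dev_kfree_skb_any(skb);
	return 0;
}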

/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
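	/* Sketch: honor netdev_xmit_more() so the doorbell is written once
	 * per batch; gve_tx_put_doorbell_dqo() is assumed to be the DQO
	 * doorbell helper.
	 */
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;

	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
		/* Earlier xmit-more calls may have posted descriptors
		 * without ringing the doorbell, so ring it before returning
		 * BUSY.
		 */
		gve_tx_put_doorbell_dqo(priv, tx->q_resources,
					tx->dqo_tx.tail);
		return NETDEV_TX_BUSY;
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
	return NETDEV_TX_OK;
}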

static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
			struct gve_tx_pending_packet_dqo *pending_packet)
{}

static void remove_from_list(struct gve_tx_ring *tx,
			     struct gve_index_list *list,
			     struct gve_tx_pending_packet_dqo *pkt)
{}

static void gve_unmap_packet(struct device *dev,
			     struct gve_tx_pending_packet_dqo *pkt)
{}

/* Completion types and expected behavior:
 * No Miss compl + Packet compl = Packet completed normally.
 * Miss compl + Re-inject compl = Packet completed normally.
 * No Miss compl + Re-inject compl = Skipped, i.e. packet not completed.
 * Miss compl + Packet compl = Skipped, i.e. packet not completed.
 */
static void gve_handle_packet_completion(struct gve_priv *priv,
					 struct gve_tx_ring *tx, bool is_napi,
					 u16 compl_tag, u64 *bytes, u64 *pkts,
					 bool is_reinjection)
{
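	/* Condensed sketch of the state machine above: a packet completion
	 * is honored only while the packet still awaits its data
	 * completion, and a re-injection completion only after a miss
	 * completion parked the packet. The state names mirror an assumed
	 * gve_packet_state enum, and timeout handling is omitted.
	 */
	struct gve_tx_pending_packet_dqo *pkt;

	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
				    priv->dev->name, (int)compl_tag);
		return;
	}

	pkt = &tx->dqo.pending_packets[compl_tag];

	if (is_reinjection) {
		/* Without a prior miss completion, a re-injection
		 * completion is skipped.
		 */
		if (pkt->state != GVE_PACKET_STATE_PENDING_REINJECT_COMPL)
			return;
		remove_from_list(tx, &tx->dqo_compl.miss_completions, pkt);
	} else if (pkt->state != GVE_PACKET_STATE_PENDING_DATA_COMPL) {
		/* A miss completion was already seen: the re-injection
		 * completion finishes this packet instead.
		 */
		return;
	}

	gve_unmap_packet(tx->dev, pkt);
	*bytes += pkt->skb->len;
	(*pkts)++;
	napi_consume_skb(pkt->skb, is_napi);
	pkt->skb = NULL;
	gve_free_pending_packet(tx, pkt);
}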

static void gve_handle_miss_completion(struct gve_priv *priv,
				       struct gve_tx_ring *tx, u16 compl_tag,
				       u64 *bytes, u64 *pkts)
{}

static void remove_miss_completions(struct gve_priv *priv,
				    struct gve_tx_ring *tx)
{}

static void remove_timed_out_completions(struct gve_priv *priv,
					 struct gve_tx_ring *tx)
{}

int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct napi_struct *napi)
{}

bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
{}