/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
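
/* Illustrative compile-time check, not part of the upstream header (assumes
 * the kernel's static_assert from <linux/build_bug.h> is in scope): 10 bits
 * of packet type yield 1 << 10 == 1024 table entries.
 */
static_assert(GVE_NUM_PTYPES == 1 << 10);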

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_FLOW_RULES_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
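
/* Illustrative check only, not upstream code (assumes the usual kernel
 * helpers BIT(), DIV_ROUND_UP() and static_assert are in scope): a maximal
 * GVE_DQO_TX_MAX-sized TSO packet spans DIV_ROUND_UP(0x3FFFF, 2048) == 128
 * of the 2K buffers defined above, regardless of page size.
 */
static_assert(GVE_MAX_TX_BUFS_PER_PKT == 128);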

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * Value is set big enough to post at least 3 64K LRO packets via 2K buffers
 * to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
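
/* Worked form of the sizing note above (illustrative): one 64K LRO packet
 * occupies 64K / 2K = 32 buffers, so posting three of them needs
 * 3 * 32 = 96 buffers, the threshold value.
 */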

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {};

struct gve_header_buf {};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {};

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {};

struct gve_rx_cnts {};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {};

/* A TX desc ring entry */
union gve_tx_desc {};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {};

enum gve_packet_state {};

struct gve_tx_pending_packet_dqo {};

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {};

/* Tracks allowed and current queue settings */
struct gve_queue_config {};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {};

struct gve_irq_db {} ____cacheline_aligned;

struct gve_ptype {};

struct gve_ptype_lut {};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};
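
/* Illustrative compile-time check, not part of the upstream header: the
 * zeroed-command default described above relies on the unspecified queue
 * format having the value 0.
 */
static_assert(GVE_QUEUE_FORMAT_UNSPECIFIED == 0);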

struct gve_flow_spec {};

struct gve_flow_rule {};

struct gve_flow_rules_cache {};

struct gve_priv {};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}
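
/* Usage sketch (hypothetical caller, not part of the driver): the service
 * task consumes these flags in a test-then-clear pattern, e.g.
 *
 *	if (gve_get_do_reset(priv)) {
 *		gve_clear_do_reset(priv);
 *		gve_reset(priv, true);
 *	}
 */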

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
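
/* Worked example (illustrative, assuming the reconstructed bodies above):
 * with num_ntfy_blks == 16, tx queue 3 uses ntfy block 3 while rx queue 3
 * uses block 16 / 2 + 3 == 11, so tx and rx queues never share a block.
 */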

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
				  int num_xdp_queues,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
 */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;
	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, the page count should exceed the ring size to absorb
	 * out-of-order completions, so use twice the ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}
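
/* Worked example (illustrative, assuming the reconstructed helpers above):
 * with tx_cfg.num_queues == 4 and two XDP queues, regular tx qids are 0-3
 * and XDP tx qids start at gve_xdp_tx_start_queue_id() == 4; QPL ids follow
 * the same layout, tx ids first and rx ids starting at tx_cfg.max_queues.
 */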

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
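/* Usage sketch (illustrative, hypothetical error handling): pairing the id
 * helpers with the allocator above for tx queue 0 in DQO-QPL mode:
 *
 *	struct gve_queue_page_list *qpl;
 *
 *	qpl = gve_alloc_queue_page_list(priv, gve_tx_qpl_id(priv, 0),
 *					DQO_QPL_DEFAULT_TX_PAGES);
 *	if (!qpl)
 *		return -ENOMEM;
 *	...
 *	gve_free_queue_page_list(priv, qpl, gve_tx_qpl_id(priv, 0));
 */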
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */