// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

/* Poll counts and per-poll sleep length (msecs) used while waiting on the AQ. */
#define GVE_MAX_ADMINQ_RELEASE_CHECK	500
#define GVE_ADMINQ_SLEEP_LEN		20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK	100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{}
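
/* Illustrative sketch (not the verbatim body above): the next option starts
 * right after the current option's header plus its reported payload length,
 * and the walk must stop once that would run past the descriptor's
 * total_length. Field names follow gve_adminq.h.
 */
static inline struct gve_device_option *
gve_next_option_sketch(struct gve_device_descriptor *descriptor,
		       struct gve_device_option *option)
{
	void *option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	void *descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : option_end;
}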

/* Modify-ring option length when the device does not report min ring sizes. */
#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE	8

static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			     struct gve_device_option_flow_steering **dev_op_flow_steering,
			     struct gve_device_option_modify_ring **dev_op_modify_ring)
{}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			   struct gve_device_option_flow_steering **dev_op_flow_steering,
			   struct gve_device_option_modify_ring **dev_op_modify_ring)
{}
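
/* A minimal sketch of the walk this function performs, assuming the layout
 * from gve_adminq.h: the first option immediately follows the descriptor,
 * and gve_get_next_option() bounds-checks every advance.
 */
static int gve_walk_device_options_sketch(struct gve_priv *priv,
					  struct gve_device_descriptor *descriptor)
{
	struct gve_device_option *dev_opt = (void *)(descriptor + 1);
	int num_options = be16_to_cpu(descriptor->num_device_options);
	int i;

	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->pdev->dev,
				"options exceed device descriptor's total length\n");
			return -EINVAL;
		}
		/* ... dispatch on dev_opt->option_id via gve_parse_device_option() ... */
		dev_opt = next_opt;
	}
	return 0;
}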

int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{}

void gve_adminq_release(struct gve_priv *priv)
{}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{}
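
/* Sketch of the polling loop one would expect here, assuming the event
 * counter lives in reg_bar0 as in gve_register.h: poll up to
 * GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK times, sleeping GVE_ADMINQ_SLEEP_LEN
 * msecs between reads, until the counter catches up to prod_cnt. msleep()
 * comes from <linux/delay.h>, pulled in transitively here.
 */
static bool gve_adminq_wait_sketch(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter) == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	return false;
}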

static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If any command fails, returns the first error encountered.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{}
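
/* Hedged sketch of the flush described above, assuming gve_priv's adminq ring
 * bookkeeping (adminq, adminq_mask, adminq_prod_cnt) and a per-command status
 * word written back by the device: kick everything up to the producer count,
 * wait, then scan the completed slots for the first error.
 */
static int gve_adminq_kick_and_wait_sketch(struct gve_priv *priv)
{
	int tail, head, i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head))
		return -ENOTRECOVERABLE;	/* AQ timed out; needs reset */

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd = &priv->adminq[i & priv->adminq_mask];
		u32 status = be32_to_cpu(READ_ONCE(cmd->status));
		int err = gve_adminq_parse_err(priv, status);

		if (err)
			return err;	/* first failure wins */
	}
	return 0;
}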

/* This function is not thread-safe; the caller is responsible for any
 * necessary locking.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{}
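
/* Sketch of the (unlocked) issue path, under the same ring assumptions as
 * above: if the next slot would lap the device's consumer index, flush the
 * queue first, then copy the command into the slot and bump the producer.
 */
static int gve_adminq_issue_sketch(struct gve_priv *priv,
				   union gve_adminq_command *cmd_orig)
{
	u32 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	union gve_adminq_command *cmd;

	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err = gve_adminq_kick_and_wait(priv);

		if (err)
			return err;
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;
	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	return 0;
}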

static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{}

static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
					   size_t cmd_size, void *cmd_orig)
{}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0, the management vector is last; if it is 1, the
 * management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{}
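
/* Sketch of how the command might be populated, per the comment above: gve
 * always places the management vector last, so ntfy_blk_msix_base_idx is 0.
 * Field names follow gve_adminq.h, but treat the exact set (e.g. the omitted
 * irq_db_stride and queue_format) as an assumption of this sketch.
 */
static void gve_fill_configure_resources_sketch(union gve_adminq_command *cmd,
						dma_addr_t counters_bus,
						u32 num_counters,
						dma_addr_t db_bus,
						u32 num_ntfy_blks)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd->configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counters_bus),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_bus),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.ntfy_blk_msix_base_idx = cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
	};
}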

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{}

static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{}

static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
					       union gve_adminq_command *cmd,
					       u32 queue_index)
{}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{}

/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{}
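
/* Sketch of the distinction the comment above draws: the single-queue variant
 * executes the command immediately (issue + doorbell kick + wait), while the
 * batched create path only queues commands and kicks once at the end.
 */
static int gve_create_one_rx_queue_sketch(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);	/* rings the doorbell */
}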

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{}

static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
						 u32 queue_index)
{}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{}

/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{}
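
/* The same immediate-execution pattern applies to teardown, using the destroy
 * command helper above; a hedged sketch rather than the verbatim body.
 */
static int gve_destroy_one_rx_queue_sketch(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);
}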

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{}

static void gve_set_default_desc_cnt(struct gve_priv *priv,
			const struct gve_device_descriptor *descriptor)
{}

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_flow_steering
					  *dev_op_flow_steering,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{}

int gve_adminq_describe_device(struct gve_priv *priv)
{}
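
/* Sketch of the describe-device handshake: DMA-map a page for the descriptor,
 * ask the device to fill it, then hand it to gve_process_device_options().
 * Error handling is trimmed; GVE_ADMINQ_DESCRIBE_DEVICE and the command
 * layout come from gve_adminq.h.
 */
static int gve_describe_device_sketch(struct gve_priv *priv)
{
	struct gve_device_descriptor *descriptor;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err;

	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr = cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	/* ... on success, parse descriptor via gve_process_device_options() ... */

	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, descriptor_bus);
	return err;
}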

int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{}

int gve_adminq_report_link_speed(struct gve_priv *priv)
{}

int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{}

static int
gve_adminq_configure_flow_rule(struct gve_priv *priv,
			       struct gve_adminq_configure_flow_rule *flow_rule_cmd)
{}

int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
{}

int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
{}

int gve_adminq_reset_flow_rules(struct gve_priv *priv)
{}

/* In the DMA memory that the driver allocated for the device to query flow
 * rules, the device first writes a struct gve_query_flow_rules_descriptor.
 * Immediately after it, the device writes an array of rules or rule ids, with
 * the count specified in the descriptor. For GVE_FLOW_RULE_QUERY_STATS, the
 * device writes only the descriptor.
 */
static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
					       struct gve_query_flow_rules_descriptor *descriptor)
{}
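
/* Sketch of walking the buffer layout described above. The descriptor count
 * field name used here (num_queried_rules) and the per-rule struct are
 * assumptions about gve_adminq.h, not verified against it.
 */
static int gve_read_queried_rules_sketch(struct gve_priv *priv,
					 struct gve_query_flow_rules_descriptor *descriptor)
{
	/* The rule array starts immediately after the descriptor. */
	struct gve_adminq_queried_flow_rule *rules = (void *)(descriptor + 1);
	u32 num_rules = be32_to_cpu(descriptor->num_queried_rules);
	u32 i;

	for (i = 0; i < num_rules; i++) {
		/* ... copy rules[i] into the driver's flow-rule cache ... */
	}
	return 0;
}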

int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
{}