/* linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h */

/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#ifndef _QED_HSI_H
#define _QED_HSI_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/tcp_common.h>
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
#include <linux/qed/qed_fcoe_if.h>

struct qed_hwfn;
struct qed_ptt;

/* Opcodes for the event ring */
enum common_event_opcode {};

/* Common Ramrod Command IDs */
enum common_ramrod_cmd_id {};

/* How ll2 should deal with packet upon errors */
enum core_error_handle {};

/* Opcodes for the event ring */
enum core_event_opcode {};

/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {};

/* LL2 SP error code */
enum core_ll2_error_code {};

/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {};

/* LL2 TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {};

/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {};

struct core_ll2_tstorm_per_queue_stat {};

struct core_ll2_ustorm_per_queue_stat {};

struct core_ll2_rx_per_queue_stat {};

struct core_ll2_tx_per_queue_stat {};

/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {};

/* Ramrod data for rx/tx queue statistics query ramrod */
struct core_queue_stats_query_ramrod_data {};

/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {};

/* Core RX CQE Type for Light L2 */
enum core_roce_flavor_type {};

/* Specifies how ll2 should deal with packets errors: packet_too_big and
 * no_buff.
 */
struct core_rx_action_on_error {};

/* Core RX BD for Light L2 */
struct core_rx_bd {};

/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {};

/* Core RX CM offload BD for Light L2 */
union core_rx_bd_union {};

/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {};

/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {};

/* Core RX CQE for Light L2 */
struct core_rx_fast_path_cqe {};

/* Core Rx CM offload CQE */
struct core_rx_gsi_offload_cqe {};

/* Core RX CQE for Light L2 */
struct core_rx_slow_path_cqe {};

/* Core RX CM offload BD for Light L2 */
union core_rx_cqe_union {};

/* RX packet source. */
enum core_rx_pkt_source {};

/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {};

/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {};

/* Flags for Core TX BD */
struct core_tx_bd_data {};

/* Core TX BD for Light L2 */
struct core_tx_bd {};

/* Light L2 TX Destination */
enum core_tx_dest {};

/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {};

/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {};

/* Ramrod data for tx queue update ramrod */
struct core_tx_update_ramrod_data {};

/* Enum flag for what type of dcb data to update */
enum dcb_dscp_update_mode {};

/* The core storm context for the Ystorm */
struct ystorm_core_conn_st_ctx {};

/* The core storm context for the Pstorm */
struct pstorm_core_conn_st_ctx {};

/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {};

struct xstorm_core_conn_ag_ctx {};

struct tstorm_core_conn_ag_ctx {};

struct ustorm_core_conn_ag_ctx {};

/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {};

/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {};

/* The core storm context for the Tstorm */
struct tstorm_core_conn_st_ctx {};

/* core connection context */
struct core_conn_context {};

struct eth_mstorm_per_pf_stat {};

struct eth_mstorm_per_queue_stat {};

/* Ethernet TX Per PF */
struct eth_pstorm_per_pf_stat {};

/* Ethernet TX Per Queue Stats */
struct eth_pstorm_per_queue_stat {};

/* ETH Rx producers data */
struct eth_rx_rate_limit {};

/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {};

struct eth_ustorm_per_pf_stat {};

struct eth_ustorm_per_queue_stat {};

/* Event Ring VF-PF Channel data */
struct vf_pf_channel_eqe_data {};

/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {};

/* FW error data */
struct fw_err_data {};

/* Event Data Union */
union event_ring_data {};

/* Event Ring Entry */
struct event_ring_entry {};

/* Event Ring Next Page Address */
struct event_ring_next_addr {};

/* Event Ring Element */
union event_ring_element {};

/* Ports mode */
enum fw_flow_ctrl_mode {};

/* GFT profile type */
enum gft_profile_type {};

/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {};

/* Integration Phase */
enum integ_phase {};

/* Ports mode */
enum iwarp_ll2_tx_queues {};

/* Function error ID */
enum func_err_id {};

/* FW error handling mode */
enum fw_err_mode {};

/* FW error recovery scope */
enum fw_err_recovery_scope {};

/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {};

/* Mstorm VF zone */
struct mstorm_vf_zone {};

/* vlan header including TPID and TCI fields */
struct vlan_header {};

/* outer tag configurations */
struct outer_tag_config_struct {};

/* personality per PF */
enum personality_type {};

/* tunnel configuration */
struct pf_start_tunnel_config {};

/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {};

/* Data for port update ramrod */
struct protocol_dcb_data {};

/* Update tunnel configuration */
struct pf_update_tunnel_config {};

/* Data for port update ramrod */
struct pf_update_ramrod_data {};

/* Ports mode */
enum ports_mode {};

/* Protocol-common error code */
enum protocol_common_error_code {};

/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
enum protocol_version_array_key {};

/* RDMA TX Stats */
struct rdma_sent_stats {};

/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {};

/* Pstorm VF zone */
struct pstorm_vf_zone {};

/* Ramrod Header of SPQE */
struct ramrod_header {};

/* RDMA RX Stats */
struct rdma_rcv_stats {};

/* Data for update QCN/DCQCN RL ramrod */
struct rl_update_ramrod_data {};

/* Slowpath Element (SPQE) */
struct slow_path_element {};

/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {};

struct tstorm_per_port_stat {};

/* Tstorm VF zone */
struct tstorm_vf_zone {};

/* Tunnel classification scheme */
enum tunnel_clss {};

/* Ustorm non-triggering VF zone */
struct ustorm_non_trigger_vf_zone {};

/* Ustorm triggering VF zone */
struct ustorm_trigger_vf_zone {};

/* Ustorm VF zone */
struct ustorm_vf_zone {};

/* VF-PF channel data */
struct vf_pf_channel_data {};

/* Ramrod data for VF start ramrod */
struct vf_start_ramrod_data {};

/* Ramrod data for VF start ramrod */
struct vf_stop_ramrod_data {};

/* VF zone size mode */
enum vf_zone_size_mode {};

/* Xstorm non-triggering VF zone */
struct xstorm_non_trigger_vf_zone {};

/* Tstorm VF zone */
struct xstorm_vf_zone {};

/* Attentions status block */
struct atten_status_block {};

/* DMAE command */
struct dmae_cmd {};

enum dmae_cmd_comp_crc_en_enum {};

enum dmae_cmd_comp_func_enum {};

enum dmae_cmd_comp_word_en_enum {};

enum dmae_cmd_c_dst_enum {};

enum dmae_cmd_dst_enum {};

enum dmae_cmd_error_handling_enum {};

enum dmae_cmd_src_enum {};

struct mstorm_core_conn_ag_ctx {};

struct ystorm_core_conn_ag_ctx {};

/* DMAE parameters */
struct qed_dmae_params {};

/* IGU cleanup command */
struct igu_cleanup {};

/* IGU firmware driver command */
union igu_command {};

/* IGU firmware driver command */
struct igu_command_reg_ctrl {};

/* IGU mapping line structure */
struct igu_mapping_line {};

/* IGU MSIX line structure */
struct igu_msix_vector {};

/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {};

enum pxp_tph_st_hint {};

/* QM hardware structure of enable bypass credit mask */
struct qm_rf_bypass_mask {};

/* QM hardware structure of opportunistic credit mask */
struct qm_rf_opportunistic_mask {};

/* QM hardware structure of QM map memory */
struct qm_rf_pq_map {};

/* Completion params for aggregated interrupt completion */
struct sdm_agg_int_comp_params {};

/* SDM operation gen command (generate aggregative interrupt) */
struct sdm_op_gen {};

/* Physical memory descriptor */
struct phys_mem_desc {};

/* Virtual memory descriptor */
struct virt_mem_desc {};

/********************************/
/* HSI Init Functions constants */
/********************************/

/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES

/* BRB RAM init requirements */
struct init_brb_ram_req {};

/* ETS per-TC init requirements */
struct init_ets_tc_req {};

/* ETS init requirements */
struct init_ets_req {};

/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {};

/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {};

/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {};

/* QM per global RL init parameters */
struct init_qm_global_rl_params {};

/* QM per-port init parameters */
struct init_qm_port_params {};

/* QM per-PQ init parameters */
struct init_qm_pq_params {};

/* QM per RL init parameters */
struct init_qm_rl_params {};

/* QM Rate Limiter types */
enum init_qm_rl_type {};

/* QM per-vport init parameters */
struct init_qm_vport_params {};

/**************************************/
/* Init Tool HSI constants and macros */
/**************************************/

/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS
#define MAX_GRC_ADDR

/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID

/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE
enum chip_ids {};

struct fw_asserts_ram_section {};

struct fw_ver_num {};

struct fw_ver_info {};

struct fw_info {};

struct fw_info_location {};

enum init_modes {};

enum init_phases {};

enum init_split_types {};

/* Binary buffer header */
struct bin_buffer_hdr {};

/* Binary init buffer types */
enum bin_init_buffer_type {};

/* FW overlay buffer header */
struct fw_overlay_buf_hdr {};

/* init array header: raw */
struct init_array_raw_hdr {};

/* init array header: standard */
struct init_array_standard_hdr {};

/* init array header: zipped */
struct init_array_zipped_hdr {};

/* init array header: pattern */
struct init_array_pattern_hdr {};

/* init array header union */
union init_array_hdr {};

/* init array types */
enum init_array_types {};

/* init operation: callback */
struct init_callback_op {};

/* init operation: delay */
struct init_delay_op {};

/* init operation: if_mode */
struct init_if_mode_op {};

/* init operation: if_phase */
struct init_if_phase_op {};

/* init mode operators */
enum init_mode_ops {};

/* init operation: raw */
struct init_raw_op {};

/* init array params */
struct init_op_array_params {};

/* Write init operation arguments */
union init_write_args {};

/* init operation: write */
struct init_write_op {};

/* init operation: read */
struct init_read_op {};

/* Init operations union */
union init_op {};

/* Init command operation types */
enum init_op_types {};

/* init polling types */
enum init_poll_types {};

/* init source types */
enum init_source_types {};

/* Internal RAM Offsets macro data */
struct iro {};

/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD

/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM

/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM

/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024

/* Win 6 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_2048

/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM

/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024

/* Win 9 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048

/* Win 10 */
#define GTT_BAR0_MAP_REG_XSDM_RAM

/* Win 11 */
#define GTT_BAR0_MAP_REG_XSDM_RAM_1024

/* Win 12 */
#define GTT_BAR0_MAP_REG_YSDM_RAM

/* Win 13 */
#define GTT_BAR0_MAP_REG_PSDM_RAM

/* Returns the VOQ based on port and TC */
#define VOQ(port, tc, max_phys_tcs_per_port)

struct init_qm_pq_params;

/**
 * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
 *
 * @num_pf_cids: Number of connections used by this PF.
 * @num_vf_cids: Number of connections used by VFs of this PF.
 * @num_tids: Number of tasks used by this PF.
 * @num_pf_pqs: Number of PQs used by this PF.
 * @num_vf_pqs: Number of PQs used by VFs of this PF.
 *
 * Return: The required host memory size in 4KB units.
 *
 * Must be called before all QM init HSI functions.
 */
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);

struct qed_qm_common_rt_init_params {};

/**
 * qed_qm_common_rt_init(): Prepare QM runtime init values for the
 *                          engine phase.
 *
 * @p_hwfn: HW device data.
 * @p_params: Parameters.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params);

struct qed_qm_pf_rt_init_params {};

/**
 * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
 *
 * @p_hwfn:  HW device data.
 * @p_ptt: Ptt window used for writing the registers
 * @p_params: Parameters.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params);

/**
 * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers
 * @pf_id: PF ID
 * @pf_wfq: WFQ weight. Must be non-zero.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);

/**
 * qed_init_pf_rl(): Initializes the rate limit of the specified PF
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @pf_id: PF ID.
 * @pf_rl: rate limit in Mb/sec units
 *
 * Return: 0 on success, -1 on error.
 */
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);

/**
 * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers
 * @first_tx_pq_id: An array containing the first Tx PQ ID associated
 *                  with the VPORT for each TC. This array is filled by
 *                  qed_qm_pf_rt_init
 * @wfq: WFQ weight. Must be non-zero.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);

/**
 * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
 *                          VPORT and TC.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
 *                  (filled by qed_qm_pf_rt_init).
 * @weight: VPORT+TC WFQ weight.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u16 first_tx_pq_id, u16 weight);

/**
 * qed_init_global_rl():  Initializes the rate limit of the specified
 * rate limiter.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @rl_id: RL ID.
 * @rate_limit: Rate limit in Mb/sec units
 * @vport_rl_type: Vport RL type.
 *
 * Return: 0 on success, -1 on error.
 */
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type);

/**
 * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @is_release_cmd: true for release, false for stop.
 * @is_tx_pq: true for Tx PQs, false for Other PQs.
 * @start_pq: first PQ ID to stop
 * @num_pqs: Number of PQs to stop, starting from start_pq.
 *
 * Return: Bool, true if successful, false if timeout occurred while waiting
 *         for QM command done.
 */
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs);

/**
 * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @dest_port: vxlan destination udp port.
 *
 * Return: Void.
 */
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port);

/**
 * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @vxlan_enable: vxlan enable flag.
 *
 * Return: Void.
 */
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable);

/**
 * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @eth_gre_enable: Eth GRE enable flag.
 * @ip_gre_enable: IP GRE enable flag.
 *
 * Return: Void.
 */
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable);

/**
 * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @dest_port: Geneve destination udp port.
 *
 * Return: Void.
 */
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port);

/**
 * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @eth_geneve_enable: Eth GENEVE enable flag.
 * @ip_geneve_enable: IP GENEVE enable flag.
 *
 * Return: Void.
 */
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable);

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable);

/**
 * qed_gft_disable(): Disable GFT.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @pf_id: PF on which to disable GFT.
 *
 * Return: Void.
 */
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);

/**
 * qed_gft_config(): Enable and configure HW for GFT.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @pf_id: PF on which to enable GFT.
 * @tcp: Set profile tcp packets.
 * @udp: Set profile udp  packet.
 * @ipv4: Set profile ipv4 packet.
 * @ipv6: Set profile ipv6 packet.
 * @profile_type: Define packet same fields. Use enum gft_profile_type.
 *
 * Return: Void.
 */
void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type);

/**
 * qed_enable_context_validation(): Enable and configure context
 *                                  validation.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 *
 * Return: Void.
 */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt);

/**
 * qed_calc_session_ctx_validation(): Calculate validation byte for
 *                                    session context.
 *
 * @p_ctx_mem: Pointer to context memory.
 * @ctx_size: Context size.
 * @ctx_type: Context type.
 * @cid: Context cid.
 *
 * Return: Void.
 */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid);

/**
 * qed_calc_task_ctx_validation(): Calculate validation byte for task
 *                                 context.
 *
 * @p_ctx_mem: Pointer to context memory.
 * @ctx_size: Context size.
 * @ctx_type: Context type.
 * @tid: Context tid.
 *
 * Return: Void.
 */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid);

/**
 * qed_memset_session_ctx(): Memset session context to 0 while
 *                            preserving validation bytes.
 *
 * @p_ctx_mem: Pointer to context memory.
 * @ctx_size: Size to initialize.
 * @ctx_type: Context type.
 *
 * Return: Void.
 */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);

/**
 * qed_memset_task_ctx(): Memset task context to 0 while preserving
 *                        validation bytes.
 *
 * @p_ctx_mem: Pointer to context memory.
 * @ctx_size: Size to initialize.
 * @ctx_type: Context type.
 *
 * Return: Void.
 */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);

#define NUM_STORMS

/**
 * qed_get_protocol_type_str(): Get a string for Protocol type.
 *
 * @protocol_type: Protocol type (using enum protocol_type).
 *
 * Return: String.
 */
const char *qed_get_protocol_type_str(u32 protocol_type);

/**
 * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID.
 *
 * @protocol_type: Protocol type (using enum protocol_type).
 * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id).
 *
 * Return: String.
 */
const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id);

/**
 * qed_set_rdma_error_level(): Sets the RDMA assert level.
 *                             If the severity of the error will be
 *                             above the level, the FW will assert.
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @assert_level: An array of assert levels for each storm.
 *
 * Return: Void.
 */
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS]);
/**
 * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
 *
 * @p_hwfn: HW device data.
 * @fw_overlay_in_buf: The input FW overlay buffer.
 * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
 *		        must be aligned to dwords.
 *
 * Return: A pointer to the allocated overlays memory,
 * or NULL in case of failures.
 */
struct phys_mem_desc *
qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
			 const u32 *const fw_overlay_in_buf,
			 u32 buf_size_in_bytes);

/**
 * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: Ptt window used for writing the registers.
 * @fw_overlay_mem: the allocated FW overlay memory.
 *
 * Return: Void.
 */
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem);

/**
 * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
 *
 * @p_hwfn: HW device data.
 * @fw_overlay_mem: The allocated FW overlay memory to free.
 *
 * Return: Void.
 */
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc **fw_overlay_mem);

#define PCICFG_OFFSET
#define GRC_CONFIG_REG_PF_INIT_VF

/* First VF_NUM for PF is encoded in this register.
 * The number of VFs assigned to a PF is assumed to be a multiple of 8.
 * Software should program these bits based on Total Number of VFs programmed
 * for each PF.
 * Since registers from 0x000-0x7ff are spilt across functions, each PF will
 * have the same location for the same 4 bits
 */
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK

/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET
#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET
#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET
#define IGU_REG_PF_CONFIGURATION_RT_OFFSET
#define IGU_REG_VF_CONFIGURATION_RT_OFFSET
#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET
#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE
#define CAU_REG_PI_MEMORY_RT_OFFSET
#define CAU_REG_PI_MEMORY_RT_SIZE
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET
#define PRS_REG_SEARCH_TCP_RT_OFFSET
#define PRS_REG_SEARCH_FCOE_RT_OFFSET
#define PRS_REG_SEARCH_ROCE_RT_OFFSET
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET
#define SRC_REG_FIRSTFREE_RT_OFFSET
#define SRC_REG_FIRSTFREE_RT_SIZE
#define SRC_REG_LASTFREE_RT_OFFSET
#define SRC_REG_LASTFREE_RT_SIZE
#define SRC_REG_COUNTFREE_RT_OFFSET
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET
#define PSWRQ2_REG_VF_BASE_RT_OFFSET
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE
#define PGLUE_REG_B_VF_BASE_RT_OFFSET
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE
#define QM_REG_MAXPQSIZE_0_RT_OFFSET
#define QM_REG_MAXPQSIZE_1_RT_OFFSET
#define QM_REG_MAXPQSIZE_2_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET
#define QM_REG_BASEADDROTHERPQ_RT_SIZE
#define QM_REG_PTRTBLOTHER_RT_OFFSET
#define QM_REG_PTRTBLOTHER_RT_SIZE
#define QM_REG_VOQCRDLINE_RT_OFFSET
#define QM_REG_VOQCRDLINE_RT_SIZE
#define QM_REG_VOQINITCRDLINE_RT_OFFSET
#define QM_REG_VOQINITCRDLINE_RT_SIZE
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET
#define QM_REG_PQTX2PF_0_RT_OFFSET
#define QM_REG_PQTX2PF_1_RT_OFFSET
#define QM_REG_PQTX2PF_2_RT_OFFSET
#define QM_REG_PQTX2PF_3_RT_OFFSET
#define QM_REG_PQTX2PF_4_RT_OFFSET
#define QM_REG_PQTX2PF_5_RT_OFFSET
#define QM_REG_PQTX2PF_6_RT_OFFSET
#define QM_REG_PQTX2PF_7_RT_OFFSET
#define QM_REG_PQTX2PF_8_RT_OFFSET
#define QM_REG_PQTX2PF_9_RT_OFFSET
#define QM_REG_PQTX2PF_10_RT_OFFSET
#define QM_REG_PQTX2PF_11_RT_OFFSET
#define QM_REG_PQTX2PF_12_RT_OFFSET
#define QM_REG_PQTX2PF_13_RT_OFFSET
#define QM_REG_PQTX2PF_14_RT_OFFSET
#define QM_REG_PQTX2PF_15_RT_OFFSET
#define QM_REG_PQTX2PF_16_RT_OFFSET
#define QM_REG_PQTX2PF_17_RT_OFFSET
#define QM_REG_PQTX2PF_18_RT_OFFSET
#define QM_REG_PQTX2PF_19_RT_OFFSET
#define QM_REG_PQTX2PF_20_RT_OFFSET
#define QM_REG_PQTX2PF_21_RT_OFFSET
#define QM_REG_PQTX2PF_22_RT_OFFSET
#define QM_REG_PQTX2PF_23_RT_OFFSET
#define QM_REG_PQTX2PF_24_RT_OFFSET
#define QM_REG_PQTX2PF_25_RT_OFFSET
#define QM_REG_PQTX2PF_26_RT_OFFSET
#define QM_REG_PQTX2PF_27_RT_OFFSET
#define QM_REG_PQTX2PF_28_RT_OFFSET
#define QM_REG_PQTX2PF_29_RT_OFFSET
#define QM_REG_PQTX2PF_30_RT_OFFSET
#define QM_REG_PQTX2PF_31_RT_OFFSET
#define QM_REG_PQTX2PF_32_RT_OFFSET
#define QM_REG_PQTX2PF_33_RT_OFFSET
#define QM_REG_PQTX2PF_34_RT_OFFSET
#define QM_REG_PQTX2PF_35_RT_OFFSET
#define QM_REG_PQTX2PF_36_RT_OFFSET
#define QM_REG_PQTX2PF_37_RT_OFFSET
#define QM_REG_PQTX2PF_38_RT_OFFSET
#define QM_REG_PQTX2PF_39_RT_OFFSET
#define QM_REG_PQTX2PF_40_RT_OFFSET
#define QM_REG_PQTX2PF_41_RT_OFFSET
#define QM_REG_PQTX2PF_42_RT_OFFSET
#define QM_REG_PQTX2PF_43_RT_OFFSET
#define QM_REG_PQTX2PF_44_RT_OFFSET
#define QM_REG_PQTX2PF_45_RT_OFFSET
#define QM_REG_PQTX2PF_46_RT_OFFSET
#define QM_REG_PQTX2PF_47_RT_OFFSET
#define QM_REG_PQTX2PF_48_RT_OFFSET
#define QM_REG_PQTX2PF_49_RT_OFFSET
#define QM_REG_PQTX2PF_50_RT_OFFSET
#define QM_REG_PQTX2PF_51_RT_OFFSET
#define QM_REG_PQTX2PF_52_RT_OFFSET
#define QM_REG_PQTX2PF_53_RT_OFFSET
#define QM_REG_PQTX2PF_54_RT_OFFSET
#define QM_REG_PQTX2PF_55_RT_OFFSET
#define QM_REG_PQTX2PF_56_RT_OFFSET
#define QM_REG_PQTX2PF_57_RT_OFFSET
#define QM_REG_PQTX2PF_58_RT_OFFSET
#define QM_REG_PQTX2PF_59_RT_OFFSET
#define QM_REG_PQTX2PF_60_RT_OFFSET
#define QM_REG_PQTX2PF_61_RT_OFFSET
#define QM_REG_PQTX2PF_62_RT_OFFSET
#define QM_REG_PQTX2PF_63_RT_OFFSET
#define QM_REG_PQOTHER2PF_0_RT_OFFSET
#define QM_REG_PQOTHER2PF_1_RT_OFFSET
#define QM_REG_PQOTHER2PF_2_RT_OFFSET
#define QM_REG_PQOTHER2PF_3_RT_OFFSET
#define QM_REG_PQOTHER2PF_4_RT_OFFSET
#define QM_REG_PQOTHER2PF_5_RT_OFFSET
#define QM_REG_PQOTHER2PF_6_RT_OFFSET
#define QM_REG_PQOTHER2PF_7_RT_OFFSET
#define QM_REG_PQOTHER2PF_8_RT_OFFSET
#define QM_REG_PQOTHER2PF_9_RT_OFFSET
#define QM_REG_PQOTHER2PF_10_RT_OFFSET
#define QM_REG_PQOTHER2PF_11_RT_OFFSET
#define QM_REG_PQOTHER2PF_12_RT_OFFSET
#define QM_REG_PQOTHER2PF_13_RT_OFFSET
#define QM_REG_PQOTHER2PF_14_RT_OFFSET
#define QM_REG_PQOTHER2PF_15_RT_OFFSET
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET
#define QM_REG_RLGLBLINCVAL_RT_OFFSET
#define QM_REG_RLGLBLINCVAL_RT_SIZE
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE
#define QM_REG_RLGLBLCRD_RT_OFFSET
#define QM_REG_RLGLBLCRD_RT_SIZE
#define QM_REG_RLGLBLENABLE_RT_OFFSET
#define QM_REG_RLPFPERIOD_RT_OFFSET
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET
#define QM_REG_RLPFINCVAL_RT_OFFSET
#define QM_REG_RLPFINCVAL_RT_SIZE
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET
#define QM_REG_RLPFUPPERBOUND_RT_SIZE
#define QM_REG_RLPFCRD_RT_OFFSET
#define QM_REG_RLPFCRD_RT_SIZE
#define QM_REG_RLPFENABLE_RT_OFFSET
#define QM_REG_RLPFVOQENABLE_RT_OFFSET
#define QM_REG_WFQPFWEIGHT_RT_OFFSET
#define QM_REG_WFQPFWEIGHT_RT_SIZE
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE
#define QM_REG_WFQPFCRD_RT_OFFSET
#define QM_REG_WFQPFCRD_RT_SIZE
#define QM_REG_WFQPFENABLE_RT_OFFSET
#define QM_REG_WFQVPENABLE_RT_OFFSET
#define QM_REG_BASEADDRTXPQ_RT_OFFSET
#define QM_REG_BASEADDRTXPQ_RT_SIZE
#define QM_REG_TXPQMAP_RT_OFFSET
#define QM_REG_TXPQMAP_RT_SIZE
#define QM_REG_WFQVPWEIGHT_RT_OFFSET
#define QM_REG_WFQVPWEIGHT_RT_SIZE
#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET
#define QM_REG_WFQVPUPPERBOUND_RT_SIZE
#define QM_REG_WFQVPCRD_RT_OFFSET
#define QM_REG_WFQVPCRD_RT_SIZE
#define QM_REG_WFQVPMAP_RT_OFFSET
#define QM_REG_WFQVPMAP_RT_SIZE
#define QM_REG_PTRTBLTX_RT_OFFSET
#define QM_REG_PTRTBLTX_RT_SIZE
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET
#define QM_REG_WFQPFCRD_MSB_RT_SIZE
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET
#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET
#define XCM_REG_CON_PHY_Q3_RT_OFFSET

#define RUNTIME_ARRAY_SIZE

/* Init Callbacks */
#define DMAE_READY_CB

/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {};

/* The eth storm context for the Pstorm */
struct pstorm_eth_conn_st_ctx {};

/* The eth storm context for the Xstorm */
struct xstorm_eth_conn_st_ctx {};

struct xstorm_eth_conn_ag_ctx {};

/* The eth storm context for the Ystorm */
struct ystorm_eth_conn_st_ctx {};

struct ystorm_eth_conn_ag_ctx {};

struct tstorm_eth_conn_ag_ctx {};

struct ustorm_eth_conn_ag_ctx {};

/* The eth storm context for the Ustorm */
struct ustorm_eth_conn_st_ctx {};

/* The eth storm context for the Mstorm */
struct mstorm_eth_conn_st_ctx {};

/* eth connection context */
struct eth_conn_context {};

/* Eth error codes */
enum eth_error_code {};

/* Opcodes for the event ring */
enum eth_event_opcode {};

/* Eth classification filter actions */
enum eth_filter_action {};

/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd {};

/*	$$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd_header {};

/* Ethernet filter types: mac/vlan/pair */
enum eth_filter_type {};

/* inner to inner vlan priority translation configurations */
struct eth_in_to_in_pri_map_cfg {};

/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type {};

/* Eth IP type */
enum eth_ip_type {};

/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {};

/* Return code from eth sp ramrods */
struct eth_return_code {};

/* tx destination enum */
enum eth_tx_dst_mode_config_enum {};

/* What to do in case an error occurs */
enum eth_tx_err {};

/* Array of the different error type behaviors */
struct eth_tx_err_vals {};

/* vport rss configuration data */
struct eth_vport_rss_config {};

/* eth vport RSS mode */
enum eth_vport_rss_mode {};

/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_rx_mode {};

/* Command for setting tpa parameters */
struct eth_vport_tpa_param {};

/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_tx_mode {};

/* GFT filter update action type */
enum gft_filter_update_action {};

/* Ramrod data for rx create gft action */
struct rx_create_gft_action_ramrod_data {};

/* Ramrod data for rx create openflow action */
struct rx_create_openflow_action_ramrod_data {};

/* Ramrod data for rx add openflow filter */
struct rx_openflow_filter_ramrod_data {};

/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {};

/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {};

/* Ramrod data for rx queue update ramrod */
struct rx_queue_update_ramrod_data {};

/* Ramrod data for rx Add UDP Filter */
struct rx_udp_filter_ramrod_data {};

/* Add or delete a GFT filter. The filter is a packet header of the packet
 * type that should be passed to a certain FW flow.
 */
struct rx_update_gft_filter_ramrod_data {};

/* Ramrod data for tx queue start ramrod */
struct tx_queue_start_ramrod_data {};

/* Ramrod data for tx queue stop ramrod */
struct tx_queue_stop_ramrod_data {};

/* Ramrod data for tx queue update ramrod */
struct tx_queue_update_ramrod_data {};

/* Inner to Inner VLAN priority map update mode */
enum update_in_to_in_pri_map_mode_enum {};

/* Ramrod data for vport filter update ramrod */
struct vport_filter_update_ramrod_data {};

/* Ramrod data for vport start ramrod */
struct vport_start_ramrod_data {};

/* Ramrod data for vport stop ramrod */
struct vport_stop_ramrod_data {};

/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data_cmn {};

struct vport_update_ramrod_mcast {};

/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data {};

struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {};

struct mstorm_eth_conn_ag_ctx {};

struct xstorm_eth_hw_conn_ag_ctx {};

/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {};

/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {};

/* Profile key struct for GFT logic in PRS */
struct gft_profile_key {};

/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type {};

/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type {};

/* GFT RAM line struct */
struct gft_ram_line {};

/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */
enum gft_vlan_select {};

/* The rdma task context of Ystorm */
struct ystorm_rdma_task_st_ctx {};

struct ystorm_rdma_task_ag_ctx {};

struct mstorm_rdma_task_ag_ctx {};

/* The rdma task context of Mstorm */
struct mstorm_rdma_task_st_ctx {};

/* The rdma task context of Ustorm */
struct ustorm_rdma_task_st_ctx {};

struct ustorm_rdma_task_ag_ctx {};

/* RDMA task context */
struct rdma_task_context {};

#define TOE_MAX_RAMROD_PER_PF
#define TOE_TX_PAGE_SIZE_BYTES
#define TOE_GRQ_PAGE_SIZE_BYTES
#define TOE_RX_CQ_PAGE_SIZE_BYTES

#define TOE_RX_MAX_RSS_CHAINS
#define TOE_TX_MAX_TSS_CHAINS
#define TOE_RSS_INDIRECTION_TABLE_SIZE

/* The toe storm context of Mstorm */
struct mstorm_toe_conn_st_ctx {};

/* The toe storm context of Pstorm */
struct pstorm_toe_conn_st_ctx {};

/* The toe storm context of Ystorm */
struct ystorm_toe_conn_st_ctx {};

/* The toe storm context of Xstorm */
struct xstorm_toe_conn_st_ctx {};

struct ystorm_toe_conn_ag_ctx {};

struct xstorm_toe_conn_ag_ctx {};

struct tstorm_toe_conn_ag_ctx {};

struct ustorm_toe_conn_ag_ctx {};

/* The toe storm context of Tstorm */
struct tstorm_toe_conn_st_ctx {};

/* The toe storm context of Ustorm */
struct ustorm_toe_conn_st_ctx {};

/* toe connection context */
struct toe_conn_context {};

/* toe init ramrod header */
struct toe_init_ramrod_header {};

/* toe pf init parameters */
struct toe_pf_init_params {};

/* toe tss parameters */
struct toe_tss_params {};

/* toe rss parameters */
struct toe_rss_params {};

/* toe init ramrod data */
struct toe_init_ramrod_data {};

/* toe offload parameters */
struct toe_offload_params {};

/* TOE offload ramrod data - DMAed by firmware */
struct toe_offload_ramrod_data {};

/* TOE ramrod command IDs */
enum toe_ramrod_cmd_id {};

/* Toe RQ buffer descriptor */
struct toe_rx_bd {};

/* TOE RX completion queue opcodes (opcode 0 is illegal) */
enum toe_rx_cmp_opcode {};

/* TOE rx ooo completion data */
struct toe_rx_cqe_ooo_params {};

/* TOE rx in order completion data */
struct toe_rx_cqe_in_order_params {};

/* Union for TOE rx completion data */
union toe_rx_cqe_data_union {};

/* TOE rx completion element */
struct toe_rx_cqe {};

/* toe RX doorbell data */
struct toe_rx_db_data {};

/* Toe GRQ buffer descriptor */
struct toe_rx_grq_bd {};

/* Toe transmission application buffer descriptor */
struct toe_tx_app_buff_desc {};

/* Toe transmission application buffer descriptor page pointer */
struct toe_tx_app_buff_page_pointer {};

/* Toe transmission buffer descriptor */
struct toe_tx_bd {};

/* TOE completion opcodes */
enum toe_tx_cmp_opcode {};

/* Toe transmission completion element */
struct toe_tx_cqe {};

/* Toe transmission page pointer bd */
struct toe_tx_page_pointer_bd {};

/* Toe transmission completion element page pointer */
struct toe_tx_page_pointer_cqe {};

/* toe update parameters */
struct toe_update_params {};

/* TOE update ramrod data - DMAed by firmware */
struct toe_update_ramrod_data {};

struct mstorm_toe_conn_ag_ctx {};

/* TOE doorbell data */
struct toe_db_data {};

/* rdma function close ramrod data */
struct rdma_close_func_ramrod_data {};

/* rdma function init CNQ parameters */
struct rdma_cnq_params {};

/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data {};

/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data {};

/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params {};

/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data {};

/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode {};

/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code {};

/* rdma function init header */
struct rdma_init_func_hdr {};

/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {};

/* rdma namespace tracking ramrod data */
struct rdma_namespace_tracking_ramrod_data {};

/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {};

/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data {};

/* rdma resize cq output params */
struct rdma_resize_cq_output_params {};

/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data {};

/* The rdma SRQ context */
struct rdma_srq_context {};

/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data {};

/* rdma destroy srq ramrod data */
struct rdma_srq_destroy_ramrod_data {};

/* rdma modify srq ramrod data */
struct rdma_srq_modify_ramrod_data {};

/* RDMA Tid type enumeration (for register_tid ramrod) */
enum rdma_tid_type {};

/* The rdma XRC SRQ context */
struct rdma_xrc_srq_context {};

struct tstorm_rdma_task_ag_ctx {};

struct ustorm_rdma_conn_ag_ctx {};

struct xstorm_roce_conn_ag_ctx {};

struct tstorm_roce_conn_ag_ctx {};

/* The roce storm context of Ystorm */
struct ystorm_roce_conn_st_ctx {};

/* The roce storm context of Pstorm */
struct pstorm_roce_conn_st_ctx {};

/* The roce storm context of Xstorm */
struct xstorm_roce_conn_st_ctx {};

/* The roce storm context of Tstorm */
struct tstorm_roce_conn_st_ctx {};

/* The roce storm context of Mstorm */
struct mstorm_roce_conn_st_ctx {};

/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {};

/* roce connection context */
struct roce_conn_context {};

/* roce cqes statistics */
struct roce_cqe_stats {};

/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {};

/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data {};

/* RoCE Create Suspended qp requester runtime ramrod data */
struct roce_create_suspended_qp_req_runtime_ramrod_data {};

/* RoCE Create Suspended QP requester ramrod data */
struct roce_create_suspended_qp_req_ramrod_data {};

/* RoCE Create Suspended QP responder runtime params */
struct roce_create_suspended_qp_resp_runtime_params {};

/* RoCE RDB array entry */
struct roce_resp_qp_rdb_entry {};

/* RoCE Create Suspended QP responder runtime ramrod data */
struct roce_create_suspended_qp_resp_runtime_ramrod_data {};

/* RoCE Create Suspended QP responder ramrod data */
struct roce_create_suspended_qp_resp_ramrod_data {};

/* RoCE create ud qp ramrod data */
struct roce_create_ud_qp_ramrod_data {};

/* roce DCQCN received statistics */
struct roce_dcqcn_received_stats {};

/* roce DCQCN sent statistics */
struct roce_dcqcn_sent_stats {};

/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {};

/* RoCE destroy qp requester ramrod data */
struct roce_destroy_qp_req_ramrod_data {};

/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {};

/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {};

/* RoCE destroy ud qp ramrod data */
struct roce_destroy_ud_qp_ramrod_data {};

/* roce error statistics */
struct roce_error_stats {};

/* roce special events statistics */
struct roce_events_stats {};

/* roce slow path EQ cmd IDs */
enum roce_event_opcode {};

/* roce func init parameters */
struct roce_init_func_params {};

/* roce func init ramrod data */
struct roce_init_func_ramrod_data {};

/* roce_ll2_cqe_data */
struct roce_ll2_cqe_data {};

/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {};

/* roce modify qp responder ramrod data */
struct roce_modify_qp_resp_ramrod_data {};

/* RoCE query qp requester output params */
struct roce_query_qp_req_output_params {};

/* RoCE query qp requester ramrod data */
struct roce_query_qp_req_ramrod_data {};

/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {};

/* RoCE query qp responder ramrod data */
struct roce_query_qp_resp_ramrod_data {};

/* RoCE Query Suspended QP requester output params */
struct roce_query_suspended_qp_req_output_params {};

/* RoCE Query Suspended QP requester ramrod data */
struct roce_query_suspended_qp_req_ramrod_data {};

/* RoCE Query Suspended QP responder runtime params */
struct roce_query_suspended_qp_resp_runtime_params {};

/* RoCE Query Suspended QP responder output params */
struct roce_query_suspended_qp_resp_output_params {};

/* RoCE Query Suspended QP responder ramrod data */
struct roce_query_suspended_qp_resp_ramrod_data {};

/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {};

/* ROCE RDB array entry type */
enum roce_resp_qp_rdb_entry_type {};

/* RoCE update func params */
struct roce_update_func_params {};

struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {};

struct mstorm_roce_conn_ag_ctx {};

struct mstorm_roce_req_conn_ag_ctx {};

struct mstorm_roce_resp_conn_ag_ctx {};

struct tstorm_roce_req_conn_ag_ctx {};

struct tstorm_roce_resp_conn_ag_ctx {};

struct ustorm_roce_req_conn_ag_ctx {};

struct ustorm_roce_resp_conn_ag_ctx {};

struct xstorm_roce_req_conn_ag_ctx {};

struct xstorm_roce_resp_conn_ag_ctx {};

struct ystorm_roce_conn_ag_ctx {};

struct ystorm_roce_req_conn_ag_ctx {};

struct ystorm_roce_resp_conn_ag_ctx {};

/* RoCE flavor */
enum roce_flavor {};

/* The iwarp storm context of Ystorm */
struct ystorm_iwarp_conn_st_ctx {};

/* The iwarp storm context of Pstorm */
struct pstorm_iwarp_conn_st_ctx {};

/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx {};

struct xstorm_iwarp_conn_ag_ctx {};

struct tstorm_iwarp_conn_ag_ctx {};

/* The iwarp storm context of Tstorm */
struct tstorm_iwarp_conn_st_ctx {};

/* The iwarp storm context of Mstorm */
struct mstorm_iwarp_conn_st_ctx {};

/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {};

/* iwarp connection context */
struct iwarp_conn_context {};

/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
struct iwarp_create_qp_ramrod_data {};

/* iWARP async event opcodes */
enum iwarp_eqe_async_opcode {};

struct iwarp_eqe_data_mpa_async_completion {};

struct iwarp_eqe_data_tcp_async_completion {};

/* iWARP sync event opcodes */
enum iwarp_eqe_sync_opcode {};

/* iWARP EQE completion status */
enum iwarp_fw_return_code {};

/* iwarp function init parameters */
struct iwarp_init_func_params {};

/* iwarp func init ramrod data */
struct iwarp_init_func_ramrod_data {};

/* iWARP QP - possible states to transition to */
enum iwarp_modify_qp_new_state_type {};

/* iwarp modify qp ramrod data */
struct iwarp_modify_qp_ramrod_data {};

/* MPA params for Enhanced mode */
struct mpa_rq_params {};

/* MPA host Address-Len for private data */
struct mpa_ulp_buffer {};

/* iWARP MPA offload params common to Basic and Enhanced modes */
struct mpa_outgoing_params {};

/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
 * Ramrod.
 */
struct iwarp_mpa_offload_ramrod_data {};

/* iWARP TCP connection offload params passed by driver to FW */
struct iwarp_offload_params {};

/* iWARP query QP output params */
struct iwarp_query_qp_output_params {};

/* iWARP query QP ramrod data */
struct iwarp_query_qp_ramrod_data {};

/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {};

/* Per PF iWARP retransmit path statistics */
struct iwarp_rxmit_stats_drv {};

/* iWARP and TCP connection offload params passed by driver to FW in iWARP
 * offload ramrod.
 */
struct iwarp_tcp_offload_ramrod_data {};

/* iWARP MPA negotiation types */
enum mpa_negotiation_mode {};

/* iWARP MPA Enhanced mode RTR types */
enum mpa_rtr_type {};

/* unaligned opaque data received from LL2 */
struct unaligned_opaque_data {};

struct mstorm_iwarp_conn_ag_ctx {};

struct ustorm_iwarp_conn_ag_ctx {};

struct ystorm_iwarp_conn_ag_ctx {};

/* The fcoe storm context of Ystorm */
struct ystorm_fcoe_conn_st_ctx {};

/* FCoE 16-bits vlan structure */
struct fcoe_vlan_fields {};

/* FCoE 16-bits vlan union */
union fcoe_vlan_field_union {};

/* FCoE 16-bits vlan, vif union */
union fcoe_vlan_vif_field_union {};

/* Ethernet context section */
struct pstorm_fcoe_eth_context_section {};

/* The fcoe storm context of Pstorm */
struct pstorm_fcoe_conn_st_ctx {};

/* The fcoe storm context of Xstorm */
struct xstorm_fcoe_conn_st_ctx {};

struct xstorm_fcoe_conn_ag_ctx {};

/* The fcoe storm context of Ustorm */
struct ustorm_fcoe_conn_st_ctx {};

struct tstorm_fcoe_conn_ag_ctx {};

struct ustorm_fcoe_conn_ag_ctx {};

/* The fcoe storm context of Tstorm */
struct tstorm_fcoe_conn_st_ctx {};

struct mstorm_fcoe_conn_ag_ctx {};

/* Fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_fp {};

/* Non fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {};

/* The fcoe storm context of Mstorm */
struct mstorm_fcoe_conn_st_ctx {};

/* fcoe connection context */
struct fcoe_conn_context {};

/* FCoE connection offload params passed by driver to FW in FCoE offload
 * ramrod.
 */
struct fcoe_conn_offload_ramrod_params {};

/* FCoE connection terminate params passed by driver to FW in FCoE terminate
 * conn ramrod.
 */
struct fcoe_conn_terminate_ramrod_params {};

/* FCoE event type */
enum fcoe_event_type {};

/* FCoE init params passed by driver to FW in FCoE init ramrod */
struct fcoe_init_ramrod_params {};

/* FCoE ramrod Command IDs */
enum fcoe_ramrod_cmd_id {};

/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
 * ramrod.
 */
struct fcoe_stat_ramrod_params {};

struct ystorm_fcoe_conn_ag_ctx {};

/* The iscsi storm connection context of Ystorm */
struct ystorm_iscsi_conn_st_ctx {};

/* Combined iSCSI and TCP storm connection of Pstorm */
struct pstorm_iscsi_tcp_conn_st_ctx {};

/* The combined tcp and iscsi storm context of Xstorm */
struct xstorm_iscsi_tcp_conn_st_ctx {};

struct xstorm_iscsi_conn_ag_ctx {};

struct tstorm_iscsi_conn_ag_ctx {};

struct ustorm_iscsi_conn_ag_ctx {};

/* The iscsi storm connection context of Tstorm */
struct tstorm_iscsi_conn_st_ctx {};

struct mstorm_iscsi_conn_ag_ctx {};

/* Combined iSCSI and TCP storm connection of Mstorm */
struct mstorm_iscsi_tcp_conn_st_ctx {};

/* The iscsi storm context of Ustorm */
struct ustorm_iscsi_conn_st_ctx {};

/* iscsi connection context */
struct iscsi_conn_context {};

/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
struct iscsi_init_ramrod_params {};

struct ystorm_iscsi_conn_ag_ctx {};

#endif