linux/include/net/mana/mana.h

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Definitions for the Microsoft Azure Network Adapter (MANA).
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION
#define MANA_MINOR_VERSION
#define MANA_MICRO_VERSION

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE
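
/* Illustrative convention (hedged, not upstream code): handles returned by
 * the hardware are opaque; initializing them to INVALID_MANA_HANDLE lets
 * teardown paths tell whether an object was ever created:
 *
 *	mana_handle_t vport = INVALID_MANA_HANDLE;
 */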

enum TRI_STATE {};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE
#define MANA_INDIRECT_TABLE_DEF_SIZE
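
/* Illustrative sketch (hypothetical helper, not upstream code): validating a
 * caller-supplied indirection table size against the power-of-2 requirement
 * above, using is_power_of_2() from <linux/log2.h>:
 *
 *	static bool mana_ind_table_size_ok(u32 size)
 *	{
 *		return size && size <= MANA_INDIRECT_TABLE_MAX_SIZE &&
 *		       is_power_of_2(size);
 *	}
 */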

/* The Toeplitz hash key's length in bytes: it should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE
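
/* Illustrative sketch (hedged): a key of this size can be seeded from the
 * kernel's global RSS key with netdev_rss_key_fill() from <linux/netdevice.h>:
 *
 *	u8 hashkey[MANA_HASH_KEY_SIZE];
 *
 *	netdev_rss_key_fill(hashkey, MANA_HASH_KEY_SIZE);
 */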

#define COMP_ENTRY_SIZE

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. An RX
 * buffer size beyond this value is rejected by the __alloc_pages() call. See
 * the ring-sizing sketch after the TX limits below.
 */
#define MAX_RX_BUFFERS_PER_QUEUE
#define DEF_RX_BUFFERS_PER_QUEUE
#define MIN_RX_BUFFERS_PER_QUEUE

/* This max value for TX buffers was derived through testing as the maximum
 * number of allocatable pages the host supports per guest. A TX buffer size
 * beyond this value is rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE
#define DEF_TX_BUFFERS_PER_QUEUE
#define MIN_TX_BUFFERS_PER_QUEUE
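
/* Illustrative sketch (hypothetical helper, not upstream code): clamping a
 * user-requested ring size, e.g. from an ethtool set_ringparam handler, to
 * the RX limits above; TX follows the same pattern with the
 * *_TX_BUFFERS_PER_QUEUE limits:
 *
 *	static u32 mana_clamp_rx_bufs(u32 requested)
 *	{
 *		if (!requested)
 *			return DEF_RX_BUFFERS_PER_QUEUE;
 *		return clamp_t(u32, requested, MIN_RX_BUFFERS_PER_QUEUE,
 *			       MAX_RX_BUFFERS_PER_QUEUE);
 *	}
 */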

#define EQ_SIZE

#define LOG2_EQ_THROTTLE

#define MAX_PORTS_IN_MANA_DEV

/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT
#define MANA_STATS_TX_COUNT

struct mana_stats_rx {};

struct mana_stats_tx {};

struct mana_txq {};

/* skb data and frags DMA mappings */
struct mana_skb_head {};

#define MANA_HEADROOM

enum mana_tx_pkt_format {};

struct mana_tx_short_oob {}; /* HW DATA */

struct mana_tx_long_oob {}; /* HW DATA */

struct mana_tx_oob {}; /* HW DATA */

enum mana_cq_type {};

enum mana_cqe_type {};

#define MANA_CQE_COMPLETION

struct mana_cqe_header {}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4
#define NDIS_HASH_TCP_IPV4
#define NDIS_HASH_UDP_IPV4
#define NDIS_HASH_IPV6
#define NDIS_HASH_TCP_IPV6
#define NDIS_HASH_UDP_IPV6
#define NDIS_HASH_IPV6_EX
#define NDIS_HASH_TCP_IPV6_EX
#define NDIS_HASH_UDP_IPV6_EX

#define MANA_HASH_L3
#define MANA_HASH_L4
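
/* Illustrative sketch (hedged; variable names are assumed, and the OOB
 * layout is elided here): given the hash type and hash value parsed from a
 * receive completion, the RX path can map these bits onto the kernel's
 * packet hash types:
 *
 *	if (hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L4);
 *	else if (hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L3);
 */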

struct mana_rxcomp_perpkt_info {}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI

/* Receive completion OOB */
struct mana_rxcomp_oob {}; /* HW DATA */

struct mana_tx_comp_oob {}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER

struct mana_cq {};

struct mana_recv_buf_oob {};

#define MANA_RXBUF_PAD

#define MANA_XDP_MTU_MAX

struct mana_rxq {};

struct mana_tx_qp {};

struct mana_ethtool_stats {};

struct mana_context {};

struct mana_port_context {};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);
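
/* Illustrative usage of mana_config_rss() (hedged; the authoritative
 * semantics live in the driver sources): after changing the port's hash key
 * or indirection table, push the new state to the device, with the two bools
 * selecting what gets reprogrammed. TRI_STATE_TRUE is assumed to be the enum
 * value that enables RX steering:
 *
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 */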

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
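
/* Illustrative sketch of how the XDP hooks above compose on the RX path
 * (hedged; the authoritative flow is in the driver sources). mana_run_xdp()
 * returns an XDP action that the caller dispatches:
 *
 *	u32 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
 *
 *	switch (act) {
 *	case XDP_PASS:
 *		... build an skb and pass it up the stack ...
 *		break;
 *	case XDP_TX:
 *		... reflect the frame back out via mana_xdp_tx() ...
 *		break;
 *	default:
 *		... drop the buffer ...
 *	}
 */
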
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ

struct mana_obj_spec {};

enum mana_command_code {};

/* Query Device Configuration */
struct mana_query_device_cfg_req {}; /* HW DATA */

struct mana_query_device_cfg_resp {}; /* HW DATA */
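
/* Illustrative sketch of the request/response pattern shared by the
 * "HW DATA" messages below (hedged: the hdr field and the
 * MANA_QUERY_DEV_CONFIG command code are assumed here, since the struct
 * bodies and command enum are elided). Given the device's
 * struct gdma_context *gc from gdma.h:
 *
 *	struct mana_query_device_cfg_resp resp = {};
 *	struct mana_query_device_cfg_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 */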

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {}; /* HW DATA */

struct mana_query_vport_cfg_resp {}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {}; /* HW DATA */

struct mana_config_vport_resp {}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {}; /* HW DATA */

struct mana_create_wqobj_resp {}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {}; /* HW DATA */

struct mana_destroy_wqobj_resp {}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {}; /* HW DATA */

struct mana_fence_rq_resp {}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {}; /* HW DATA */

struct mana_query_gf_stat_resp {}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {}; /* HW DATA */

struct mana_cfg_rx_steer_resp {}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {}; /* HW DATA */

struct mana_register_hw_vport_resp {}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {}; /* HW DATA */

struct mana_deregister_hw_vport_resp {}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {}; /* HW DATA */

struct mana_register_filter_resp {}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {}; /* HW DATA */

struct mana_deregister_filter_resp {}; /* HW DATA */

/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR
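
/* Illustrative usage (hedged; the req_stats field name is assumed, as the
 * request struct body is elided above): callers OR these flags together in
 * the GF-stats request to select which counters the hardware returns:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES |
 *			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
 */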

#define MANA_MAX_NUM_QUEUES

#define MANA_SHORT_VPORT_OFFSET_MAX

struct mana_tx_package {};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);
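
/* Illustrative pairing (hedged; GDMA_SQ comes from gdma.h, and wq_spec,
 * cq_spec and vport are assumed to be set up by the caller): every
 * successful mana_create_wq_obj() is eventually matched by
 * mana_destroy_wq_obj() with the same wq_type:
 *
 *	mana_handle_t wq_obj = INVALID_MANA_HANDLE;
 *
 *	err = mana_create_wq_obj(apc, vport, GDMA_SQ, &wq_spec, &cq_spec,
 *				 &wq_obj);
 *	...
 *	mana_destroy_wq_obj(apc, GDMA_SQ, wq_obj);
 */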

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
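
/* Illustrative usage (hedged): as the _rcu suffix suggests, the returned
 * pointer is only safe to use inside an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	ndev = mana_get_primary_netdev_rcu(ac, 0);
 *	if (ndev)
 *		... use ndev ...
 *	rcu_read_unlock();
 */
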
#endif /* _MANA_H */