#ifndef _MANA_H
#define _MANA_H
#include <net/xdp.h>
#include "gdma.h"
#include "hw_channel.h"
#define MANA_MAJOR_VERSION …
#define MANA_MINOR_VERSION …
#define MANA_MICRO_VERSION …
typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE …
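/* Tri-state value used where a setting may be true, false or unspecified,
 * e.g. the @rx argument of mana_config_rss().
 */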
enum TRI_STATE { … };
#define MANA_INDIRECT_TABLE_MAX_SIZE …
#define MANA_INDIRECT_TABLE_DEF_SIZE …
#define MANA_HASH_KEY_SIZE …
#define COMP_ENTRY_SIZE …
#define MAX_RX_BUFFERS_PER_QUEUE …
#define DEF_RX_BUFFERS_PER_QUEUE …
#define MIN_RX_BUFFERS_PER_QUEUE …
#define MAX_TX_BUFFERS_PER_QUEUE …
#define DEF_TX_BUFFERS_PER_QUEUE …
#define MIN_TX_BUFFERS_PER_QUEUE …
#define EQ_SIZE …
#define LOG2_EQ_THROTTLE …
#define MAX_PORTS_IN_MANA_DEV …
#define MANA_STATS_RX_COUNT …
#define MANA_STATS_TX_COUNT …
struct mana_stats_rx { … };
struct mana_stats_tx { … };
struct mana_txq { … };
struct mana_skb_head { … };
#define MANA_HEADROOM …
enum mana_tx_pkt_format { … };
struct mana_tx_short_oob { … };
struct mana_tx_long_oob { … };
struct mana_tx_oob { … };
enum mana_cq_type { … };
enum mana_cqe_type { … };
#define MANA_CQE_COMPLETION …
struct mana_cqe_header { … };
#define NDIS_HASH_IPV4 …
#define NDIS_HASH_TCP_IPV4 …
#define NDIS_HASH_UDP_IPV4 …
#define NDIS_HASH_IPV6 …
#define NDIS_HASH_TCP_IPV6 …
#define NDIS_HASH_UDP_IPV6 …
#define NDIS_HASH_IPV6_EX …
#define NDIS_HASH_TCP_IPV6_EX …
#define NDIS_HASH_UDP_IPV6_EX …
#define MANA_HASH_L3 …
#define MANA_HASH_L4 …
struct mana_rxcomp_perpkt_info { … };
#define MANA_RXCOMP_OOB_NUM_PPI …
struct mana_rxcomp_oob { … };
struct mana_tx_comp_oob { … };
struct mana_rxq;
#define CQE_POLLING_BUFFER …
struct mana_cq { … };
struct mana_recv_buf_oob { … };
#define MANA_RXBUF_PAD …
#define MANA_XDP_MTU_MAX …
struct mana_rxq { … };
struct mana_tx_qp { … };
struct mana_ethtool_stats { … };
struct mana_context { … };
struct mana_port_context { … };
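/* ndo_start_xmit handler: queue @skb on the port's TX work queue. */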
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
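/* Push RSS settings to the vport: @rx enables/disables RX, @update_hash and
 * @update_tab select whether the hash key and/or the indirection table are
 * reprogrammed. Illustrative call (assumes a TRI_STATE_TRUE enumerator; not
 * copied from the driver):
 *
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 */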
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
bool update_hash, bool update_tab);
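/* Queue setup and the attach/detach paths used when the interface is opened,
 * closed or reconfigured (e.g. a queue-count or MTU change).
 */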
int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);
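/* Device-level probe/remove called from the GDMA core; @resuming and
 * @suspending distinguish power-management transitions from a full
 * add/remove of the device.
 */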
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
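/* XDP support: mana_run_xdp() runs the attached program on a received buffer
 * and returns the verdict (XDP_PASS, XDP_TX, ...); mana_xdp_tx() sends an
 * XDP_TX frame back out of @ndev; mana_xdp_xmit() is the ndo_xdp_xmit hook
 * for redirected frames; mana_bpf() is the ndo_bpf hook used to attach or
 * detach a program, with mana_chn_setxdp() applying it to the RX queues.
 */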
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
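/* Query the per-port hardware (GF) counters; the results are exposed
 * through ethtool statistics.
 */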
void mana_query_gf_stats(struct mana_port_context *apc);
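/* Pre-allocate RX buffers for a pending reconfiguration (e.g. an MTU or
 * queue-count change) so the switch-over cannot fail for lack of memory;
 * mana_pre_dealloc_rxbufs() releases any buffers that remain unused.
 */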
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
#define GDMA_CQ_NO_EQ …
struct mana_obj_spec { … };
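/* Management commands sent to the hardware over the HW channel, and their
 * request/response layouts.
 */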
enum mana_command_code { … };
struct mana_query_device_cfg_req { … };
struct mana_query_device_cfg_resp { … };
struct mana_query_vport_cfg_req { … };
struct mana_query_vport_cfg_resp { … };
struct mana_config_vport_req { … };
struct mana_config_vport_resp { … };
struct mana_create_wqobj_req { … };
struct mana_create_wqobj_resp { … };
struct mana_destroy_wqobj_req { … };
struct mana_destroy_wqobj_resp { … };
struct mana_fence_rq_req { … };
struct mana_fence_rq_resp { … };
struct mana_query_gf_stat_req { … };
struct mana_query_gf_stat_resp { … };
struct mana_cfg_rx_steer_req_v2 { … };
struct mana_cfg_rx_steer_resp { … };
struct mana_register_hw_vport_req { … };
struct mana_register_hw_vport_resp { … };
struct mana_deregister_hw_vport_req { … };
struct mana_deregister_hw_vport_resp { … };
struct mana_register_filter_req { … };
struct mana_register_filter_resp { … };
struct mana_deregister_filter_req { … };
struct mana_deregister_filter_resp { … };
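/* Bit flags for the GF statistics query, selecting which of the counters
 * below are requested/returned.
 */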
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE …
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED …
#define STATISTICS_FLAGS_HC_RX_BYTES …
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS …
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES …
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS …
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES …
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS …
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES …
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED …
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED …
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS …
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT …
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT …
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT …
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT …
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT …
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION …
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB …
#define STATISTICS_FLAGS_HC_TX_BYTES …
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS …
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES …
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS …
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES …
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS …
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES …
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR …
#define MANA_MAX_NUM_QUEUES …
#define MANA_SHORT_VPORT_OFFSET_MAX …
struct mana_tx_package { … };
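/* Low-level vport and WQ-object helpers. These are shared with consumers
 * outside the Ethernet data path (e.g. the MANA RDMA driver).
 * mana_get_primary_netdev_rcu() returns the primary netdev for @port_index
 * and, per the _rcu suffix, is expected to be called under rcu_read_lock().
 */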
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
struct mana_obj_spec *cq_spec,
mana_handle_t *wq_obj);
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj);
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
#endif