#ifndef _GDMA_H
#define _GDMA_H
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES …
enum gdma_request_type { … };
#define GDMA_RESOURCE_DOORBELL_PAGE …
enum gdma_queue_type { … };
enum gdma_work_request_flags { … };
enum gdma_eqe_type { … };
enum { … };
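/* Spinlock-protected bitmap tracking allocatable resources; allocated and
 * freed with mana_gd_alloc_res_map()/mana_gd_free_res_map().
 */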
struct gdma_resource { … };
union gdma_doorbell_entry { … };
struct gdma_msg_hdr { … };
struct gdma_dev_id { … };
struct gdma_req_hdr { … };
struct gdma_resp_hdr { … };
struct gdma_general_req { … };
#define GDMA_MESSAGE_V1 …
#define GDMA_MESSAGE_V2 …
#define GDMA_MESSAGE_V3 …
struct gdma_general_resp { … };
#define GDMA_STANDARD_HEADER_TYPE …
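/* Initialize a request header: standard header type, message code and the
 * request/response sizes, as expected by mana_gd_send_request().
 */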
static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
u32 req_size, u32 resp_size)
{ … }
struct gdma_sge { … };
struct gdma_wqe_request { … };
enum gdma_page_type { … };
#define GDMA_INVALID_DMA_REGION …
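/* DMA-coherent memory backing a queue or DMA region, as allocated by
 * mana_gd_alloc_memory() and freed by mana_gd_free_memory().
 */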
struct gdma_mem_info { … };
#define REGISTER_ATB_MST_MKEY_LOWER_SIZE …
struct gdma_dev { … };
#define MANA_PAGE_SHIFT …
#define MANA_PAGE_SIZE …
#define MANA_PAGE_ALIGN(x) …
#define MANA_PAGE_ALIGNED(addr) …
#define MANA_PFN(a) …
#define MANA_MIN_QSIZE …
#define GDMA_CQE_SIZE …
#define GDMA_EQE_SIZE …
#define GDMA_MAX_SQE_SIZE …
#define GDMA_MAX_RQE_SIZE …
#define GDMA_COMP_DATA_SIZE …
#define GDMA_EVENT_DATA_SIZE …
#define GDMA_WQE_BU_SIZE …
#define INVALID_PDID …
#define INVALID_DOORBELL …
#define INVALID_MEM_KEY …
#define INVALID_QUEUE_ID …
#define INVALID_PCI_MSIX_INDEX …
struct gdma_comp { … };
struct gdma_event { … };
struct gdma_queue;
struct mana_eq { … };
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *event);
typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
struct gdma_queue { … };
struct gdma_queue_spec { … };
#define MANA_IRQ_NAME_SZ …
struct gdma_irq_context { … };
struct gdma_context { … };
#define MAX_NUM_GDMA_DEVICES …
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{ … }
static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{ … }
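/* Queue helpers: WQE access, available-space accounting, EQ self-test, and
 * creation/destruction of HWC and MANA queues from a gdma_queue_spec.
 */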
u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
int mana_gd_create_hwc_queue(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
struct gdma_queue **queue_ptr);
int mana_gd_create_mana_eq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
struct gdma_queue **queue_ptr);
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
struct gdma_queue **queue_ptr);
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
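/*
 * Typical completion-processing loop (illustrative sketch only; BATCH and
 * handle_completion() are placeholders, not part of this header):
 *
 *	struct gdma_comp comp[BATCH];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comp, BATCH);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&comp[i]);
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);	// re-arm the CQ
 */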
struct gdma_wqe { … };
#define INLINE_OOB_SMALL_SIZE …
#define INLINE_OOB_LARGE_SIZE …
#define MAX_TX_WQE_SIZE …
#define MAX_RX_WQE_SIZE …
#define MAX_TX_WQE_SGL_ENTRIES …
#define MAX_RX_WQE_SGL_ENTRIES …
struct gdma_cqe { … };
#define GDMA_CQE_OWNER_BITS …
#define GDMA_CQE_OWNER_MASK …
#define SET_ARM_BIT …
#define GDMA_EQE_OWNER_BITS …
union gdma_eqe_info { … };
#define GDMA_EQE_OWNER_MASK …
#define INITIALIZED_OWNER_BIT(log2_num_entries) …
struct gdma_eqe { … };
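/* BAR0 offsets/sizes of the doorbell pages and the shared-memory region;
 * the VF, the PF and the SR-IOV config space each use their own offsets.
 */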
#define GDMA_REG_DB_PAGE_OFFSET …
#define GDMA_REG_DB_PAGE_SIZE …
#define GDMA_REG_SHM_OFFSET …
#define GDMA_PF_REG_DB_PAGE_SIZE …
#define GDMA_PF_REG_DB_PAGE_OFF …
#define GDMA_PF_REG_SHM_OFF …
#define GDMA_SRIOV_REG_CFG_BASE_OFF …
#define MANA_PF_DEVICE_ID …
#define MANA_VF_DEVICE_ID …
struct gdma_posted_wqe_info { … };
struct gdma_generate_test_event_req { … };
enum { … };
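/* Driver capability bits advertised to the device during version
 * verification; see gdma_verify_ver_req below.
 */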
#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT …
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX …
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG …
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT …
#define GDMA_DRV_CAP_FLAGS1 …
#define GDMA_DRV_CAP_FLAGS2 …
#define GDMA_DRV_CAP_FLAGS3 …
#define GDMA_DRV_CAP_FLAGS4 …
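/* Request/response layouts of the management messages sent over the HWC:
 * version verification, device listing and registration, resource-range,
 * queue, DMA region, protection domain (PD) and memory region (MR)
 * operations.
 */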
struct gdma_verify_ver_req { … };
struct gdma_verify_ver_resp { … };
struct gdma_query_max_resources_resp { … };
struct gdma_list_devices_resp { … };
struct gdma_register_device_resp { … };
struct gdma_allocate_resource_range_req { … };
struct gdma_allocate_resource_range_resp { … };
struct gdma_destroy_resource_range_req { … };
struct gdma_create_queue_req { … };
struct gdma_create_queue_resp { … };
struct gdma_disable_queue_req { … };
struct gdma_query_hwc_timeout_req { … };
struct gdma_query_hwc_timeout_resp { … };
enum atb_page_size { … };
enum gdma_mr_access_flags { … };
struct gdma_create_dma_region_req { … };
struct gdma_create_dma_region_resp { … };
struct gdma_dma_region_add_pages_req { … };
struct gdma_destroy_dma_region_req { … };
enum gdma_pd_flags { … };
struct gdma_create_pd_req { … };
struct gdma_create_pd_resp { … };
struct gdma_destroy_pd_req { … };
struct gdma_destory_pd_resp { … };
enum gdma_mr_type { … };
struct gdma_create_mr_params { … };
struct gdma_create_mr_request { … };
struct gdma_create_mr_response { … };
struct gdma_destroy_mr_request { … };
struct gdma_destroy_mr_response { … };
int mana_gd_verify_vf_version(struct pci_dev *pdev);
int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);
int mana_gd_post_work_request(struct gdma_queue *wq,
const struct gdma_wqe_request *wqe_req,
struct gdma_posted_wqe_info *wqe_info);
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe,
struct gdma_posted_wqe_info *wqe_info);
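/*
 * Posting a work request (illustrative sketch only; 'sge', 'wq' and 'err'
 * are placeholders, and the sgl/num_sge members are assumed to match the
 * elided gdma_wqe_request layout):
 *
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	int err;
 *
 *	wqe_req.sgl = &sge;		// gdma_sge describing the buffer
 *	wqe_req.num_sge = 1;
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 *
 * mana_gd_post_work_request() does the same, minus the doorbell write.
 */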
int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);
void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
struct gdma_queue *queue);
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi);
void mana_gd_free_memory(struct gdma_mem_info *gmi);
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
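/*
 * Typical request/response exchange with the device (illustrative sketch
 * only; MY_REQ_CODE and gc are placeholders, and the hdr/status members
 * are assumed to match the elided gdma_general_req/gdma_general_resp
 * layouts):
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_general_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, MY_REQ_CODE, sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		goto fail;	// transport error or device-reported status
 */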
int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
#endif /* _GDMA_H */