linux/include/net/mana/gdma.h

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
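
/* Illustration only (hypothetical struct, not part of this header): members
 * ordered so that each sits at its natural alignment need no __packed, e.g.
 *
 *	struct example_hw_data {
 *		u64 addr;	// offset 0
 *		u32 length;	// offset 8
 *		u32 flags;	// offset 12; sizeof() == 16, no implicit padding
 *	};
 *
 * has the same layout whether or not __packed is applied.
 */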

enum gdma_request_type {};

#define GDMA_RESOURCE_DOORBELL_PAGE

enum gdma_queue_type {};

enum gdma_work_request_flags {};

enum gdma_eqe_type {};

enum {};

struct gdma_resource {};

union gdma_doorbell_entry {}; /* HW DATA */

struct gdma_msg_hdr {}; /* HW DATA */

struct gdma_dev_id {}; /* HW DATA */

struct gdma_req_hdr {}; /* HW DATA */

struct gdma_resp_hdr {}; /* HW DATA */

struct gdma_general_req {}; /* HW DATA */

#define GDMA_MESSAGE_V1
#define GDMA_MESSAGE_V2
#define GDMA_MESSAGE_V3

struct gdma_general_resp {}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{}
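
/* Sketch of the typical request/response pattern built on the helper above,
 * assuming the usual pairing with mana_gd_send_request() declared later in
 * this header; GDMA_GENERATE_TEST_EQE is used purely as an example message
 * code and 'gc' is the caller's struct gdma_context:
 *
 *	struct gdma_general_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */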

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {}; /* HW DATA */

struct gdma_wqe_request {};
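
/* Sketch of how a caller typically describes a buffer with an SGE and posts
 * it via mana_gd_post_and_ring(), declared later in this header (illustrative;
 * the field names assume the full definitions elided above, and
 * dma_addr/dma_len/mem_key/wq stand in for values the caller already has from
 * DMA mapping and queue creation):
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_sge sge = {};
 *	int err;
 *
 *	sge.address = dma_addr;		// DMA address of the buffer
 *	sge.mem_key = mem_key;		// memory key for the region
 *	sge.size = dma_len;		// buffer length in bytes
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */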

enum gdma_page_type {};

#define GDMA_INVALID_DMA_REGION

struct gdma_mem_info {};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE

struct gdma_dev {};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT
#define MANA_PAGE_SIZE
#define MANA_PAGE_ALIGN(x)
#define MANA_PAGE_ALIGNED(addr)
#define MANA_PFN(a)
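
/* Illustrative use of the macros above (a sketch, assuming the usual pairing
 * with mana_gd_alloc_memory() and struct gdma_mem_info from this header;
 * 'queue_size' and 'gc' are the caller's values): queue buffers are rounded
 * up to whole DMA units before allocation, e.g.
 *
 *	struct gdma_mem_info gmi;
 *	u32 len = MANA_PAGE_ALIGN(queue_size);
 *	int err = mana_gd_alloc_memory(gc, len, &gmi);
 */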

/* Required by HW */
#define MANA_MIN_QSIZE

#define GDMA_CQE_SIZE
#define GDMA_EQE_SIZE
#define GDMA_MAX_SQE_SIZE
#define GDMA_MAX_RQE_SIZE

#define GDMA_COMP_DATA_SIZE

#define GDMA_EVENT_DATA_SIZE

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE
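
/* For example, the number of BUs consumed by a request of 'unaligned_size'
 * bytes is (illustrative arithmetic, not a definition from this header):
 *
 *	u32 size_in_bu = ALIGN(unaligned_size, GDMA_WQE_BU_SIZE) /
 *			 GDMA_WQE_BU_SIZE;
 */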

#define INVALID_PDID
#define INVALID_DOORBELL
#define INVALID_MEM_KEY
#define INVALID_QUEUE_ID
#define INVALID_PCI_MSIX_INDEX

struct gdma_comp {};

struct gdma_event {};

struct gdma_queue;

struct mana_eq {};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {};
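
/* Sketch of the producer-side accounting described above (illustrative,
 * assuming the usual pairing with mana_gd_wq_avail_space() and
 * mana_gd_post_and_ring() declared below; 'wq' is the caller's SQ/RQ and
 * 'wqe_size' is the total size of the WQE about to be posted):
 *
 *	if (mana_gd_wq_avail_space(wq) < wqe_size)
 *		return -EBUSY;	// the HW has not consumed enough WQEs yet
 *
 *	// Posting advances 'head' by the WQE size in BUs; the _and_ring
 *	// variant also notifies the HW through the queue's doorbell.
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */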

struct gdma_queue_spec {};

#define MANA_IRQ_NAME_SZ

struct gdma_irq_context {};

struct gdma_context {};

#define MAX_NUM_GDMA_DEVICES

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
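
/* Sketch of the completion-processing pattern built on the two calls above
 * (illustrative; CQE_BATCH is a hypothetical batch size and
 * handle_completion() a hypothetical consumer chosen by the caller):
 *
 *	struct gdma_comp comps[CQE_BATCH];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, CQE_BATCH);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&comps[i]);
 *
 *	// Re-arm the CQ so the HW raises an event for the next completion.
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */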

struct gdma_wqe {}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE
#define INLINE_OOB_LARGE_SIZE

#define MAX_TX_WQE_SIZE
#define MAX_RX_WQE_SIZE

#define MAX_TX_WQE_SGL_ENTRIES

#define MAX_RX_WQE_SGL_ENTRIES

struct gdma_cqe {}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS

#define GDMA_CQE_OWNER_MASK

#define SET_ARM_BIT

#define GDMA_EQE_OWNER_BITS

union gdma_eqe_info {}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK
#define INITIALIZED_OWNER_BIT(log2_num_entries)

struct gdma_eqe {}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET
#define GDMA_REG_DB_PAGE_SIZE
#define GDMA_REG_SHM_OFFSET

#define GDMA_PF_REG_DB_PAGE_SIZE
#define GDMA_PF_REG_DB_PAGE_OFF
#define GDMA_PF_REG_SHM_OFF

#define GDMA_SRIOV_REG_CFG_BASE_OFF

#define MANA_PF_DEVICE_ID
#define MANA_VF_DEVICE_ID

struct gdma_posted_wqe_info {};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT

#define GDMA_DRV_CAP_FLAGS1

#define GDMA_DRV_CAP_FLAGS2

#define GDMA_DRV_CAP_FLAGS3

#define GDMA_DRV_CAP_FLAGS4

struct gdma_verify_ver_req {}; /* HW DATA */

struct gdma_verify_ver_resp {}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {}; /* HW DATA */

struct gdma_allocate_resource_range_req {};

struct gdma_allocate_resource_range_resp {};

struct gdma_destroy_resource_range_req {};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {}; /* HW DATA */

struct gdma_create_queue_resp {}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {};

struct gdma_query_hwc_timeout_resp {};

enum atb_page_size {};

enum gdma_mr_access_flags {};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {}; /* HW DATA */

struct gdma_create_dma_region_resp {}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {}; /* HW DATA */

enum gdma_pd_flags {};

struct gdma_create_pd_req {}; /* HW DATA */

struct gdma_create_pd_resp {}; /* HW DATA */

struct gdma_destroy_pd_req {}; /* HW DATA */

struct gdma_destory_pd_resp {}; /* HW DATA */

enum gdma_mr_type {};

struct gdma_create_mr_params {};

struct gdma_create_mr_request {}; /* HW DATA */

struct gdma_create_mr_response {}; /* HW DATA */

struct gdma_destroy_mr_request {}; /* HW DATA */

struct gdma_destroy_mr_response {}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */