// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <net/mana/mana.h>

static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
	return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
	return readq(g->bar0_va + offset);
}

static void mana_gd_init_pf_regs(struct pci_dev *pdev)
{}

static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{}

static void mana_gd_init_registers(struct pci_dev *pdev)
{}

static int mana_gd_query_max_resources(struct pci_dev *pdev)
{}

static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
{}

static int mana_gd_detect_devices(struct pci_dev *pdev)
{}

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp)
{}
EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
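
/* A minimal usage sketch, assuming the general req/resp helpers declared in
 * gdma.h (mana_gd_init_req_hdr() and struct gdma_general_req/_resp); the
 * message type here is only an example:
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_general_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		... treat as failure ...
 */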

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
{}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{}
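
/* Sketch of the expected alloc/free pairing, assuming the virt_addr and
 * dma_handle fields of struct gdma_mem_info are filled in by a successful
 * mana_gd_alloc_memory():
 *
 *	struct gdma_mem_info gmi;
 *
 *	if (!mana_gd_alloc_memory(gc, PAGE_SIZE, &gmi)) {
 *		... use gmi.virt_addr and gmi.dma_handle ...
 *		mana_gd_free_memory(&gmi);
 *	}
 */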

static int mana_gd_create_hw_eq(struct gdma_context *gc,
				struct gdma_queue *queue)
{}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{}

#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8
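
/* Each doorbell page is shared by the four queue types; a ring operation is
 * presumed to target the page selected by db_index plus the per-type offset
 * above, roughly:
 *
 *	addr = gc->db_page_base + gc->db_page_size * db_index
 *	       + DOORBELL_OFFSET_<type>;
 *	writeq(doorbell_entry, addr);
 */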

static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
				  enum gdma_queue_type q_type, u32 qid,
				  u32 tail_ptr, u8 num_req)
{}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{}

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{}

static void mana_gd_process_eqe(struct gdma_queue *eq)
{}

static void mana_gd_process_eq_events(void *arg)
{}

static int mana_gd_register_irq(struct gdma_queue *queue,
				const struct gdma_queue_spec *spec)
{}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{}

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
			       struct gdma_queue *queue)
{}

static int mana_gd_create_eq(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     bool create_hwq, struct gdma_queue *queue)
{}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
			      struct gdma_queue *queue)
{}

static void mana_gd_destroy_cq(struct gdma_context *gc,
			       struct gdma_queue *queue)
{}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr)
{}

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
{}
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);

static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
{}
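
/* Lifecycle sketch: a successful mana_gd_create_dma_region() is assumed to
 * store the new handle in gmi->dma_region_handle, which is what callers
 * later pass to mana_gd_destroy_dma_region(gc, gmi->dma_region_handle).
 */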

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr)
{}
EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
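
/* Illustrative gdma_queue_spec setup for an EQ, with example values modeled
 * on what a client driver might use (EQ_SIZE and LOG2_EQ_THROTTLE come from
 * mana.h; the eq_ctx context pointer is a placeholder):
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *eq;
 *	int err;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = EQ_SIZE;
 *	spec.eq.callback = NULL;
 *	spec.eq.context = eq_ctx;
 *	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 *
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */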

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr)
{}

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{}
EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{}

int mana_gd_register_device(struct gdma_dev *gd)
{}
EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);

int mana_gd_deregister_device(struct gdma_dev *gd)
{}
EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);

u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{}

static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
				    enum gdma_queue_type q_type,
				    u32 client_oob_size, u32 sgl_data_size,
				    u8 *wqe_ptr)
{}

static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
			      const struct gdma_wqe_request *wqe_req)
{}

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info)
{}

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe_req,
			  struct gdma_posted_wqe_info *wqe_info)
{}
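
/* A hedged sketch of posting a single-SGE work request; field names follow
 * struct gdma_wqe_request and struct gdma_sge in gdma.h, while dma_addr and
 * len are placeholders:
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_sge sge = {};
 *	int err;
 *
 *	sge.address = dma_addr;
 *	sge.size = len;
 *	sge.mem_key = gd->gpa_mkey;
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *
 *	err = mana_gd_post_and_ring(queue, &wqe_req, &wqe_info);
 */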

static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{}

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{}
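
/* Expected polling pattern; CQE_POLLING_BUFFER is borrowed from the HW
 * channel code purely as an example batch size:
 *
 *	struct gdma_comp comps[CQE_POLLING_BUFFER];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, CQE_POLLING_BUFFER);
 *	for (i = 0; i < n; i++)
 *		... dispatch comps[i] by comps[i].wq_num ...
 */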

static irqreturn_t mana_gd_intr(int irq, void *arg)
{}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{}

void mana_gd_free_res_map(struct gdma_resource *r)
{}
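
/* Pairing note: mana_gd_alloc_res_map() is assumed to size a bitmap for
 * res_avail entries (e.g. doorbells or MSI-X vectors) inside the
 * gdma_resource, and mana_gd_free_res_map() releases that bitmap.
 */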

static int irq_setup(unsigned int *irqs, unsigned int len, int node)
{}

static int mana_gd_setup_irqs(struct pci_dev *pdev)
{}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{}

static int mana_gd_setup(struct pci_dev *pdev)
{}
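
/* Presumed bring-up order for mana_gd_setup(), inferred from the helpers
 * above: initialize the register map, set up MSI-X IRQs, create the HW
 * channel, query device limits and the HWC timeout, then detect the
 * devices the SoC exposes. mana_gd_cleanup() undoes this in reverse.
 */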

static void mana_gd_cleanup(struct pci_dev *pdev)
{}

static bool mana_is_pf(unsigned short dev_id)
{
	return dev_id == MANA_PF_DEVICE_ID;
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{}

static void mana_gd_remove(struct pci_dev *pdev)
{}

/* The 'state' parameter is not used. */
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{}

/* If the NIC hardware stops working, the suspend and resume callbacks will
 * fail; when that happens, it's safer to just report an error than to try to
 * undo what has been done.
 */
static int mana_gd_resume(struct pci_dev *pdev)
{}

/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
static void mana_gd_shutdown(struct pci_dev *pdev)
{}

static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
	{ }
};

static struct pci_driver mana_driver = {
	.name		= "mana",
	.id_table	= mana_id_table,
	.probe		= mana_gd_probe,
	.remove		= mana_gd_remove,
	.suspend	= mana_gd_suspend,
	.resume		= mana_gd_resume,
	.shutdown	= mana_gd_shutdown,
};

module_pci_driver(mana_driver);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");