/* linux/drivers/infiniband/hw/erdma/erdma_verbs.h */

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include "erdma.h"

/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
/* Inline data rides in the SGE slots of the send WQE. */
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512

/* mmap flag stored in erdma_user_mmap_entry.mmap_flag. */
enum {
	ERDMA_MMAP_IO_NC = 0, /* no cache */
};

struct erdma_user_mmap_entry {};

struct erdma_ext_db_info {};

struct erdma_ucontext {};

struct erdma_pd {};

/*
 * MemoryRegion definition.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* per mtt entry takes 8 Bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8

/* MR types as understood by the hardware. */
#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

/* MTT layout: inline (0-level) or one level of indirection. */
#define ERDMA_MR_MTT_0LEVEL 0
#define ERDMA_MR_MTT_1LEVEL 1

/* Hardware MR access-right bits. */
#define ERDMA_MR_ACC_RA BIT(0) /* remote atomic */
#define ERDMA_MR_ACC_LR BIT(1) /* local read */
#define ERDMA_MR_ACC_LW BIT(2) /* local write */
#define ERDMA_MR_ACC_RR BIT(3) /* remote read */
#define ERDMA_MR_ACC_RW BIT(4) /* remote write */

static inline u8 to_erdma_access_flags(int access)
{}

/* Hierarchical storage structure for MTT entries */
struct erdma_mtt {};

struct erdma_mem {};

struct erdma_mr {};

struct erdma_user_dbrecords_page {};

struct erdma_uqp {};

struct erdma_kqp {};

/* iWARP-style QP state machine states. */
enum erdma_qp_state {
	ERDMA_QP_STATE_IDLE = 0,
	ERDMA_QP_STATE_RTR = 1,
	ERDMA_QP_STATE_RTS = 2,
	ERDMA_QP_STATE_CLOSING = 3,
	ERDMA_QP_STATE_TERMINATE = 4,
	ERDMA_QP_STATE_ERROR = 5,
	ERDMA_QP_STATE_UNDEF = 7,
	ERDMA_QP_STATE_COUNT = 8
};

/* Bitmask selecting which fields of erdma_qp_attrs to apply. */
enum erdma_qp_attr_mask {
	ERDMA_QP_ATTR_STATE = (1 << 0),
	ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
	ERDMA_QP_ATTR_ORD = (1 << 3),
	ERDMA_QP_ATTR_IRD = (1 << 4),
	ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
	ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
	ERDMA_QP_ATTR_MPA = (1 << 7)
};

/* Runtime flags kept in erdma_qp.flags. */
enum erdma_qp_flags {
	ERDMA_QP_IN_FLUSHING = (1 << 0),
};

struct erdma_qp_attrs {};

struct erdma_qp {};

struct erdma_kcq_info {};

struct erdma_ucq_info {};

struct erdma_cq {};

/* Device-visible QP number of an erdma_qp. */
#define QP_ID(qp) ((qp)->ibqp.qp_num)

static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{}

/* QP lifecycle helpers: reference get/put, internal state modification,
 * and connection-teardown hooks invoked by the CM layer.
 */
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);

static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{}

/* Verbs entry points exported by this driver.
 * NOTE(review): these signatures match ib_device_ops callbacks registered
 * with the RDMA core — verify against the ops table in the main driver file.
 */
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
		       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
			     struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct uverbs_attr_bundle *attrs);
int erdma_query_port(struct ib_device *dev, u32 port,
		     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
		    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
						u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		       u32 port, int index);

#endif