linux/drivers/infiniband/hw/hns/hns_roce_device.h

/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#include "hns_roce_debugfs.h"

#define PCI_REVISION_ID_HIP08
#define PCI_REVISION_ID_HIP09
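
/*
 * Sketch (not from the original header): the two hardware generations are
 * told apart by PCI revision ID, e.g.:
 *
 *	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
 *		... apply HIP08-specific limits ...
 *
 * The pci_dev member name is assumed from common driver layout; the
 * revision values themselves are elided above.
 */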

#define HNS_ROCE_MAX_MSG_LEN

#define HNS_ROCE_IB_MIN_SQ_STRIDE

#define BA_BYTE_LEN

#define HNS_ROCE_MIN_CQE_NUM
#define HNS_ROCE_MIN_SRQ_WQE_NUM

#define HNS_ROCE_MAX_IRQ_NUM

#define HNS_ROCE_SGE_IN_WQE
#define HNS_ROCE_SGE_SHIFT

#define EQ_ENABLE
#define EQ_DISABLE

#define HNS_ROCE_CEQ
#define HNS_ROCE_AEQ

#define HNS_ROCE_CEQE_SIZE
#define HNS_ROCE_AEQE_SIZE

#define HNS_ROCE_V3_EQE_SIZE

#define HNS_ROCE_V2_CQE_SIZE
#define HNS_ROCE_V3_CQE_SIZE

#define HNS_ROCE_V2_QPC_SZ
#define HNS_ROCE_V3_QPC_SZ

#define HNS_ROCE_MAX_PORTS
#define HNS_ROCE_GID_SIZE
#define HNS_ROCE_SGE_SIZE
#define HNS_ROCE_DWQE_SIZE

#define HNS_ROCE_HOP_NUM_0

#define MR_TYPE_MR
#define MR_TYPE_FRMR
#define MR_TYPE_DMA

#define HNS_ROCE_FRMR_MAX_PA
#define HNS_ROCE_FRMR_ALIGN_SIZE

#define PKEY_ID
#define NODE_DESC_SIZE
#define DB_REG_OFFSET

/* Page-shift offset configured to HW when PAGE_SIZE is larger than 4 KB */
#define PG_SHIFT_OFFSET
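
/*
 * Example (sketch): with 64 KB kernel pages (PAGE_SHIFT == 16) and the
 * 4 KB hardware minimum (see HNS_HW_PAGE_SHIFT below), the offset
 * configured to HW would be 16 - 12 = 4; with 4 KB pages it is 0. The
 * actual relation is defined by the elided value above.
 */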

#define ATOMIC_WR_LEN

#define HNS_ROCE_IDX_QUE_ENTRY_SZ
#define SRQ_DB_REG

#define HNS_ROCE_QP_BANK_NUM
#define HNS_ROCE_CQ_BANK_NUM

#define CQ_BANKID_SHIFT
#define CQ_BANKID_MASK
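
/*
 * Sketch: CQs are spread across HNS_ROCE_CQ_BANK_NUM banks, and the bank
 * a CQ belongs to is recoverable from the low bits of its CQN, e.g.:
 *
 *	bankid = cqn & CQ_BANKID_MASK;
 *
 * The mask/shift pairing is assumed; the values are elided above.
 */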

#define HNS_ROCE_MAX_CQ_COUNT
#define HNS_ROCE_MAX_CQ_PERIOD

enum {};

enum hns_roce_event {};

enum {};

#define HNS_ROCE_DB_TYPE_COUNT
#define HNS_ROCE_DB_UNIT_SIZE
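
/*
 * Assumption-labeled sketch: a kernel doorbell page (see
 * hns_roce_db_pgdir below) is carved into HNS_ROCE_DB_UNIT_SIZE-byte
 * records, with one free bitmap per doorbell type, which is what the two
 * macros above parameterize.
 */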

enum {};

enum hns_roce_reset_stage {};

enum hns_roce_instance_state {};

enum {};

#define HNS_ROCE_CMD_SUCCESS

#define HNS_ROCE_MAX_HOP_NUM
/* The minimum page size supported by hardware is 4 KB */
#define HNS_HW_PAGE_SHIFT
#define HNS_HW_PAGE_SIZE

#define HNS_HW_MAX_PAGE_SHIFT
#define HNS_HW_MAX_PAGE_SIZE
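
/*
 * Example: a buffer backed by 64 KB kernel pages must still be described
 * to HW in HNS_HW_PAGE_SIZE units, so one such kernel page spans
 * PAGE_SIZE / HNS_HW_PAGE_SIZE = 16 HW pages (assuming the 4 KB minimum
 * stated above).
 */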

struct hns_roce_uar {};

enum hns_roce_mmap_type {};

struct hns_user_mmap_entry {};

struct hns_roce_ucontext {};

struct hns_roce_pd {};

struct hns_roce_xrcd {};

struct hns_roce_bitmap {};

struct hns_roce_ida {};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {};

struct hns_roce_buf_region {};

#define HNS_ROCE_MAX_BT_REGION
#define HNS_ROCE_MAX_BT_LEVEL
struct hns_roce_hem_list {};

enum mtr_type {};

struct hns_roce_buf_attr {};

struct hns_roce_hem_cfg {};

/* memory translation region */
struct hns_roce_mtr {};

struct hns_roce_mw {};

struct hns_roce_mr {};

struct hns_roce_mr_table {};

struct hns_roce_wq {};

struct hns_roce_sge {};

struct hns_roce_buf_list {};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in a contiguous
 * DMA address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation only fails when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 *
 * (A sketch of plausible bit encodings follows the enum below.)
 */
enum {};
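
/*
 * Sketch of plausible encodings (the real values are elided in the enum
 * above; only the flag names come from the comment):
 *
 *	HNS_ROCE_BUF_DIRECT  = BIT(0),
 *	HNS_ROCE_BUF_NOSLEEP = BIT(1),
 *	HNS_ROCE_BUF_NOFAIL  = BIT(2),
 */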

struct hns_roce_buf {};

struct hns_roce_db_pgdir {};

struct hns_roce_user_db_page {};

struct hns_roce_db {};

struct hns_roce_cq {};

struct hns_roce_idx_que {};

struct hns_roce_srq {};

struct hns_roce_uar_table {};

struct hns_roce_bank {};

struct hns_roce_idx_table {};

struct hns_roce_qp_table {};

struct hns_roce_cq_table {};

struct hns_roce_srq_table {};

struct hns_roce_av {};

struct hns_roce_ah {};

struct hns_roce_cmd_context {};

enum hns_roce_cmdq_state {};

struct hns_roce_cmdq {};

struct hns_roce_cmd_mailbox {};

struct hns_roce_mbox_msg {};

struct hns_roce_dev;

enum {};

struct hns_roce_work {};

enum hns_roce_cong_type {};

struct hns_roce_qp {};

struct hns_roce_ib_iboe {};

struct hns_roce_ceqe {};

#define CEQE_FIELD_LOC(h, l)

#define CEQE_CQN
#define CEQE_OWNER
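
/*
 * Sketch: the *_FIELD_LOC(h, l) macros name the bit range [h:l] of a
 * field inside the (elided) event-entry layout, consumed by the driver's
 * register-field accessors, e.g. pulling the CQ number out of a
 * completion event:
 *
 *	cqn = hr_reg_read(ceqe, CEQE_CQN);
 *
 * hr_reg_read() is assumed from the driver's common helpers.
 */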

struct hns_roce_aeqe {};

#define AEQE_FIELD_LOC(h, l)

#define AEQE_EVENT_TYPE
#define AEQE_SUB_TYPE
#define AEQE_OWNER
#define AEQE_EVENT_QUEUE_NUM

struct hns_roce_eq {};

struct hns_roce_eq_table {};

struct hns_roce_caps {};

enum hns_roce_device_state {};

enum hns_roce_hw_pkt_stat_index {};

enum hns_roce_sw_dfx_stat_index {};

struct hns_roce_hw {};

struct hns_roce_dev {};

/*
 * Conversion helpers from core ib_* objects to the driver's wrapper
 * structs: standard container_of() downcasts. The embedded member names
 * (ib_dev, ibucontext, ...) are assumed from the usual kernel convention
 * of naming the field after the core type it wraps.
 */
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	/* Push both doorbell words to HW as a single 64-bit MMIO store
	 * (sketch of the common kernel idiom).
	 */
	writeq(*(u64 *)val, dest);
}

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	/* QPs are assumed to live in an xarray keyed by QPN; the table
	 * member name is an assumption, not confirmed by this header.
	 */
	return xa_load(&hr_dev->qp_table_xa, qpn);
}

/*
 * Sketch reconstructions: hns_roce_buf is assumed to be organized as a
 * list of equally sized trunks (the trunk_list/trunk_shift/page_shift
 * member names are assumptions), so a byte offset splits into a trunk
 * index and an offset within that trunk.
 */
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
	       (offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
	       (offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}

#define hr_hw_page_align(x)

/*
 * Sketch reconstructions based on the HW-page macros above: HW page
 * numbers and shifts are expressed relative to the 4 KB hardware minimum.
 */
static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	/* HNS_ROCE_HOP_NUM_0 is assumed to be the encoding for "0 hops". */
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
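
/*
 * Worked example (sketch): 4096 CQEs of 64 bytes (buf_shift == 6) give
 * to_hr_hem_entries_size(4096, 6) == 256 KB (already HW-page aligned),
 * to_hr_hem_entries_count(4096, 6) == 4096 and an entries shift of 12.
 */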

#define DSCP_SHIFT

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	/* RoCEv2 carries the DSCP in the upper bits of the traffic class,
	 * so shift it down for UDP-encap GIDs (sketch of the usual logic).
	 */
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns RoCE HW needs both the current block and the next block address
 * from the MTT.
 */
#define MTT_MIN_COUNT
static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
{
	/* Root base address of the MTR's HEM config; member names assumed. */
	return mtr->hem_cfg.root_ba;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max);
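
/*
 * Usage sketch: fetch the first two block addresses (current plus next,
 * per the comment above) of a queue's MTT:
 *
 *	u64 mtts[MTT_MIN_COUNT] = {};
 *	int ret = hns_roce_mtr_find(hr_dev, mtr, 0, mtts, ARRAY_SIZE(mtts));
 */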
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	/* The AH holds no HW state of its own, so destroy always succeeds
	 * (assumed from the inline stub form).
	 */
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);
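
/*
 * Usage sketch (hypothetical sizes): allocate a 64 KB queue buffer in
 * 4 KB HW pages that must be physically contiguous:
 *
 *	buf = hns_roce_buf_alloc(hr_dev, SZ_64K, HNS_HW_PAGE_SHIFT,
 *				 HNS_ROCE_BUF_DIRECT);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 * The ERR_PTR-style return is assumed from the pointer-returning
 * prototype above.
 */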

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct uverbs_attr_bundle *attrs);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq);
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);

#endif /* _HNS_ROCE_DEVICE_H */