/* linux/drivers/infiniband/hw/irdma/user.h */

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_USER_H
#define IRDMA_USER_H

/*
 * Opaque handle / identifier aliases used by the irdma user-mode layer.
 * NOTE(review): every macro body in this file is empty — the real
 * expansions (typedef targets, numeric values) appear to have been
 * stripped from this copy.  Confirm against the upstream Linux header
 * before relying on any of these names.
 */
#define irdma_handle
#define irdma_adapter_handle
#define irdma_qp_handle
#define irdma_cq_handle
#define irdma_pd_id
#define irdma_stag_handle
#define irdma_stag_index
#define irdma_stag
#define irdma_stag_key
#define irdma_tagged_offset
#define irdma_access_privileges
#define irdma_physical_fragment
#define irdma_address_list

/* Presumably the maximum memory-region size — value elided in this copy. */
#define IRDMA_MAX_MR_SIZE

/*
 * Memory access-permission flag bits (local/remote read/write, MW bind,
 * zero-based addressing).  Values elided in this copy.
 */
#define IRDMA_ACCESS_FLAGS_LOCALREAD
#define IRDMA_ACCESS_FLAGS_LOCALWRITE
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY
#define IRDMA_ACCESS_FLAGS_REMOTEREAD
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW
#define IRDMA_ACCESS_FLAGS_ZERO_BASED
#define IRDMA_ACCESS_FLAGS_ALL

/*
 * Work-request operation types (RDMA write/read, send variants, MW bind,
 * fast-register, STag invalidate, NOP, receive).  Values elided.
 */
#define IRDMA_OP_TYPE_RDMA_WRITE
#define IRDMA_OP_TYPE_RDMA_READ
#define IRDMA_OP_TYPE_SEND
#define IRDMA_OP_TYPE_SEND_INV
#define IRDMA_OP_TYPE_SEND_SOL
#define IRDMA_OP_TYPE_SEND_SOL_INV
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL
#define IRDMA_OP_TYPE_BIND_MW
#define IRDMA_OP_TYPE_FAST_REG_NSMR
#define IRDMA_OP_TYPE_INV_STAG
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG
#define IRDMA_OP_TYPE_NOP
#define IRDMA_OP_TYPE_REC
#define IRDMA_OP_TYPE_REC_IMM

/* Major error code presumably used for flushed completions — value elided. */
#define IRDMA_FLUSH_MAJOR_ERR

/*
 * Enumerations shared by the user-mode verb helpers below.
 * NOTE(review): every enumerator list is empty in this copy, which is
 * not valid ISO C (an enum requires at least one enumerator) — the
 * contents appear stripped; restore from the upstream header.
 */

/* Device capability constants (queue depths, limits — names only). */
enum irdma_device_caps_const {};

/* Addressing type carried in CQ poll info (name-based inference). */
enum irdma_addressing_type {};

/* Flush completion opcodes (name-based inference). */
enum irdma_flush_opcode {};

/* Completion status codes returned by irdma_uk_cq_poll_cmpl(). */
enum irdma_cmpl_status {};

/* Notification types for irdma_uk_cq_request_notification(). */
enum irdma_cmpl_notify {};

/* QP capability flags (name-based inference). */
enum irdma_qp_caps {};

/* Forward declarations for the user-mode QP/CQ objects and their
 * initialization descriptors, defined later in this header. */
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

/*
 * NOTE(review): all struct bodies below are empty in this copy — the
 * member lists have been stripped; the comments record only what the
 * names and the prototypes that use them imply.
 */

/* Ring bookkeeping (head/tail/size) for SQ/RQ/CQ — members elided. */
struct irdma_ring {};

/* Hardware completion-queue entry layout — members elided. */
struct irdma_cqe {};

/* Extended (larger) CQE layout — members elided. */
struct irdma_extended_cqe {};

/* Per-opcode send parameters (used inside irdma_post_sq_info). */
struct irdma_post_send {};

/* Receive work-request descriptor for irdma_uk_post_receive(). */
struct irdma_post_rq_info {};

/* RDMA-write work-request parameters — members elided. */
struct irdma_rdma_write {};

/* RDMA-read work-request parameters — members elided. */
struct irdma_rdma_read {};

/* Memory-window bind parameters (IRDMA_OP_TYPE_BIND_MW). */
struct irdma_bind_window {};

/* Local STag invalidate parameters (IRDMA_OP_TYPE_INV_STAG). */
struct irdma_inv_local_stag {};

/* Send-queue work-request descriptor consumed by the irdma_uk_* posters. */
struct irdma_post_sq_info {};

/* Output of irdma_uk_cq_poll_cmpl(): one polled completion. */
struct irdma_cq_poll_info {};

/*
 * User-mode ("uk") verb helpers: build WQEs on a QP's send/receive queue
 * and poll/arm the CQ.  The int-returning functions use 0/negative-errno
 * style by kernel convention — NOTE(review): confirm against the .c
 * implementations, which are not visible here.  The bool post_sq
 * presumably controls whether the doorbell is rung immediately after
 * the WQE is written (inferred from the separate irdma_uk_qp_post_wr()).
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
		      bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info);
/* Ring the SQ doorbell for previously built (un-posted) WQEs. */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq);

/* WQE-building ops table — members elided in this copy. */
struct irdma_wqe_uk_ops {};

/* Poll one completion from the CQ into *info. */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info);
/* Arm the CQ for the requested notification type. */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
/* NOTE(review): parameter is named 'qp' but its type is a CQ pointer —
 * likely a naming slip for 'cq'; cannot rename here without the .c side. */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
		     struct irdma_qp_uk_init_info *info);
/* Derive SQ/RQ WQE shifts (and, below, depths) from the init info. */
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
			    u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift);
/* Per-WQE SQ work-request tracking entry — members elided in this copy. */
struct irdma_sq_uk_wr_trk_info {};

/* One quantum (fixed-size chunk) of a hardware WQE — members elided. */
struct irdma_qp_quanta {};

/* User-mode QP state (queues, rings, shifts) — members elided. */
struct irdma_qp_uk {};

/* User-mode CQ state — members elided. */
struct irdma_cq_uk {};

/* Input descriptor for irdma_uk_qp_init() — members elided. */
struct irdma_qp_uk_init_info {};

/* Input descriptor for irdma_uk_cq_init() — members elided. */
struct irdma_cq_uk_init_info {};

/*
 * Lower-level WQE/queue helpers shared by the verb functions above.
 * __le64 return types indicate pointers into little-endian hardware
 * WQE memory.
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
/* Flush completions belonging to queue 'q' out of the CQ. */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
/* Convert an SGE/fragment count into WQE quanta / RQ WQE size. */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
/* NOTE(review): struct irdma_uk_attrs is used here but not declared in
 * this chunk — presumably defined in another irdma header. */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *wqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */