linux/drivers/infiniband/hw/irdma/uk.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe index to start clearing from
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{}
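
/*
 * Illustrative sketch only (hypothetical helper, not the driver's
 * implementation): one way to clear a 128-entry window of 32-byte
 * quanta so stale valid bits from an earlier pass around the ring
 * cannot be mistaken for new WQEs.
 */
static void example_clr_wqe_window(__le64 *sq_base, u32 wqe_idx)
{
	/* 4 u64 words per 32-byte quantum; align to a 128-entry window */
	__le64 *wqe = sq_base + (wqe_idx & ~(u32)127) * 4;

	memset(wqe, 0, 128 * 32);
}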

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{}
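
/*
 * Illustrative sketch (hypothetical helper, heavily simplified): the
 * "pad with NOP" behavior documented above means that when a
 * multi-quanta WR would run past the end of the ring, the remaining
 * tail slots are consumed by NOP WQEs and the WR begins at index 0.
 */
static u32 example_choose_wr_start(u32 head, u32 ring_size, u16 quanta)
{
	if (head + quanta > ring_size)
		return 0;	/* tail is NOP-padded, WR wraps to slot 0 */
	return head;
}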

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for read with invalidate
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta based on inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{}

/**
 * irdma_inline_data_size_to_quanta - calculate quanta based on inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{}

/**
 * irdma_uk_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{}

/**
 * irdma_qp_round_up - return rounded-up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{}
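
/*
 * Illustrative sketch (an assumption, not necessarily the driver's
 * exact method): rounding a wq depth up to the next power of two with
 * the classic bit-smearing idiom. Helper name is hypothetical.
 */
static u32 example_round_up_pow2(u32 wqdepth)
{
	wqdepth--;
	wqdepth |= wqdepth >> 1;
	wqdepth |= wqdepth >> 2;
	wqdepth |= wqdepth >> 4;
	wqdepth |= wqdepth >> 8;
	wqdepth |= wqdepth >> 16;	/* smear the highest set bit down */
	return wqdepth + 1;
}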

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: maximum scatter gather elements per wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs
 * and inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe
 * size of 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1
 * (wqe size of 64 bytes). For 4 to 7 SGEs or inline data <= 101,
 * shift = 2 (wqe size of 128 bytes). Shift of 3 otherwise (wqe size of
 * 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{}
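
/*
 * Illustrative sketch (not the driver implementation; helper name is
 * hypothetical): shift selection following the thresholds documented
 * above.
 */
static inline u8 example_wqe_shift(u32 sge, u32 inline_data)
{
	if (sge <= 1 && inline_data <= 8)
		return 0;	/* 32-byte wqe */
	if (sge < 4 && inline_data <= 39)
		return 1;	/* 64-byte wqe */
	if (sge < 8 && inline_data <= 101)
		return 2;	/* 128-byte wqe */
	return 3;		/* 256-byte wqe */
}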

/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{}

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{}
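
/*
 * Illustrative sketch (hypothetical names, simplified; uses
 * example_round_up_pow2() above): a WQ depth can be derived by scaling
 * the requested size by the WQE shift, rounding up to a power of two,
 * and validating the result against the HW maximum.
 */
static int example_calc_wq_depth(u32 wq_size, u8 shift, u32 max_quanta,
				 u32 *wqdepth)
{
	*wqdepth = example_round_up_pow2(wq_size << shift);
	if (*wqdepth > max_quanta)	/* exceeds what HW supports */
		return -EINVAL;
	return 0;
}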

/* per-HW-generation dispatch tables for the WQE helpers declared above */
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
			    u8 *rq_shift)
{}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *sq_depth, u8 *sq_shift)
{}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift)
{}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the number of max fragments
 * allowed. Then the size of the wqe * the number of wqes should be the
 * amount of memory allocated for the sq and rq.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{}
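
/*
 * Worked example of the sizing rule above (illustrative numbers): with
 * 2 or 3 max fragments the shift is 1, so each SQ WQE is 64 bytes; a
 * 128-WQE SQ then needs 128 * 64 = 8192 bytes of ring memory.
 */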

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{}
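
/*
 * Illustrative sketch (an assumption about the SQ WQE layout, not
 * taken from this file): if each 32-byte quantum carries two 16-byte
 * fragments and the first quantum also holds the WQE header, the
 * quanta count grows as frag_cnt / 2 + 1.
 */
static int example_fragcnt_to_quanta(u32 frag_cnt, u16 *quanta)
{
	if (frag_cnt > 15)	/* hypothetical HW limit for this sketch */
		return -EINVAL;
	*quanta = (u16)(frag_cnt / 2 + 1);
	return 0;
}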

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{}
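
/*
 * Illustrative sketch (an assumption mirroring the SQ sketch above;
 * uses example_round_up_pow2()): the RQ WQE size is the quanta
 * footprint rounded up to a power-of-two size (32/64/128/256 bytes).
 */
static int example_fragcnt_to_rq_wqesize(u32 frag_cnt, u16 *wqe_size)
{
	if (frag_cnt > 14)	/* hypothetical HW limit for this sketch */
		return -EINVAL;
	*wqe_size = (u16)example_round_up_pow2((frag_cnt / 2 + 1) * 32);
	return 0;
}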