linux/include/rdma/rdmavt_qp.h

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
#include <linux/vmalloc.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID
#define RVT_R_REWIND_SGE

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE
#define RVT_R_RDMAR_SEQ
#define RVT_R_RSP_NAK
#define RVT_R_RSP_SEND
#define RVT_R_COMM_EST

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX
#define RVT_KDETH_QP_SUFFIX
#define RVT_KDETH_QP_PREFIX_MASK
#define RVT_KDETH_QP_PREFIX_SHIFT
#define RVT_KDETH_QP_BASE
#define RVT_KDETH_QP_MAX
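/*
 * For illustration only (a sketch, not part of this header): a driver
 * could classify a QPN using the prefix/mask/shift pattern implied
 * above. The helper name below is hypothetical.
 *
 *	static inline bool rvt_qpn_is_kdeth(u32 qpn)
 *	{
 *		return (qpn & RVT_KDETH_QP_PREFIX_MASK) ==
 *		       ((u32)RVT_KDETH_QP_PREFIX << RVT_KDETH_QP_PREFIX_SHIFT);
 *	}
 */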

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX
#define RVT_AIP_QP_SUFFIX
#define RVT_AIP_QP_PREFIX_MASK
#define RVT_AIP_QP_PREFIX_SHIFT
#define RVT_AIP_QP_BASE
#define RVT_AIP_QPN_MAX
#define RVT_AIP_QP_MAX

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if send completions are generated only for
 *                       WRs that request them (IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         to complete before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before
 *                  generating a send completion for a request not sent
 *                  via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR
#define RVT_S_BUSY
#define RVT_S_TIMER
#define RVT_S_RESP_PENDING
#define RVT_S_ACK_PENDING
#define RVT_S_WAIT_FENCE
#define RVT_S_WAIT_RDMAR
#define RVT_S_WAIT_RNR
#define RVT_S_WAIT_SSN_CREDIT
#define RVT_S_WAIT_DMA
#define RVT_S_WAIT_PIO
#define RVT_S_WAIT_TX
#define RVT_S_WAIT_DMA_DESC
#define RVT_S_WAIT_KMEM
#define RVT_S_WAIT_PSN
#define RVT_S_WAIT_ACK
#define RVT_S_SEND_ONE
#define RVT_S_UNLIMITED_CREDIT
#define RVT_S_ECN
#define RVT_S_MAX_BIT_MASK

/*
 * Drivers should allocate their own s_flags bits starting at bit 31 and
 * working down to the bit just above RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND

#define RVT_S_ANY_WAIT
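/*
 * Typical use (a sketch, not from this file): a driver's send engine
 * makes no progress while any blocking wait flag is set, e.g.
 *
 *	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
 *		return;
 */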

/* Number of opcode bits examined when checking the QP type */
#define RVT_OPCODE_QP_MASK

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK
#define RVT_POST_RECV_OK
#define RVT_PROCESS_RECV_OK
#define RVT_PROCESS_SEND_OK
#define RVT_PROCESS_NEXT_SEND_OK
#define RVT_FLUSH_SEND
#define RVT_FLUSH_RECV
#define RVT_PROCESS_OR_FLUSH_SEND
#define RVT_SEND_OR_FLUSH_OR_RECV_OK
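/*
 * Example (a sketch): post-send paths gate on these flags through
 * ib_rvt_state_ops[], indexed by the current QP state:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */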

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED
#define RVT_SEND_COMPLETION_ONLY

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr. I.e., the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {};
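/*
 * The ordering constraint above is what lets rvt_post_one_wr() perform a
 * single sized copy; conceptually (a sketch):
 *
 *	memcpy(&wqe->ud_wr, ud_wr(ibwr), sizeof(struct ib_ud_wr));
 *
 * which lands the ib_ud_wr at the start of the rvt_ud_wr and leaves attr,
 * which follows it, untouched.
 */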

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {};

/**
 * struct rvt_krwq - kernel receive work request queue
 * @p_lock: lock to protect the producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect the consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of the total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure is used to contain the head pointer,
 * tail pointer, and receive work queue entries for kernel-mode
 * users.
 */
struct rvt_krwq {};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{}

struct rvt_rq {};

/**
 * rvt_get_rq_count - count the receive work queue entries
 * in the circular buffer
 * @rq: data structure for the receive queue
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return: the total number of entries in the receive queue
 */
static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{}
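/*
 * Conceptually (a sketch of the elided body), this is plain circular
 * buffer arithmetic:
 *
 *	u32 count = head - tail;
 *
 *	if ((s32)count < 0)
 *		count += rq->size;
 *	return count;
 */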

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {};

#define RC_QP_SCALING_INTERVAL

#define RVT_OPERATION_PRIV
#define RVT_OPERATION_ATOMIC
#define RVT_OPERATION_ATOMIC_SGE
#define RVT_OPERATION_LOCAL
#define RVT_OPERATION_USE_RESERVE
#define RVT_OPERATION_IGN_RNR_CNT

#define RVT_OPERATION_MAX

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * drivers can support potentially different
 * sets of operations.
 */
struct rvt_operation_params {};
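/*
 * A driver's operation table entry might look like this sketch
 * (hypothetical values):
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 */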

/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; both are held only in modify_qp() or when changing the
 * QP 'state'.
 */
struct rvt_qp {};

struct rvt_srq {};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{}

#define RVT_QPN_MAX
#define RVT_QPNMAP_ENTRIES
#define RVT_BITS_PER_PAGE
#define RVT_BITS_PER_PAGE_MASK
#define RVT_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {};

struct rvt_qpn_table {};

struct rvt_qp_ibdev {};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {};

struct rvt_mcast_addr {};

struct rvt_mcast {};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{}

/**
 * rvt_is_user_qp - return true if this is a user-mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a WQE's use of a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the update of the s_last
 * ring index and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{}
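/*
 * Conceptually (a sketch of the elided body):
 *
 *	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
 *		atomic_dec(&qp->s_reserved_used);
 *		smp_mb__after_atomic();
 *	}
 */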

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{}
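/*
 * One common way to do this (a sketch): sign-extend the 24-bit
 * difference so wraparound is handled:
 *
 *	return (((int)a) - ((int)b)) << 8 >> 8;
 */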

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round-up divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift-based MTU round-up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{}
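/*
 * A sketch of the shift-based form, assuming qp->pmtu and qp->log_pmtu
 * are kept in sync:
 *
 *	return (len + qp->pmtu - 1) >> qp->log_pmtu;
 */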

/**
 * rvt_div_mtu - mtu divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift-based MTU divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies (per IBTA, the timeout period is
 * 4.096 usec * 2^timeout).
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{}

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{}
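/*
 * Typical usage (a sketch): take a reference before dropping the RCU
 * read lock if the QP will be used afterwards:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 *	...
 *	rvt_put_qp(qp);
 */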

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to the completion queue
 *			on behalf of the receive queue
 * @qp: the QP owning the receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper around rvt_cq_enter() for the receive queue. If
 * rvt_cq_enter() returns false, the CQ is full and the QP is put into
 * the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{}

/**
 * rvt_send_cq - add a new entry to the completion queue
 *                        on behalf of the send queue
 * @qp: the QP owning the send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper around rvt_cq_enter() for the send queue. If
 * rvt_cq_enter() returns false, the CQ is full and the QP is put into
 * the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, then insert a send
 * completion into the completion queue if the QP
 * indicates one should be generated.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{}
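/*
 * Typical driver usage (a sketch; the opcode is driver dependent):
 *
 *	last = rvt_qp_complete_swqe(qp, wqe, IB_WC_SEND, IB_WC_SUCCESS);
 */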

extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {};

/**
 * ib_cq_tail - Return the tail index of the cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print() to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{}

/**
 * ib_cq_head - Return the head index of the cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print() to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{}

/**
 * rvt_free_rq - free memory allocated for the rvt_rq struct
 * @rq: receive queue data structure
 *
 * This function should only be called if rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
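/*
 * Example iteration (a sketch; qp_dump_cb is hypothetical):
 *
 *	static void qp_dump_cb(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, qp_dump_cb);
 */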
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */