linux/drivers/infiniband/hw/hfi1/tid_rdma.c

// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 - 2020 Intel Corporation.
 *
 */

#include "hfi.h"
#include "qp.h"
#include "rc.h"
#include "verbs.h"
#include "tid_rdma.h"
#include "exp_rcv.h"
#include "trace.h"

/**
 * DOC: TID RDMA READ protocol
 *
 * This is an end-to-end protocol at the hfi1 level between two nodes that
 * improves performance by avoiding data copy on the requester side. It
 * converts a qualified RDMA READ request into a TID RDMA READ request on
 * the requester side and thereafter handles the request and response
 * differently. To be qualified, the RDMA READ request should meet the
 * following:
 * -- The total data length should be greater than 256K;
 * -- The total data length should be a multiple of 4K page size;
 * -- Each local scatter-gather entry should be 4K page aligned;
 * -- Each local scatter-gather entry should be a multiple of 4K page size;
 */
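
/*
 * Illustrative only (not the driver's actual check): a minimal sketch of
 * the qualification rules listed above, expressed over caller-supplied
 * (address, length) pairs rather than the driver's rvt_sge structures.
 * The 256K (0x40000) and 4K (0xfff mask) constants come from the DOC
 * block; the helper name and its parameters are hypothetical.
 */
static inline bool example_tid_rdma_read_qualifies(const u64 *addrs,
						   const u32 *lens, int nents)
{
	u64 total = 0;
	int i;

	for (i = 0; i < nents; i++) {
		/* Each local SGE must be 4K aligned and a multiple of 4K. */
		if ((addrs[i] & 0xfffULL) || (lens[i] & 0xfff))
			return false;
		total += lens[i];
	}
	/* Total length must exceed 256K and be a multiple of 4K. */
	return total > 0x40000 && !(total & 0xfff);
}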

#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK
#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK
#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK
#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK

/* Maximum number of packets within a flow generation. */
#define MAX_TID_FLOW_PSN

#define GENERATION_MASK

static u32 mask_generation(u32 a)
{}

/* Reserved generation value to set to unused flows for kernel contexts */
#define KERN_GENERATION_RESERVED

/*
 * J_KEY for kernel contexts when TID RDMA is used.
 * See generate_jkey() in hfi.h for more information.
 */
#define TID_RDMA_JKEY
#define HFI1_KERNEL_MIN_JKEY
#define HFI1_KERNEL_MAX_JKEY

/* Maximum number of segments in flight per QP request. */
#define TID_RDMA_MAX_READ_SEGS_PER_REQ
#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ
#define MAX_REQ
#define MAX_FLOWS

#define MAX_EXPECTED_PAGES

#define TID_RDMA_DESTQP_FLOW_SHIFT
#define TID_RDMA_DESTQP_FLOW_MASK

#define TID_OPFN_QP_CTXT_MASK
#define TID_OPFN_QP_CTXT_SHIFT
#define TID_OPFN_QP_KDETH_MASK
#define TID_OPFN_QP_KDETH_SHIFT
#define TID_OPFN_MAX_LEN_MASK
#define TID_OPFN_MAX_LEN_SHIFT
#define TID_OPFN_TIMEOUT_MASK
#define TID_OPFN_TIMEOUT_SHIFT
#define TID_OPFN_RESERVED_MASK
#define TID_OPFN_RESERVED_SHIFT
#define TID_OPFN_URG_MASK
#define TID_OPFN_URG_SHIFT
#define TID_OPFN_VER_MASK
#define TID_OPFN_VER_SHIFT
#define TID_OPFN_JKEY_MASK
#define TID_OPFN_JKEY_SHIFT
#define TID_OPFN_MAX_READ_MASK
#define TID_OPFN_MAX_READ_SHIFT
#define TID_OPFN_MAX_WRITE_MASK
#define TID_OPFN_MAX_WRITE_SHIFT

/*
 * OPFN TID layout
 *
 * 63               47               31               15
 * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
 * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
 * N - the context Number
 * K - the Kdeth_qp
 * M - Max_len
 * T - Timeout
 * D - reserveD
 * V - version
 * U - Urg capable
 * J - Jkey
 * R - max_Read
 * W - max_Write
 * C - Capcode
 */
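
/*
 * Illustrative only: a minimal sketch of packing the OPFN fields per the
 * bit layout drawn above.  Widths and shifts are read directly off the
 * diagram, not from the TID_OPFN_* masks; the helper name and parameter
 * list are hypothetical (the real encoding is done by
 * tid_rdma_opfn_encode() on a struct tid_rdma_params).
 */
static inline u64 example_tid_opfn_pack(u8 ctxt, u8 kdeth_qp, u16 max_len,
					u8 timeout, u8 urg, u8 ver, u8 jkey,
					u8 max_read, u8 max_write, u8 capcode)
{
	return ((u64)ctxt << 56) |                 /* N: bits 63..56 */
	       ((u64)kdeth_qp << 48) |             /* K: bits 55..48 */
	       ((u64)(max_len & 0x7ff) << 37) |    /* M: bits 47..37 */
	       ((u64)(timeout & 0x1f) << 32) |     /* T: bits 36..32 */
						   /* D: bits 31..26 stay 0 */
	       ((u64)(urg & 0x1) << 25) |          /* U: bit  25 */
	       ((u64)(ver & 0x7) << 22) |          /* V: bits 24..22 */
	       ((u64)(jkey & 0x3f) << 16) |        /* J: bits 21..16 */
	       ((u64)(max_read & 0x3f) << 10) |    /* R: bits 15..10 */
	       ((u64)(max_write & 0x3f) << 4) |    /* W: bits  9..4 */
	       (u64)(capcode & 0xf);               /* C: bits  3..0 */
}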

static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp);
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req);
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
static void hfi1_tid_timeout(struct timer_list *t);
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
static void hfi1_tid_retry_timeout(struct timer_list *t);
static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps);
static void hfi1_do_tid_send(struct rvt_qp *qp);
static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn);
static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn);

static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
{}

static void tid_rdma_schedule_ack(struct rvt_qp *qp)
{}

static void tid_rdma_trigger_ack(struct rvt_qp *qp)
{}

static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{}

static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
{}

void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{}

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
{}

bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
{}

bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
{}

void tid_rdma_conn_error(struct rvt_qp *qp)
{}

/* This is called at context initialization time */
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
{}

/**
 * qp_to_rcd - determine the receive context used by a qp
 * @rdi: rvt dev struct
 * @qp: the qp
 *
 * This routine returns the receive context associated
 * with a qp's qpn.
 *
 * Return: the context.
 */
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
				       struct rvt_qp *qp)
{}

int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr)
{}

void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{}

/* Flow and tid waiter functions */
/**
 * DOC: lock ordering
 *
 * There are two locks involved with the queuing
 * routines: the qp s_lock and the exp_lock.
 *
 * Since the tid space allocation is called from
 * the send engine, the qp s_lock is already held.
 *
 * The allocation routines will get the exp_lock.
 *
 * The first_qp() call is provided to allow the head of
 * the rcd wait queue to be fetched under the exp_lock and
 * followed by a drop of the exp_lock.
 *
 * Any qp in the wait list will have the qp reference count held
 * to hold the qp in memory.
 */
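
/*
 * Illustrative only: the nesting described above, sketched as a
 * hypothetical allocation path.  first_qp() and
 * tid_rdma_schedule_tid_wakeup() are the helpers declared in this file;
 * the surrounding control flow and locals are assumptions.
 */
#if 0	/* not compiled; shows lock ordering only */
	lockdep_assert_held(&qp->s_lock);	/* held by the send engine */
	spin_lock(&rcd->exp_lock);		/* nests inside the s_lock */
	/* ... allocate flow / TID resources ... */
	fqp = first_qp(rcd, queue);		/* takes a qp reference */
	spin_unlock(&rcd->exp_lock);
	tid_rdma_schedule_tid_wakeup(fqp);	/* dispatches/drops the ref */
#endif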

/*
 * return head of rcd wait list
 *
 * Must hold the exp_lock.
 *
 * Get a reference to the QP to hold the QP in memory.
 *
 * The caller must release the reference when the local
 * pointer is no longer being used.
 */
static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue)
	__must_hold(&rcd->exp_lock)
{}

/**
 * kernel_tid_waiters - determine rcd wait
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the head of the qp being processed
 *
 * This routine will return false IFF
 * the list is empty or the head of the
 * list is the indicated qp.
 *
 * Must hold the qp s_lock and the exp_lock.
 *
 * Return:
 * false if either of the conditions below is satisfied:
 * 1. The list is empty or
 * 2. The indicated qp is at the head of the list and the
 *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
 * true is returned otherwise.
 */
static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{}
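
/*
 * Illustrative only: the boolean condition spelled out in the Return
 * section of kernel_tid_waiters() above, assuming fqp is the current head
 * of the wait list (or NULL if the list is empty).
 */
#if 0	/* not compiled; condition sketch only */
	waiting = !(!fqp || (fqp == qp &&
			     (qp->s_flags & HFI1_S_WAIT_TID_SPACE)));
#endif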

/**
 * dequeue_tid_waiter - dequeue the qp from the list
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp to remove from the wait list
 *
 * This routine removes the indicated qp from the
 * wait list if it is there.
 *
 * This should be done after the hardware flow and
 * tid array resources have been allocated.
 *
 * Must hold the qp s_lock and the rcd exp_lock.
 *
 * It assumes the s_lock to protect the s_flags
 * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
 */
static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{}

/**
 * queue_qp_for_tid_wait - suspend QP on tid space
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp
 *
 * The qp is inserted at the tail of the rcd
 * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
 *
 * Must hold the qp s_lock and the exp_lock.
 */
static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
				  struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{}

/**
 * __trigger_tid_waiter - trigger tid waiter
 * @qp: the qp
 *
 * This is a private entrance to schedule the qp
 * assuming the caller is holding the qp->s_lock.
 */
static void __trigger_tid_waiter(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{}

/**
 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
 * @qp: the qp
 *
 * Trigger a schedule of a waiting qp in a deadlock-safe
 * manner.  The qp reference is held prior
 * to this call via first_qp().
 *
 * If the qp trigger was already scheduled (!rval),
 * the reference is dropped; otherwise the resume
 * or the destroy cancel will dispatch the reference.
 */
static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
{}

/**
 * tid_rdma_trigger_resume - field a trigger work request
 * @work: the work item
 *
 * Complete the off qp trigger processing by directly
 * calling the progress routine.
 */
static void tid_rdma_trigger_resume(struct work_struct *work)
{}

/*
 * _tid_rdma_flush_wait - unwind any tid space wait
 *
 * This is called when resetting a qp to
 * allow a destroy or reset to get rid
 * of any tid space linkage and reference counts.
 */
static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
	__must_hold(&qp->s_lock)
{}

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{}

/* Flow functions */
/**
 * kern_reserve_flow - allocate a hardware flow
 * @rcd: the context to use for allocation
 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
 *         signify "don't care".
 *
 * Use a bit mask based allocation to reserve a hardware
 * flow for use in receiving KDETH data packets. If a preferred flow is
 * specified the function will attempt to reserve that flow again, if
 * available.
 *
 * The exp_lock must be held.
 *
 * Return:
 * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
 * On failure: -EAGAIN
 */
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
	__must_hold(&rcd->exp_lock)
{}
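
/*
 * Illustrative only: the bit-mask style reservation described above,
 * sketched against a caller-supplied mask rather than the receive
 * context's real bookkeeping.  "preferred" plays the role of @last; the
 * helper name and parameters are hypothetical.
 */
static inline int example_reserve_flow(unsigned long *mask, int preferred,
				       unsigned int nflows)
{
	unsigned long nr;

	/* Try the preferred flow first, if one was requested. */
	if (preferred >= 0 && preferred < (int)nflows &&
	    !test_and_set_bit(preferred, mask))
		return preferred;

	/* Otherwise take the first free flow, if any. */
	nr = find_first_zero_bit(mask, nflows);
	if (nr >= nflows)
		return -EAGAIN;
	set_bit(nr, mask);
	return nr;
}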

static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
			     u32 flow_idx)
{}

static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{}

static u32 kern_flow_generation_next(u32 gen)
{}

static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{}

int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{}

void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{}

void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
{}

/* TID allocation functions */
static u8 trdma_pset_order(struct tid_rdma_pageset *s)
{}

/**
 * tid_rdma_find_phys_blocks_4k - get groups based on MR info
 * @flow: overall info for a TID RDMA segment
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine returns the number of groups associated with
 * the current sge information.  This implementation is based
 * on the expected receive find_phys_blocks() adjusted to
 * use the MR information vs. the pfn.
 *
 * Return:
 * the number of RcvArray entries
 */
static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{}

/**
 * tid_flush_pages - dump out pages into pagesets
 * @list: list of pagesets
 * @idx: pointer to current page index
 * @pages: number of pages to dump
 * @sets: current number of pagesets
 *
 * This routine flushes out accumulated pages.
 *
 * To ensure an even number of sets the
 * code may add a filler.
 *
 * This can happen when pages is not
 * a power of 2 or pages is a power of 2
 * less than the maximum pages.
 *
 * Return:
 * The new number of sets
 */

static u32 tid_flush_pages(struct tid_rdma_pageset *list,
			   u32 *idx, u32 pages, u32 sets)
{}
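
/*
 * Illustrative only: the "filler" idea from the comment above.  If the
 * running pageset count is odd after flushing, a zero-length set is
 * appended so the total stays even.  The .count field name mirrors the
 * tid_rdma_pageset parameter but should be treated as an assumption.
 */
#if 0	/* not compiled; sketch of the filler step */
	if (sets & 1)
		list[sets++].count = 0;	/* filler pageset */
#endif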

/**
 * tid_rdma_find_phys_blocks_8k - get groups based on MR info
 * @flow: overall info for a TID RDMA segment
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine parses an array of pages to compute pagesets
 * in an 8k compatible way.
 *
 * Pages are tested two at a time: i and i + 1 for contiguous
 * pages, and i - 1 and i for contiguous pages.
 *
 * If any condition is false, any accumulated pages are flushed and
 * v0, v1 are emitted as separate PAGE_SIZE pagesets.
 *
 * Otherwise, the current 8k is totaled for a future flush.
 *
 * Return:
 * The number of pagesets
 * list set with the returned number of pagesets
 *
 */
static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{}
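
/*
 * Illustrative only: the pairwise adjacency test described in the comment
 * above, using kernel virtual addresses (consistent with the "MR
 * information vs. the pfn" note for the 4k variant).  The helper name is
 * hypothetical.
 */
static inline bool example_pages_adjacent(struct page *a, struct page *b)
{
	return (char *)page_address(a) + PAGE_SIZE == (char *)page_address(b);
}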

/*
 * Find pages for one segment of a sge array represented by @ss. The function
 * does not check the sge; the sge must have been checked for alignment with a
 * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
 * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
 * copy maintained in @ss->sge, the original sge is not modified.
 *
 * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
 * releasing the MR reference count at the same time. Otherwise, we'll "leak"
 * references to the MR. This difference requires that we keep track of progress
 * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
 * structure.
 */
static u32 kern_find_pages(struct tid_rdma_flow *flow,
			   struct page **pages,
			   struct rvt_sge_state *ss, bool *last)
{}

static void dma_unmap_flow(struct tid_rdma_flow *flow)
{}

static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
{}

static inline bool dma_mapped(struct tid_rdma_flow *flow)
{}

/*
 * Get pages pointers and identify contiguous physical memory chunks for a
 * segment. All segments are of length flow->req->seg_len.
 */
static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
				struct page **pages,
				struct rvt_sge_state *ss, bool *last)
{}

static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
				     struct hfi1_ctxtdata *rcd, char *s,
				     struct tid_group *grp, u8 cnt)
{}

/*
 * Try to allocate pageset_count TID's from TID groups for a context
 *
 * This function allocates TID's without moving groups between lists or
 * modifying grp->map. This is done as follows, being cognizant of the lists
 * between which the TID groups will move:
 * 1. First allocate complete groups of 8 TID's since this is more efficient;
 *    these groups will move from group->full without affecting used
 * 2. If more TID's are needed, allocate from used (will move from used->full or
 *    stay in used)
 * 3. If we still don't have the required number of TID's, go back and look again
 *    at a complete group (will move from group->used)
 */
static int kern_alloc_tids(struct tid_rdma_flow *flow)
{}

static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
				   u32 *pset_idx)
{}

static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
{}

static void kern_program_rcvarray(struct tid_rdma_flow *flow)
{}

/**
 * hfi1_kern_exp_rcv_setup() - set up TID's and flow for one segment of a
 * TID RDMA request
 *
 * @req: TID RDMA request for which the segment/flow is being set up
 * @ss: sge state, maintains state across successive segments of a sge
 * @last: set to true after the last sge segment has been processed
 *
 * This function
 * (1) finds a free flow entry in the flow circular buffer
 * (2) finds pages and contiguous physical chunks constituting one segment
 *     of an sge
 * (3) allocates TID group entries for those chunks
 * (4) programs rcvarray entries in the hardware corresponding to those
 *     TID's
 * (5) computes a tidarray with formatted TID entries which can be sent
 *     to the sender
 * (6) reserves and programs HW flows
 * (7) queues the QP when TID/flow resources are not
 *     available
 *
 * @req points to struct tid_rdma_request of which the segments are a part. The
 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
 * req->flow_idx is the index of the flow which has been prepared in this
 * invocation of function call. With flow = &req->flows[req->flow_idx],
 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
 * sends and flow->npkts contains number of packets required to send the
 * segment.
 *
 * hfi1_check_sge_align should be called prior to calling this function; if
 * it signals an error, TID RDMA cannot be used for this sge and this function
 * should not be called.
 *
 * For the queuing, the caller must hold the flow->req->qp s_lock from the send
 * engine, and the function will acquire the exp_lock.
 *
 * Return:
 * The function returns -EAGAIN if a sufficient number of TID/flow resources to
 * map the segment could not be allocated. In this case, the function should be
 * called again with the previous arguments to retry the TID allocation. There
 * are no other error returns; the function returns 0 on success.
 */
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last)
	__must_hold(&req->qp->s_lock)
{}
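
/*
 * Illustrative only: the retry contract documented above, as a
 * hypothetical caller in the send engine (which already holds
 * req->qp->s_lock) might use it.  The local variables are assumptions;
 * flow_idx, flows, tid_entry and npkts are the members named in the
 * comment above.
 */
#if 0	/* not compiled; usage sketch only */
	if (hfi1_kern_exp_rcv_setup(req, ss, &last) == -EAGAIN)
		return;	/* QP queued on TID space; retry later with same args */

	flow = &req->flows[req->flow_idx];
	/* flow->tid_entry[] and flow->npkts now describe this segment. */
#endif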

static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
{}

/*
 * This function is called after one segment has been successfully sent to
 * release the flow and TID HW/SW resources for that segment. The segments for a
 * TID RDMA request are set up and cleared in FIFO order, which is managed using a
 * circular buffer.
 */
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{}
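
/*
 * Illustrative only: the circular-buffer bookkeeping mentioned above,
 * sketched as a generic ring-index advance over a power-of-two number of
 * flow slots (assuming the ring size is a power of two).  The helper name
 * is hypothetical.
 */
static inline u16 example_ring_next(u16 idx, u16 ring_size_pow2)
{
	return (idx + 1) & (ring_size_pow2 - 1);
}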

/*
 * This function is called to release all the tid entries for
 * a request.
 */
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{}

/**
 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
 * @req: the tid rdma request to be cleaned
 */
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
{}

/**
 * __trdma_clean_swqe - clean up for large sized QPs
 * @qp: the queue pair
 * @wqe: the send wqe
 */
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

/*
 * This can be called at QP create time or in the data path.
 */
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp)
{}

static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req)
{}

u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data)
{}

static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
					  u32 psn, u16 *fidx)
{}

/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr, u32 *bth1,
				    u32 *bth2, u32 *len)
{}

/*
 * @len: contains the data length to read upon entry and the read request
 *       payload length upon exit.
 */
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len)
	__must_hold(&qp->s_lock)
{}

/*
 * Validate and accept the TID RDMA READ request parameters.
 * Return 0 if the request is accepted successfully;
 * Return 1 otherwise.
 */
static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
				     struct rvt_ack_entry *e,
				     struct hfi1_packet *packet,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 psn, u64 vaddr, u32 len)
{}

static int tid_rdma_rcv_error(struct hfi1_packet *packet,
			      struct ib_other_headers *ohdr,
			      struct rvt_qp *qp, u32 psn, int diff)
{}

void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
{}

u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u32 *bth0,
				  u32 *bth1, u32 *bth2, u32 *len, bool *last)
{}

static inline struct tid_rdma_request *
find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
	__must_hold(&qp->s_lock)
{}

void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
{}

void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{}

static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
{}

static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
				      struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

/*
 * Handle the KDETH eflags for TID RDMA READ response.
 *
 * Return true if the last packet for a segment has been received and it is
 * time to process the response normally; otherwise, return false.
 *
 * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
 */
static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
				     struct hfi1_packet *packet, u8 rcv_type,
				     u8 rte, u32 psn, u32 ibpsn)
	__must_hold(&packet->qp->r_lock) __must_hold(RCU)
{}

bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			      struct hfi1_pportdata *ppd,
			      struct hfi1_packet *packet)
{}

/*
 * "Rewind" the TID request information.
 * This means that we reset the state back to ACTIVE,
 * find the proper flow, set the flow index to that flow,
 * and reset the flow information.
 */
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       u32 *bth2)
{}

void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
{}

bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

/* Does @sge meet the alignment requirements for tid rdma? */
static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
					struct rvt_sge *sge, int num_sge)
{}

void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{}

/* TID RDMA WRITE functions */

u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct ib_other_headers *ohdr,
				  u32 *bth1, u32 *bth2, u32 *len)
{}

static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
{}

static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
			     struct tid_queue *queue)
{}

/*
 * @qp: points to rvt_qp context.
 * @to_seg: desired RNR timeout in segments.
 * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
 */
static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
{}
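
/*
 * Illustrative only: the "next highest timeout" lookup described above,
 * over a hypothetical table of timeout values in microseconds assumed to
 * be sorted in ascending order (the real code consults the IB RNR timeout
 * table).  Returns the index of the first entry >= the desired timeout,
 * or 0 if none qualifies.
 */
static inline u32 example_next_highest_timeout(const u32 *tbl_usec,
					       u32 nentries, u32 want_usec)
{
	u32 i;

	for (i = 1; i < nentries; i++)
		if (tbl_usec[i] >= want_usec)
			return i;
	return 0;
}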

/*
 * Central place for resource allocation at the TID write responder.
 * It is called from the write_req and write_data interrupt handlers as
 * well as from the send thread when a queued QP is scheduled for
 * resource allocation.
 *
 * Iterates over (a) segments of a request and then (b) queued requests
 * themselves to allocate resources for up to local->max_write
 * segments across multiple requests. Allocation stops when a sync
 * point is hit and resumes after the data packets at the sync point
 * have been received.
 *
 * Resource allocation and sending of responses are decoupled. The
 * request/segment which are being allocated and sent are as follows.
 * Resources are allocated for:
 *     [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
 * The send thread sends:
 *     [request: qp->s_tail_ack_queue, segment: req->cur_seg]
 */
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
{}

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
{}

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				   struct ib_other_headers *ohdr, u32 *bth1,
				   u32 bth2, u32 *len,
				   struct rvt_sge_state **ss)
{}

static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
{}

static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
{}

static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
{}

void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
{}

static void hfi1_tid_timeout(struct timer_list *t)
{}

void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
{}

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len)
{}

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
{}

static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
{}

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2)
{}

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
{}

void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
{}

static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
{}

static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
{}

void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
{}

static void hfi1_tid_retry_timeout(struct timer_list *t)
{}

u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx)
{}

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
{}

/*
 * Call this function when the last TID RDMA WRITE DATA packet for a request
 * is built.
 */
static void update_tid_tail(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{}

int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
	__must_hold(&qp->s_lock)
{}

static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps)
{}

static int hfi1_send_tid_ok(struct rvt_qp *qp)
{}

void _hfi1_do_tid_send(struct work_struct *work)
{}

static void hfi1_do_tid_send(struct rvt_qp *qp)
{}

static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
{}

/**
 * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
 * @qp: the QP
 *
 * This schedules qp progress on the TID RDMA state machine. Caller
 * should hold the s_lock.
 * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
 * the two state machines can step on each other with respect to the
 * RVT_S_BUSY flag.
 * Therefore, a modified test is used.
 *
 * Return: %true if the second leg is scheduled;
 *  %false if the second leg is not scheduled.
 */
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{}

bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
{}

static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
{}

static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn)
{}

static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn)
{}