/* linux/drivers/infiniband/sw/rxe/rxe_resp.c */

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

/* Human-readable names for enum resp_states, used for tracing/debug.
 * NOTE(review): the original designated-initializer table was lost in this
 * copy ("=;" is not valid C). A minimal placeholder is provided so the
 * declaration compiles; restore the full [RESPST_*] = "NAME" table from
 * upstream before relying on it.
 */
static char *resp_state_name[] = { "NONE" };

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{}

/* Presumably dequeues the next request packet into *pkt_p and returns the
 * next responder state — confirm against upstream; body is stripped here.
 */
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably validates the packet's PSN against the responder's expected
 * PSN — confirm against upstream; body is stripped here.
 */
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably checks that the packet opcode is a legal successor to the
 * previous opcode (first/middle/last sequencing) — confirm upstream.
 */
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably checks the QP's access-flag attributes against the operation
 * requested by the packet — confirm against upstream; body stripped.
 */
static bool check_qp_attr_access(struct rxe_qp *qp,
				 struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably validates that the packet's operation is permitted for this
 * QP type and its enabled access flags — confirm upstream.
 */
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably dequeues a receive WQE from the attached SRQ into qp->resp —
 * confirm against upstream; body is stripped here.
 */
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably verifies that a receive WQE (or responder resource slot) is
 * available to service the incoming request — confirm upstream.
 */
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably validates the packet payload length against the opcode's
 * rules and the receive buffer size — confirm upstream; body stripped.
 */
static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
					      struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* if the reth length field is zero we can assume nothing
 * about the rkey value and should not validate or use it.
 * Instead set qp->resp.rkey to 0 which is an invalid rkey
 * value since the minimum index part is 1.
 */
static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{}

static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{}

/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
 * if an invalid rkey is received or the rdma length is zero. For middle
 * or last packets use the stored value of mr.
 */
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably copies data_len bytes at data_addr into the current receive
 * WQE's scatter list — confirm against upstream; body stripped.
 */
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably copies the packet payload into the target MR for an RDMA
 * WRITE — confirm against upstream; body stripped.
 */
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					int type)
{}

/* Presumably services an incoming FLUSH operation against the resolved
 * MR — confirm against upstream; body stripped.
 */
static enum resp_states process_flush(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably executes the atomic operation (cmp&swp / fetch&add) and
 * records the original value for the reply — confirm upstream.
 */
static enum resp_states atomic_reply(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably services an ATOMIC WRITE request against the target MR —
 * confirm against upstream; body stripped.
 */
static enum resp_states atomic_write_reply(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably builds an skb carrying an ACK/response packet with the given
 * opcode, payload size, PSN and AETH syndrome — confirm upstream.
 */
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR to be invalidated or deregistered or
 * the MW if one was used to be invalidated or deallocated.
 * It is assumed that the access permissions if originally good
 * are OK and the mappings to be unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably invalidates the MR or MW identified by rkey (send-with-
 * invalidate handling) — confirm against upstream; body stripped.
 */
static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably posts a work completion to the (S)RQ's CQ for the finished
 * request — confirm against upstream; body stripped.
 */
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}


/* Presumably the common helper that builds and transmits an ack packet
 * with the given syndrome/psn/opcode; msg is for error logging — confirm
 * against upstream; body stripped.
 */
static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
				  int opcode, const char *msg)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably a thin wrapper sending a plain ACK — confirm upstream. */
static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably a thin wrapper sending an atomic acknowledge — confirm
 * upstream; body stripped.
 */
static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably a thin wrapper sending a read-response acknowledge —
 * confirm upstream; body stripped.
 */
static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably chooses and sends the appropriate acknowledgement for the
 * packet just processed — confirm against upstream; body stripped.
 */
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably releases per-packet resources (skb, mr reference) and
 * advances responder state — confirm against upstream; body stripped.
 */
static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably searches the responder resource array for one matching psn
 * (for duplicate-request replay) — confirm upstream; body stripped.
 */
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Presumably handles a duplicate (already-seen PSN) request by re-acking
 * or replaying reads/atomics — confirm upstream; body stripped.
 */
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	/* NOTE(review): empty stub — implementation stripped from this copy. */
}

/* Presumably handles a class D1/E error (NAK and recover) — confirm
 * against upstream; body stripped.
 */
static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* NOTE(review): stub — missing return value (UB if result is used). */
}

/* drain incoming request packet queue */
static void drain_req_pkts(struct rxe_qp *qp)
{}

/* complete receive wqe with flush error */
static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{}

/* drain and optionally complete the receive queue;
 * if unable to complete a wqe, stop completing and
 * just flush the remaining wqes
 */
static void flush_recv_queue(struct rxe_qp *qp, bool notify)
{
	/* NOTE(review): empty stub — implementation stripped from this copy. */
}

int rxe_receiver(struct rxe_qp *qp)
{}