linux/drivers/infiniband/sw/siw/siw_qp.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Bernard Metzler <[email protected]> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = {
	[SIW_QP_STATE_IDLE] = "IDLE",
	[SIW_QP_STATE_RTR] = "RTR",
	[SIW_QP_STATE_RTS] = "RTS",
	[SIW_QP_STATE_CLOSING] = "CLOSING",
	[SIW_QP_STATE_TERMINATE] = "TERMINATE",
	[SIW_QP_STATE_ERROR] = "ERROR"
};

/*
 * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
 * per-RDMAP message basis. Please keep the order of the initializer. All
 * MPA lengths are initialized to the minimum packet size.
 */
struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = {};
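
/*
 * Illustrative sketch only (not part of the original initializer above):
 * how a single entry for RDMAP_SEND might look, assuming the
 * iwarp_msg_info/iwarp_ctrl layout and the siw_proc_send() receive
 * handler from iwarp.h/siw.h. The MPA length starts at the minimum
 * (header-only) packet size, i.e. the header length minus the 2-byte
 * MPA length field itself.
 */
#if 0	/* sketch, not built */
static const struct iwarp_msg_info iwarp_pktinfo_send_sketch = {
	.hdr_len = sizeof(struct iwarp_send),
	.ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	.ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST |
			       cpu_to_be16(DDP_VERSION << 8) |
			       cpu_to_be16(RDMAP_VERSION << 6) |
			       cpu_to_be16(RDMAP_SEND),
	.rx_data = siw_proc_send
};
#endif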

void siw_qp_llp_data_ready(struct sock *sk)
{}

void siw_qp_llp_close(struct siw_qp *qp)
{}

/*
 * Socket callback routine informing about newly available send space.
 * Schedules SQ processing if SQ work is pending.
 */
void siw_qp_llp_write_space(struct sock *sk)
{}
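
/*
 * Minimal sketch of the callback pattern described above, assuming
 * sk_to_cep() maps the socket to its connection endpoint, the endpoint
 * caches the socket's original write_space upcall, and siw_sq_start()
 * schedules SQ processing (as declared in siw_cm.h/siw.h). Not the
 * original implementation.
 */
#if 0	/* sketch, not built */
static void siw_qp_llp_write_space_sketch(struct sock *sk)
{
	struct siw_cep *cep;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (cep) {
		/* Chain to the socket's original write_space upcall. */
		cep->sk_write_space(sk);

		/* Kick SQ processing only if send space is really there. */
		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			(void)siw_sq_start(cep->qp);
	}
	read_unlock(&sk->sk_callback_lock);
}
#endif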

static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{}

static int siw_qp_enable_crc(struct siw_qp *qp)
{}

/*
 * Send a non-signalled READ or WRITE to the peer side as negotiated
 * with the MPAv2 P2P setup protocol. The work request is only created
 * as the current active WR and does not consume Send Queue space.
 *
 * Caller must hold QP state lock.
 */
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
{}
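
/*
 * Hedged sketch of the RTR work request setup, assuming the
 * MPA_V2_RDMA_WRITE_RTR/MPA_V2_RDMA_READ_RTR flags from iwarp.h and
 * the driver's tx_wqe() accessor. The WR is written directly into the
 * active transmit WQE, so no SQ slot is consumed. Locking and READ
 * bookkeeping are omitted; not the original implementation.
 */
#if 0	/* sketch, not built */
static int siw_qp_mpa_rts_sketch(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
{
	struct siw_wqe *wqe = tx_wqe(qp);

	if (wqe->wr_status != SIW_WR_IDLE)
		return -EIO;	/* another transmit is in progress */

	wqe->wr_status = SIW_WR_QUEUED;
	wqe->sqe.flags = 0;		/* not signalled */
	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;	/* zero length READ or WRITE */

	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
		wqe->sqe.opcode = SIW_OP_WRITE;
	else if (ctrl & MPA_V2_RDMA_READ_RTR)
		wqe->sqe.opcode = SIW_OP_READ;
	else
		return -EPROTO;

	return 0;
}
#endif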

/*
 * Map memory access error to DDP tagged error
 */
enum ddp_ecode siw_tagged_error(enum siw_access_state state)
{}

/*
 * Map memory access error to RDMAP protection error
 */
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state)
{}

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
			u8 ecode, int in_tx)
{}

/*
 * Send a TERMINATE message, as defined in RFCs 5040/5041/5044/6581.
 * Sending TERMINATE messages is best effort - such messages
 * can only be sent if the QP is still connected and does
 * not have another outbound message in progress, i.e. the
 * TERMINATE message must not interfere with an incomplete current
 * transmit operation.
 */
void siw_send_terminate(struct siw_qp *qp)
{}
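
/*
 * Hedged sketch of the best-effort guards only, assuming
 * qp->term_info.valid flags a pending TERMINATE and that tx_wqe() and
 * SIW_WR_INPROGRESS describe the current transmit state as elsewhere
 * in the driver. Building and sending the actual TERMINATE packet is
 * omitted; not the original implementation.
 */
#if 0	/* sketch, not built */
static void siw_send_terminate_guard_sketch(struct siw_qp *qp)
{
	struct socket *s = qp->attrs.sk;

	if (!qp->term_info.valid)
		return;			/* nothing to report */

	qp->term_info.valid = 0;

	if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS)
		return;			/* must not disturb current transmit */

	if (!s && qp->cep)
		/* QP not yet in RTS: take the socket from the CEP. */
		s = qp->cep->sock;

	if (!s)
		return;			/* no longer connected: give up */
}
#endif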

/*
 * Handle all attrs other than state
 */
static void siw_qp_modify_nonstate(struct siw_qp *qp,
				   struct siw_qp_attrs *attrs,
				   enum siw_qp_attr_mask mask)
{}

static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
				      struct siw_qp_attrs *attrs,
				      enum siw_qp_attr_mask mask)
{}

static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
				     struct siw_qp_attrs *attrs)
{}

static void siw_qp_nextstate_from_term(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{}

static int siw_qp_nextstate_from_close(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{}

/*
 * Caller must hold qp->state_lock
 */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
		  enum siw_qp_attr_mask mask)
{}
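
/*
 * Sketch of the state dispatch, based on the per-state helpers above.
 * Assumes the SIW_QP_ATTR_STATE mask bit and the SIW_QP_STATE_*
 * enumeration from siw.h; details such as connection teardown on some
 * RTS transitions are omitted. Not the original implementation.
 */
#if 0	/* sketch, not built */
static int siw_qp_modify_sketch(struct siw_qp *qp, struct siw_qp_attrs *attrs,
				enum siw_qp_attr_mask mask)
{
	int rv = 0;

	if (!mask)
		return 0;

	if (mask != SIW_QP_ATTR_STATE)
		siw_qp_modify_nonstate(qp, attrs, mask);

	if (!(mask & SIW_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_RTR:
		rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
		break;
	case SIW_QP_STATE_RTS:
		rv = siw_qp_nextstate_from_rts(qp, attrs);
		break;
	case SIW_QP_STATE_TERMINATE:
		siw_qp_nextstate_from_term(qp, attrs);
		break;
	case SIW_QP_STATE_CLOSING:
		rv = siw_qp_nextstate_from_close(qp, attrs);
		break;
	default:
		break;
	}
	return rv;
}
#endif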

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
{}

static int siw_activate_tx_from_sq(struct siw_qp *qp)
{}

/*
 * Must be called with SQ locked.
 * To avoid complete SQ starvation by a constant stream of inbound
 * READ requests, IRQ processing yields to the SQ once the
 * qp->irq_burst counter reaches its limit, provided the SQ has
 * pending work.
 */
int siw_activate_tx(struct siw_qp *qp)
{}
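
/*
 * Sketch of the fairness rule described above, assuming the
 * SIW_IRQ_MAXBURST_SQ_ACTIVE limit and the sq_get_next() helper from
 * siw.h. Actually activating the IRQ element is omitted; not the
 * original implementation.
 */
#if 0	/* sketch, not built */
static int siw_activate_tx_fairness_sketch(struct siw_qp *qp)
{
	struct siw_sqe *irqe;

	if (!qp->attrs.irq_size)
		/* No inbound READ queue configured: serve the SQ only. */
		return siw_activate_tx_from_sq(qp);

	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
	if (!(irqe->flags & SIW_WQE_VALID))
		return siw_activate_tx_from_sq(qp);

	/*
	 * While the SQ has pending work, serve at most
	 * SIW_IRQ_MAXBURST_SQ_ACTIVE consecutive IRQ elements
	 * (counted in qp->irq_burst) before returning to the SQ.
	 */
	if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
		qp->irq_burst = 0;
		return siw_activate_tx_from_sq(qp);
	}
	/* ... activate irqe as current READ RESPONSE transmit ... */
	return 1;
}
#endif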

/*
 * Check if current CQ state qualifies for calling CQ completion
 * handler. Must be called with CQ lock held.
 */
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{}
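
/*
 * Sketch of the one-shot notification check, assuming the shared
 * cq->notify->flags word and the SIW_NOTIFY_* bits from the siw user
 * ABI. Not the original implementation.
 */
#if 0	/* sketch, not built */
static bool siw_cq_notify_now_sketch(struct siw_cq *cq, u32 flags)
{
	u32 cq_notify;

	if (!cq->base_cq.comp_handler)
		return false;

	/* Read the application-armed notification state. */
	cq_notify = READ_ONCE(cq->notify->flags);

	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
	     (flags & SIW_WQE_SOLICITED))) {
		/* Notification is one-shot: dis-arm until re-armed by user. */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
		return true;
	}
	return false;
}
#endif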

int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status)
{}

int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status)
{}

/*
 * siw_sq_flush()
 *
 * Flush SQ and ORQ entries to CQ.
 *
 * Must be called with QP state write lock held.
 * Therefore, SQ and ORQ lock must not be taken.
 */
void siw_sq_flush(struct siw_qp *qp)
{}
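
/*
 * Sketch of the ORQ part of the flush, assuming READ_ONCE/WRITE_ONCE
 * access to each entry's flags word. Pending SQ entries and the
 * current transmit WQE are drained the same way, completing with
 * SIW_WC_WR_FLUSH_ERR. Not the original implementation.
 */
#if 0	/* sketch, not built */
static void siw_sq_flush_orq_sketch(struct siw_qp *qp)
{
	while (qp->attrs.orq_size) {
		struct siw_sqe *sqe =
			&qp->orq[qp->orq_get % qp->attrs.orq_size];

		if (!READ_ONCE(sqe->flags))
			break;
		if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(sqe->flags, 0);
		qp->orq_get++;
	}
}
#endif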

/*
 * siw_rq_flush()
 *
 * Flush recv queue entries to CQ. Also takes care of
 * pending active tagged and untagged inbound transfers,
 * which still hold references on their target memory.
 *
 * Must be called with QP state write lock held.
 * Therefore, RQ lock must not be taken.
 */
void siw_rq_flush(struct siw_qp *qp)
{}
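
/*
 * Sketch of the plain RQ drain only, assuming qp->recvq and qp->rq_get
 * as used by the receive path. Completing a currently active inbound
 * WQE and dropping its memory references is omitted. Not the original
 * implementation.
 */
#if 0	/* sketch, not built */
static void siw_rq_flush_rq_sketch(struct siw_qp *qp)
{
	while (qp->attrs.rq_size) {
		struct siw_rqe *rqe =
			&qp->recvq[qp->rq_get % qp->attrs.rq_size];

		if (!READ_ONCE(rqe->flags))
			break;
		if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(rqe->flags, 0);
		qp->rq_get++;
	}
}
#endif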

int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
{}

void siw_free_qp(struct kref *ref)
{}