/* Source: linux/drivers/infiniband/hw/hfi1/qp.c */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size =;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC();

/*
 * Forward declarations for the static callbacks wired into the
 * iowait/sdma machinery below.
 */
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	unsigned int seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] =;

/*
 * flush_list_head - presumably releases tx requests queued on @l;
 * confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void flush_list_head(struct list_head *l)
{}

/*
 * flush_tx_list - presumably drops all pending tx work for @qp;
 * confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void flush_tx_list(struct rvt_qp *qp)
{}

/*
 * flush_iowait - presumably removes @qp from any iowait queue it sits on;
 * confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void flush_iowait(struct rvt_qp *qp)
{}

/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{} /* NOTE(review): body stripped; non-void return missing (UB if called) — restore from upstream. */

/*
 * hfi1_check_modify_qp - rdmavt driver hook to validate a modify-QP request;
 * presumably returns 0 on success or a negative errno — confirm upstream.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended. Only applicable for RC and UC
 * QPs. UD QPs determine this on the fly from the ah in the wqe
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{} /* NOTE(review): body stripped in this extract — restore from upstream. */

/*
 * hfi1_modify_qp - rdmavt driver hook; presumably applies an already
 * validated modify-QP request to driver-private state — confirm upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{}

/**
 * hfi1_setup_wqe - set up the wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe.  This is called
 * prior to inserting the wqe into the ring but after
 * the wqe has been setup by RDMAVT. This function
 * allows the driver the opportunity to perform
 * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 *
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{} /* NOTE(review): body stripped; non-void return missing (doc above says 0/-EINVAL) — restore from upstream. */

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
bool _hfi1_schedule_send(struct rvt_qp *qp)
{} /* NOTE(review): body stripped; bool return missing — restore from upstream. */

/*
 * qp_pio_drain - presumably waits for outstanding PIO sends on @qp to
 * complete — confirm upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void qp_pio_drain(struct rvt_qp *qp)
{}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 * Return: true if the first leg is scheduled;
 * false if the first leg is not scheduled.
 */
bool hfi1_schedule_send(struct rvt_qp *qp)
{} /* NOTE(review): body stripped; bool return missing — restore from upstream. */

/*
 * hfi1_qp_schedule - presumably kicks send-engine progress for @qp;
 * confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void hfi1_qp_schedule(struct rvt_qp *qp)
{}

/*
 * hfi1_qp_wakeup - presumably clears the wait @flag on @qp and reschedules
 * it — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{}

/*
 * hfi1_qp_unbusy - rdmavt hook; presumably clears busy state tracked via
 * @wait for @qp — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{}

/*
 * iowait_sleep - sdma callback invoked when a descriptor cannot be
 * submitted immediately; presumably queues @qp to wait for engine
 * resources — confirm against upstream.
 *
 * The @seq parameter is spelled "unsigned int" here to match the forward
 * declaration above; the definition previously used the non-standard
 * "uint" typedef, an inconsistency within this file.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	unsigned int seq,
	bool pkts_sent)
{}

/*
 * iowait_wakeup - iowait callback; presumably resumes a QP blocked in
 * iowait_sleep() — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void iowait_wakeup(struct iowait *wait, int reason)
{}

/*
 * iowait_sdma_drained - iowait callback; presumably notified once all
 * sdma descriptors for the waiter have completed — confirm upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void iowait_sdma_drained(struct iowait *wait)
{}

/*
 * hfi1_init_priority - presumably initializes the iowait priority for @w;
 * confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
static void hfi1_init_priority(struct iowait *w)
{}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{} /* NOTE(review): body stripped; pointer return missing — restore from upstream. */

/**
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{} /* NOTE(review): body stripped; pointer return missing — restore from upstream. */

/*
 * Printable names for the IB QP types, indexed by enum ib_qp_type
 * (SMI/GSI/RC/UC/UD); used by qp_iter_print() below.  This copy had the
 * initializer stripped (" =;"), which is invalid C, so the upstream table
 * is restored here.
 */
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

/*
 * qp_idle - presumably reports whether @qp has no outstanding send work;
 * confirm against upstream.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
static int qp_idle(struct rvt_qp *qp)
{}

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{} /* NOTE(review): body stripped in this extract — restore from upstream. */

/*
 * qp_priv_alloc - rdmavt hook; presumably allocates the driver-private
 * portion of a QP, returning it or an ERR_PTR/NULL — confirm upstream.
 * NOTE(review): body stripped; pointer return missing — restore from upstream.
 */
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{}

/*
 * qp_priv_free - rdmavt hook; presumably frees what qp_priv_alloc()
 * allocated — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{}

/*
 * free_all_qps - rdmavt hook; presumably tears down driver-private QP
 * state for @rdi and returns a count — confirm against upstream.
 *
 * Return type spelled "unsigned int" per kernel coding style (checkpatch
 * warns on bare "unsigned"); same type, so the ABI is unchanged.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
unsigned int free_all_qps(struct rvt_dev_info *rdi)
{}

/*
 * flush_qp_waiters - rdmavt hook; presumably removes @qp from any wait
 * lists it sits on — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void flush_qp_waiters(struct rvt_qp *qp)
{}

/*
 * stop_send_queue - rdmavt hook; presumably halts send-side processing
 * for @qp — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void stop_send_queue(struct rvt_qp *qp)
{}

/*
 * quiesce_qp - rdmavt hook; presumably waits for in-flight work on @qp to
 * drain — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void quiesce_qp(struct rvt_qp *qp)
{}

/*
 * notify_qp_reset - rdmavt hook; presumably reinitializes driver-private
 * state when @qp transitions to RESET — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void notify_qp_reset(struct rvt_qp *qp)
{}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{} /* NOTE(review): body stripped in this extract — restore from upstream. */

/*
 * mtu_to_path_mtu - rdmavt hook; presumably converts a byte MTU to the
 * path-MTU encoding — confirm against upstream.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
int mtu_to_path_mtu(u32 mtu)
{}

/*
 * mtu_from_qp - rdmavt hook; presumably resolves the effective MTU for
 * @qp given encoded @pmtu — confirm against upstream.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{}

/*
 * get_pmtu_from_attr - rdmavt hook; presumably extracts/validates the
 * path MTU from @attr for @qp — confirm against upstream.
 * NOTE(review): body stripped; non-void return missing — restore from upstream.
 */
int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{}

/*
 * notify_error_qp - rdmavt hook; presumably cleans up driver state when
 * @qp enters the error state — confirm against upstream.
 * NOTE(review): body stripped in this extract — restore from upstream.
 */
void notify_error_qp(struct rvt_qp *qp)
{}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{} /* NOTE(review): body stripped in this extract — restore from upstream. */

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale qps
 * after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{} /* NOTE(review): body stripped in this extract — restore from upstream. */