/* linux/drivers/infiniband/hw/qib/qib_verbs.c */

/*
 * Copyright (c) 2012 - 2018 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size =;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC();

static unsigned int qib_lkey_table_size =;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC();

static unsigned int ib_qib_max_pds =;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC();

static unsigned int ib_qib_max_ahs =;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_cqes =;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_cqs =;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_qp_wrs =;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_qps =;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_sges =;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_mcast_grps =;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_mcast_qp_attached =;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_srqs =;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_srq_sges =;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int ib_qib_max_srq_wrs =;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC();

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC();

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] =;

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(struct timer_list *t)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

#ifdef __LITTLE_ENDIAN
/*
 * Little-endian variants of the byte-packing helpers.
 * NOTE(review): these bodies are elided in this excerpt — see upstream
 * source for the little-endian implementations.
 */
static inline u32 get_upper_bits(u32 data, u32 shift)
{}

static inline u32 set_upper_bits(u32 data, u32 shift)
{}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{}
#else
/*
 * Big-endian variants: on big-endian, "upper" bits of the register hold
 * the first bytes in memory order, so moving data toward the upper end
 * is a left shift.
 */
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

/*
 * Keep only the @n most-significant bytes of @data, with everything
 * else cleared, repositioned @off bytes down from the most-significant
 * end: the right shift drops the low (4 - n) bytes, the left shift
 * places the surviving bytes at the target offset.
 */
static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

/* Copy @length bytes from @ss into the PIO buffer @piobuf; @flush_wc
 * presumably controls write-combining flushes — TODO confirm upstream. */
static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* Slow path of get_txreq() (presumably taken when the free list is
 * empty) — kept out of line via noinline. Body elided in this excerpt. */
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct rvt_qp *qp)
{}

/* Fast-path allocation of a verbs tx request (presumably falls back to
 * __get_txreq() on miss — TODO confirm). Body elided in this excerpt. */
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct rvt_qp *qp)
{}

/* Release a verbs tx request obtained via get_txreq() (presumed). */
void qib_put_txreq(struct qib_verbs_txreq *tx)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* Presumably queues @qp to wait for kernel memory (paired with
 * mem_timer() above — TODO confirm). Body elided in this excerpt. */
static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{}

/* SDMA transmit path used by qib_verbs_send() (presumed from naming).
 * Body elided in this excerpt — see upstream source. */
static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* PIO transmit path used by qib_verbs_send() (presumed from naming).
 * Body elided in this excerpt — see upstream source. */
static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* Presumably snapshots per-port send/recv word, packet and xmit-wait
 * counters into the out-parameters — TODO confirm. Body elided. */
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* rdmavt callback: fill @props with the attributes of @port_num
 * (presumed from the rvt signature). Body elided in this excerpt. */
static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num,
			  struct ib_port_attr *props)
{}

/* ib_device_ops callback: apply device modifications selected by
 * @device_modify_mask (presumed). Body elided in this excerpt. */
static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{}

static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* rdmavt callback: return the @guid_index'th port GUID in big-endian
 * form via @guid (presumed). Body elided in this excerpt. */
static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid)
{}

/* Validate @ah_attr for this device (presumed from rvt naming). */
int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* rdmavt hook invoked after a new AH is created (presumed). */
static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct rdma_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* Create an address handle for sending from QP0 to @dlid (presumed). */
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* One-time initialization of the per-port IB state (presumed). */
static void init_ibport(struct qib_pportdata *ppd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void qib_fill_device_attr(struct qib_devdata *dd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/* NOTE(review): the ib_device_ops initializer was elided in this copy
 * ("=;" is not valid C) — restore the ops table from upstream. */
static const struct ib_device_ops qib_dev_ops =;

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated qib_ibdev pointer or NULL on error.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

void qib_unregister_ib_device(struct qib_devdata *dd)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * _qib_schedule_send - schedule progress
 * @qp: the qp
 *
 * This schedules progress w/o regard to the s_flags.
 *
 * It is only used in post send, which doesn't hold
 * the s_lock.
 */
bool _qib_schedule_send(struct rvt_qp *qp)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */

/**
 * qib_schedule_send - schedule progress
 * @qp: the qp
 *
 * This schedules qp progress.  The s_lock
 * should be held.
 */
bool qib_schedule_send(struct rvt_qp *qp)
{} /* NOTE(review): body elided in this excerpt — see upstream source. */