linux/drivers/infiniband/hw/bnxt_re/ib_verbs.c

/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>

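/*
 * Translate access flags between the ib_verbs flag space and the
 * device (qplib) flag space; the two helpers below are inverses of
 * each other over the flag bits they both handle.
 */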
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{}
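
/*
 * A minimal sketch of the translation bnxt_re_build_sgl() performs,
 * assuming the bnxt_qplib_sge fields addr/lkey/size: copy each stack
 * SGE into the qplib representation and return the total payload
 * length in bytes.  The _sketch name is hypothetical and not part of
 * the driver.
 */
static inline int bnxt_re_build_sgl_sketch(struct ib_sge *ib_sg_list,
					   struct bnxt_qplib_sge *sg_list,
					   int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;	/* DMA address */
		sg_list[i].lkey = ib_sg_list[i].lkey;	/* local key */
		sg_list[i].size = ib_sg_list[i].length;	/* length in bytes */
		total += sg_list[i].size;
	}
	return total;
}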

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{}
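
/*
 * Hedged sketch of the common get_port_immutable pattern for a RoCE
 * v2 device, not necessarily the exact in-tree body: query the port
 * once, then latch the table sizes and core capability flags.  The
 * _sketch name is hypothetical.
 */
static inline int bnxt_re_port_immutable_sketch(struct ib_device *ibdev,
						u32 port_num,
						struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;
	int rc;

	rc = bnxt_re_query_port(ibdev, port_num, &port_attr);
	if (rc)
		return rc;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}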

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{}

/* One PBL entry per page of the fence MR buffer (assumed:
 * BNXT_RE_FENCE_BYTES is defined in ib_verbs.h).
 */
#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{}

static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{}
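
/*
 * A sketch of how such an entry is typically created with the core's
 * rdma_user_mmap machinery: allocate the wrapper, record what a later
 * mmap() should map, insert it, and hand the resulting pgoff token
 * back for user space.  Assumptions: bnxt_re_user_mmap_entry embeds a
 * rdma_user_mmap_entry named rdma_entry plus mem_offset/mmap_flag
 * fields, bnxt_re_ucontext embeds its ib_ucontext as ib_uctx, and
 * <linux/slab.h> is reachable through an existing header.  The
 * _sketch name is hypothetical.
 */
static struct bnxt_re_user_mmap_entry *
bnxt_re_mmap_entry_insert_sketch(struct bnxt_re_ucontext *uctx, u64 mem_offset,
				 enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;

	if (rdma_user_mmap_entry_insert(&uctx->ib_uctx, &entry->rdma_entry,
					PAGE_SIZE)) {
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return entry;
}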

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{}

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{}
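
/*
 * Sketch of the lock-ordering scheme the two helpers above annotate,
 * grounded in their __acquires()/__releases() contexts: take the send
 * CQ lock first with IRQs saved, take the recv CQ lock only when it is
 * a distinct CQ, and release in reverse order.  A QP whose SCQ and RCQ
 * coincide is never double-locked.  The _sketch names are
 * hypothetical.
 */
static inline unsigned long bnxt_re_lock_cqs_sketch(struct bnxt_re_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	return flags;
}

static inline void bnxt_re_unlock_cqs_sketch(struct bnxt_re_qp *qp,
					     unsigned long flags)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}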

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{}

static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{}

static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{}

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr,
				       struct bnxt_re_ucontext *uctx)
{}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{}

static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
				   struct ib_qp_init_attr *init_attr,
				   struct bnxt_qplib_dev_attr *dev_attr)
{}

int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata)
{}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{}
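
/*
 * Hedged sketch of the MTU translation pair, assuming the
 * CMDQ_MODIFY_QP_PATH_MTU_MTU_* encodings from roce_hsi.h: the stack's
 * enum ib_mtu and the firmware's path-MTU field enumerate the same
 * five sizes, so each direction is a plain switch.  Only one direction
 * is shown; the _sketch name is hypothetical.
 */
static inline u32 __from_ib_mtu_sketch(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	case IB_MTU_2048:
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}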

/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{}

int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{}

int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
{}

int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{}

int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{}

int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{}

/* Routine for sending QP1 packets for RoCE V1 and V2 */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
				     const struct ib_send_wr *wr,
				     struct bnxt_qplib_swqe *wqe,
				     int payload_size)
{}

/* The MAD layer provides only a recv SGE large enough for the ib_grh
 * plus the MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
 * or RoCE iCRC.  The Cu+ solution must therefore provide a buffer for
 * the entire receive packet (334 bytes, assuming no VLAN) and then
 * copy the GRH and the MAD datagram out to the SGE the caller
 * provided.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    const struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{}

static int is_ud_qp(struct bnxt_re_qp *qp)
{}
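
/*
 * Sketch of the UD test, assuming bnxt_re_qp embeds its qplib QP as
 * qplib_qp and the CMDQ_CREATE_QP_TYPE_* encodings from roce_hsi.h:
 * GSI (QP1) traffic rides a UD-style queue, so both types take the UD
 * paths in the WQE builders below.  The _sketch name is hypothetical.
 */
static inline bool is_ud_qp_sketch(struct bnxt_re_qp *qp)
{
	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
	       qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI;
}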

static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{}

static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   const struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{}

static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_send_wr *wr)
{}

static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
{}

int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{}

static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_recv_wr *wr)
{}

int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{}

/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{}

int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs)
{}

static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
{}

int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{}

static u8 __req_to_ib_wc_status(u8 qstatus)
{}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{}

static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{}

static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{}

static int bnxt_re_to_ib_nw_type(int nw_type)
{}

static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{}

static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
					 struct bnxt_qplib_cqe *cqe)
{}

static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{}

static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
					u16 vlan_id)
{}

static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
				u16 *vid, u8 *sl)
{}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{}

static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{}

static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
				      struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{}

static int send_phantom_wqe(struct bnxt_re_qp *qp)
{}

int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{}

int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{}

/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{}

static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{}
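
/*
 * Hedged sketch of the set_page/map_mr_sg pairing used by most RDMA
 * drivers: ib_sg_to_pages() walks the scatterlist and invokes the
 * per-page callback, which appends each page address to the MR's page
 * list.  The mr->pages/mr->npages/mr->max_pages fields and the ib_mr
 * member used with container_of() are assumptions here, and the
 * _sketch names are hypothetical.
 */
static int bnxt_re_set_page_sketch(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;
	mr->pages[mr->npages++] = addr;
	return 0;
}

static int bnxt_re_map_mr_sg_sketch(struct ib_mr *ib_mr,
				    struct scatterlist *sg, int sg_nents,
				    unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset,
			      bnxt_re_set_page_sketch);
}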

struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{}

struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{}

static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
					   int mr_access_flags, struct ib_umem *umem)
{}

struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{}

struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
					 u64 length, u64 virt_addr, int fd,
					 int mr_access_flags, struct ib_udata *udata)
{}

int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{}

void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{}

static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{}

/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{}
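
/*
 * Hedged sketch of the rdma_user_mmap flow this helper follows: the
 * pgoff handed out by bnxt_re_mmap_entry_insert() is resolved back to
 * its entry, the entry's flag selects how the backing memory gets
 * mapped, and the reference taken by the lookup is always dropped.
 * The _sketch name and the single BNXT_RE_MMAP_UC_DB case shown are
 * illustrative assumptions.
 */
static int bnxt_re_mmap_sketch(struct ib_ucontext *ib_uctx,
			       struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct bnxt_re_user_mmap_entry *entry;
	int ret = -EOPNOTSUPP;

	rdma_entry = rdma_user_mmap_entry_get(ib_uctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
			     rdma_entry);
	switch (entry->mmap_flag) {
	case BNXT_RE_MMAP_UC_DB:
		/* Map a single doorbell page, uncached. */
		ret = rdma_user_mmap_io(ib_uctx, vma,
					entry->mem_offset >> PAGE_SHIFT,
					PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	default:
		break;
	}
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}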

void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{}

static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
{}

static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{}

static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
				  enum rdma_remove_reason why,
				  struct uverbs_attr_bundle *attrs)
{}

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD_DESTROY();

DECLARE_UVERBS_NAMED_OBJECT();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_GLOBAL_METHODS();

/* Toggle MEM */
static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
{}

static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
				      enum rdma_remove_reason why,
				      struct uverbs_attr_bundle *attrs)
{}

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD_DESTROY();

DECLARE_UVERBS_NAMED_OBJECT();

const struct uapi_definition bnxt_re_uapi_defs[] = {};