linux/drivers/infiniband/core/verbs.c

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
#include <rdma/lag.h>

#include "core_priv.h"
#include <trace/events/rdma_core.h>

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] =;

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{}
EXPORT_SYMBOL();

static const char * const wc_statuses[] =;

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{}
EXPORT_SYMBOL();

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{}
EXPORT_SYMBOL();

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{}
EXPORT_SYMBOL();

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{}
EXPORT_SYMBOL();

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type)
{}
EXPORT_SYMBOL();

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u32 port_num)
{}
EXPORT_SYMBOL();

/* Protection domains */

/**
 * __ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
		const char *caller)
{}
EXPORT_SYMBOL();
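
/*
 * Usage sketch (illustrative, not part of the original file): a kernel ULP
 * normally allocates its PD through the ib_alloc_pd() wrapper, which supplies
 * KBUILD_MODNAME as @caller, and releases it with ib_dealloc_pd() once all
 * dependent objects are gone:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	... create CQs, QPs and MRs against pd; pd->local_dma_lkey can be
 *	    used as the lkey in SGEs for local DMA access ...
 *
 *	ib_dealloc_pd(pd);
 */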

/**
 * ib_dealloc_pd_user - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * guaranteeing that no new allocations will happen.
 */
int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/* Address handles */

/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest:       Pointer to destination ah_attr. Contents of the destination
 *              pointer are assumed to be invalid and are overwritten.
 * @src:        Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{}
EXPORT_SYMBOL();

/**
 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
 * @old:        Pointer to existing ah_attr which needs to be replaced.
 *              old is assumed to be valid or zero'd
 * @new:        Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * a reference for the copied-in ah_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{}
EXPORT_SYMBOL();

/**
 * rdma_move_ah_attr - Move the ah_attr pointed to by src into dest.
 * @dest:       Pointer to destination ah_attr to copy to.
 *              dest is assumed to be valid or zero'd
 * @src:        Pointer to the source ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. This also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference to the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{}
EXPORT_SYMBOL();

/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{}

/*
 * If the ah requires a GRH then ensure that the sgid_attr pointer is filled in.
 * On success the caller is responsible for calling rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{}

static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
		      const struct ib_gid_attr *old_attr)
{}

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata,
				     struct net_device *xmit_slave)
{}

/**
 * rdma_create_ah - Creates an address handle for the
 * given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * On success it returns a valid address handle, otherwise it returns an
 * ERR_PTR encoding the error.  The address handle is used to reference a
 * local or global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{}
EXPORT_SYMBOL();
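
/*
 * Usage sketch (illustrative, not part of the original file): building a
 * minimal IB (non-RoCE) address vector and creating/destroying an AH in a
 * sleepable context. The dlid and port_num values are placeholders:
 *
 *	struct rdma_ah_attr ah_attr = {};
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post UD sends referencing ah ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */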

/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * For ah attributes of RoCE type it also resolves the destination mac address.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input/output buffer information needed by the
 *         provider driver.
 *
 * On success it returns a valid address handle, otherwise it returns an
 * ERR_PTR encoding the error.  The address handle is used to reference a
 * local or global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{}
EXPORT_SYMBOL();

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{}
EXPORT_SYMBOL();

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u32 port_num,
						     const struct ib_grh *grh)
{}

struct find_gid_index_context {};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
		       u16 vlan_id, const union ib_gid *sgid,
		       enum ib_gid_type gid_type)
{}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{}
EXPORT_SYMBOL();

/* Resolve the destination mac address and hop limit for a unicast destination
 * GID entry, considering the source GID entry as well.
 * The ah_attr must have a valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{}

/*
 * This function initializes address handle attributes from an incoming packet.
 * The incoming packet's dgid holds the GID of the receiving node on which this
 * code executes, while its sgid holds the GID of the sender.
 *
 * When resolving the destination mac address, the received dgid is used as
 * the sgid and the received sgid is used as the dgid, because the received
 * sgid identifies the peer to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{}
EXPORT_SYMBOL();
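
/*
 * Usage sketch (illustrative, not part of the original file): a responder
 * turning a received work completion into address handle attributes for the
 * reply, then releasing the SGID reference that a successful call holds:
 *
 *	struct rdma_ah_attr ah_attr;
 *	int ret;
 *
 *	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
 *	if (ret)
 *		return ret;
 *	... create an AH from ah_attr or copy the fields needed for the reply ...
 *	rdma_destroy_ah_attr(&ah_attr);
 */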

/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:	Pointer to AH attribute structure
 * @dgid:	Destination GID
 * @flow_label:	Flow label
 * @hop_limit:	Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:	Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. After calling this
 * function, the caller must ensure rdma_destroy_ah_attr() is invoked before
 * the rdma_ah_attr structure is freed.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{}
EXPORT_SYMBOL();

/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release the reference to the SGID attribute of the ah attribute if it is
 * non-NULL. It is safe to call this multiple times, and safe to call it on
 * a zero-initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{}
EXPORT_SYMBOL();

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u32 port_num)
{}
EXPORT_SYMBOL();

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{}
EXPORT_SYMBOL();

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{}
EXPORT_SYMBOL();

int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/* Shared receive queues */

/**
 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 * @uobject: uobject pointer if this is not a kernel SRQ
 * @udata: udata pointer if this is not a kernel SRQ
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and are set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata)
{}
EXPORT_SYMBOL();
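
/*
 * Usage sketch (illustrative, not part of the original file): kernel callers
 * normally go through the ib_create_srq() wrapper, which passes NULL for the
 * uobject/udata pair. The queue sizes below are placeholders:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.srq_type	= IB_SRQT_BASIC,
 *		.attr = {
 *			.max_wr		= 256,
 *			.max_sge	= 1,
 *		},
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	... srq_attr.attr now reflects the values actually allocated ...
 *	ib_destroy_srq(srq);
 */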

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{}
EXPORT_SYMBOL();

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{}
EXPORT_SYMBOL();

int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/* Queue pairs */

static void __ib_qp_event_handler(struct ib_event *event, void *context)
{}

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{}
EXPORT_SYMBOL();

static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
					struct ib_qp_init_attr *qp_init_attr)
{}

static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
			       struct ib_qp_init_attr *attr,
			       struct ib_udata *udata,
			       struct ib_uqp_object *uobj, const char *caller)
{}

/**
 * ib_create_qp_user - Creates a QP associated with the specified protection
 *   domain.
 * @dev: IB device
 * @pd: The protection domain associated with the QP.
 * @attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 * @udata: User data
 * @uobj: uverbs object
 * @caller: caller's build-time module name
 */
struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
				struct ib_qp_init_attr *attr,
				struct ib_udata *udata,
				struct ib_uqp_object *uobj, const char *caller)
{}
EXPORT_SYMBOL();

void ib_qp_usecnt_inc(struct ib_qp *qp)
{}
EXPORT_SYMBOL();

void ib_qp_usecnt_dec(struct ib_qp *qp)
{}
EXPORT_SYMBOL();

struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
				  struct ib_qp_init_attr *qp_init_attr,
				  const char *caller)
{}
EXPORT_SYMBOL();
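
/*
 * Usage sketch (illustrative, not part of the original file): kernel ULPs use
 * the ib_create_qp() wrapper, which fills in @caller with KBUILD_MODNAME. The
 * capability numbers are placeholders:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap = {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 2,
 *			.max_recv_sge	= 2,
 *		},
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */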

static const struct {} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] =;

bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask)
{}
EXPORT_SYMBOL();
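
/*
 * Usage sketch (illustrative, not part of the original file): a provider's
 * modify_qp handler typically validates the requested transition against the
 * state table before touching hardware. "cur_state" below stands for the
 * driver's cached QP state (driver-private; the name is hypothetical):
 *
 *	enum ib_qp_state new_state;
 *
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
 *		return -EINVAL;
 */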

/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device:		Device to consider
 * @ah_attr:		address handle attribute which describes the
 *			source and destination parameters
 * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
 * It returns 0 on success or an appropriate error code, and initializes the
 * necessary ah_attr fields when the call is successful.
 */
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{}

static bool is_qp_type_connected(const struct ib_qp *qp)
{}

/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input/output buffer information.
 *
 * It returns 0 on success and an appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
				   u16 *speed, u8 *width)
{}

int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{}
EXPORT_SYMBOL();

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{}
EXPORT_SYMBOL();
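
/*
 * Usage sketch (illustrative, not part of the original file): the canonical
 * RESET -> INIT transition for an RC QP sets the four attributes the spec
 * requires for that step. The pkey index and port number are placeholders:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	  = IB_QPS_INIT,
 *		.pkey_index	  = 0,
 *		.port_num	  = 1,
 *		.qp_access_flags  = IB_ACCESS_REMOTE_READ |
 *				    IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 *	if (ret)
 *		return ret;
 */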

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{}
EXPORT_SYMBOL();

int ib_close_qp(struct ib_qp *qp)
{}
EXPORT_SYMBOL();

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{}

int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/* Completion queues */

struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller)
{}
EXPORT_SYMBOL();
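
/*
 * Usage sketch (illustrative, not part of the original file): new kernel
 * consumers are expected to use the ib_alloc_cq()/ib_free_cq() completion
 * queue API (which is also what ib_drain_qp() relies on) rather than calling
 * the raw create verb directly. The CQE count, completion vector and the
 * drv_ctx private pointer are placeholders:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, drv_ctx, 256, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */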

int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{}
EXPORT_SYMBOL();

int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

int ib_resize_cq(struct ib_cq *cq, int cqe)
{}
EXPORT_SYMBOL();

/* Memory regions */

struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			     u64 virt_addr, int access_flags)
{}
EXPORT_SYMBOL();

int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		 u32 flags, struct ib_sge *sg_list, u32 num_sge)
{}
EXPORT_SYMBOL();

int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg)
{}
EXPORT_SYMBOL();

/**
 * ib_alloc_mr_integrity() - Allocates an integrity memory region
 * @pd:                      protection domain associated with the region
 * @max_num_data_sg:         maximum data sg entries available for registration
 * @max_num_meta_sg:         maximum metadata sg entries available for
 *                           registration
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_data_sg, and the
 * integrity page/sg lists must not exceed max_num_meta_sg.
 *
 */
struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg)
{}
EXPORT_SYMBOL();

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{}
EXPORT_SYMBOL();

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{}
EXPORT_SYMBOL();

/**
 * ib_alloc_xrcd_user - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @inode: inode to connect XRCD
 * @udata: Valid user data or NULL for kernel object
 */
struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
				   struct inode *inode, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/**
 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 */
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{}
EXPORT_SYMBOL();

/**
 * ib_destroy_wq_user - Destroys the specified user WQ.
 * @wq: The WQ to destroy.
 * @udata: Valid user data
 */
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
{}
EXPORT_SYMBOL();

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{}
EXPORT_SYMBOL();
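
/*
 * Usage sketch (illustrative, not part of the original file): after an
 * integrity-enabled transfer completes, the ULP can ask whether the device
 * recorded a signature error on the MR:
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		return ret;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		pr_err("sig error type %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset);
 */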

int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
			 int state)
{}
EXPORT_SYMBOL();

int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
		     struct ifla_vf_info *info)
{}
EXPORT_SYMBOL();

int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
		    struct ifla_vf_stats *stats)
{}
EXPORT_SYMBOL();

int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
		   int type)
{}
EXPORT_SYMBOL();

int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid)
{}
EXPORT_SYMBOL();

/**
 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
 *     information) and set an appropriate memory region for registration.
 * @mr:             memory region
 * @data_sg:        dma mapped scatterlist for data
 * @data_sg_nents:  number of entries in data_sg
 * @data_sg_offset: offset in bytes into data_sg
 * @meta_sg:        dma mapped scatterlist for metadata
 * @meta_sg_nents:  number of entries in meta_sg
 * @meta_sg_offset: offset in bytes into meta_sg
 * @page_size:      page vector desired page size
 *
 * Constraints:
 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
 *
 * Return: 0 on success.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size)
{}
EXPORT_SYMBOL();
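
/*
 * Usage sketch (illustrative, not part of the original file): mapping
 * separate data and metadata scatterlists onto an IB_MR_TYPE_INTEGRITY MR
 * before posting the registration work request. PAGE_SIZE is used as a
 * placeholder page size and the offsets are left NULL:
 *
 *	int ret;
 *
 *	ret = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
 *			      meta_sg, meta_nents, NULL, PAGE_SIZE);
 *	if (unlikely(ret))
 *		return ret;
 *	... fill mr->sig_attrs and post an IB_WR_REG_MR_INTEGRITY WR ...
 */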

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 *
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the sg_nents total byte length exceeds max_num_sg * page_size,
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{}
EXPORT_SYMBOL();
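
/*
 * Usage sketch (illustrative, not part of the original file): the common
 * fast-registration pattern maps the SG list onto an IB_MR_TYPE_MEM_REG MR
 * and then describes the registration in an IB_WR_REG_MR work request:
 *
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	... post &reg_wr.wr on the send queue before using mr->rkey ...
 */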

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   ==== =======================================================
 *                 IN   start offset in bytes into sg
 *                 OUT  offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 *                 ==== =======================================================
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of the given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{}
EXPORT_SYMBOL();
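
/*
 * Driver-side sketch (illustrative, not part of the original file): providers
 * usually implement their map_mr_sg hook by handing ib_sg_to_pages() a
 * set_page callback that appends each page address to a per-MR array. The
 * example_mr structure, its fields and to_example_mr() are hypothetical:
 *
 *	static int example_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct example_mr *mr = to_example_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				     int sg_nents, unsigned int *sg_offset)
 *	{
 *		struct example_mr *mr = to_example_mr(ibmr);
 *
 *		mr->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      example_set_page);
 *	}
 */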

struct ib_drain_cqe {};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{}

/*
 * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
 *                    expires.
 * @qp:               queue pair associated with SRQ to drain
 *
 * Quoting 10.3.1 Queue Pair and EE Context States:
 *
 * Note, for QPs that are associated with an SRQ, the Consumer should take the
 * QP through the Error State before invoking a Destroy QP or a Modify QP to the
 * Reset State.  The Consumer may invoke the Destroy QP without first performing
 * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
 * Last WQE Reached Event. However, if the Consumer does not wait for the
 * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
 * leakage may occur. Therefore, it is good programming practice to tear down a
 * QP that is associated with an SRQ by using the following process:
 *
 * - Put the QP in the Error State
 * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
 * - either:
 *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
 *       to be empty or the number of Poll CQ operations has exceeded
 *       CQ capacity size;
 * - or
 *       post another WR that completes on the same CQ and wait for this
 *       WR to return as a WC;
 * - and then invoke a Destroy QP or Reset QP.
 *
 * We use the first option.
 */
static void __ib_drain_srq(struct ib_qp *qp)
{}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{}
EXPORT_SYMBOL();

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{}
EXPORT_SYMBOL();

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{}
EXPORT_SYMBOL();
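
/*
 * Usage sketch (illustrative, not part of the original file): typical ULP
 * teardown drains the QP before destroying it so no completions race with the
 * destroy. As noted above, the CQs must have been allocated with
 * ib_alloc_cq() and no other context may still be posting WRs:
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */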

struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *))
{}
EXPORT_SYMBOL();

int rdma_init_netdev(struct ib_device *device, u32 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev)
{}
EXPORT_SYMBOL();

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist, unsigned int nents,
			     unsigned long pgsz)
{}
EXPORT_SYMBOL();

bool __rdma_block_iter_next(struct ib_block_iter *biter)
{}
EXPORT_SYMBOL();

/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate a dynamic struct
 *   for drivers.
 * @descs: array of static descriptors
 * @num_counters: number of elements in array
 * @lifespan: milliseconds between updates
 */
struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
	const struct rdma_stat_desc *descs, int num_counters,
	unsigned long lifespan)
{}
EXPORT_SYMBOL();
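
/*
 * Driver-side sketch (illustrative, not part of the original file): a
 * provider's alloc_hw_port_stats hook builds its counter table from a static
 * descriptor array. The descriptor names and function name are placeholders:
 *
 *	static const struct rdma_stat_desc example_descs[] = {
 *		[0].name = "example_rx_packets",
 *		[1].name = "example_tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *
 *	example_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(example_descs,
 *						  ARRAY_SIZE(example_descs),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */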

/**
 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
 * @stats: statistics to release
 */
void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
{}
EXPORT_SYMBOL();