/* linux/drivers/infiniband/core/cma.c */

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();

#define CMA_CM_RESPONSE_TIMEOUT
#define CMA_MAX_CM_RETRIES
#define CMA_CM_MRA_SETTING
#define CMA_IBOE_PACKET_LIFETIME
#define CMA_PREFERRED_ROCE_GID_TYPE

static const char * const cma_events[] =;

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{}
EXPORT_SYMBOL();

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{}
EXPORT_SYMBOL();

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *                           request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{}
EXPORT_SYMBOL();

/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{}
EXPORT_SYMBOL();

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client =;

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct rb_root id_table =;
/* Serialize operations of id_table tree */
static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {};

static struct cma_pernet *cma_pernet(struct net *net)
{}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{}

struct id_table_entry {};

struct cma_device {};

struct rdma_bind_list {};

static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{}

enum {};

void cma_dev_get(struct cma_device *cma_dev)
{}

void cma_dev_put(struct cma_device *cma_dev)
{}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie)
{}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     u32 port)
{}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     u32 port,
			     enum ib_gid_type default_gid_type)
{}

int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{}

int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
			     u8 default_roce_tos)
{}
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{}

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */

struct cma_multicast {};

struct cma_work {};

cma_ip_addr;

struct cma_hdr {};

#define CMA_VERSION

struct cma_req_info {};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{}

static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{}

static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{}

static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
				 struct id_table_entry *entry_b)
{}

static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{}

static struct id_table_entry *
node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{}

static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{}

static void cma_release_dev(struct rdma_id_private *id_priv)
{}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{}

static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{}

static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u32 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{}

static void cma_id_get(struct rdma_id_private *id_priv)
{}

static void cma_id_put(struct rdma_id_private *id_priv)
{}

static struct rdma_id_private *
__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
		 void *context, enum rdma_ucm_port_space ps,
		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{}

struct rdma_cm_id *
__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
			void *context, enum rdma_ucm_port_space ps,
			enum ib_qp_type qp_type, const char *caller)
{}
EXPORT_SYMBOL();

struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
				       void *context,
				       enum rdma_ucm_port_space ps,
				       enum ib_qp_type qp_type)
{}
EXPORT_SYMBOL();

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{}
EXPORT_SYMBOL();

void rdma_destroy_qp(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{}
EXPORT_SYMBOL();

static inline bool cma_zero_addr(const struct sockaddr *addr)
{}

static inline bool cma_loopback_addr(const struct sockaddr *addr)
{}

static inline bool cma_any_addr(const struct sockaddr *addr)
{}

static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{}

static __be16 cma_port(const struct sockaddr *addr)
{}

static inline int cma_any_port(const struct sockaddr *addr)
{}

static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{}

static u16 cma_port_from_service_id(__be64 service_id)
{}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{}

static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{}

static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{}

static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{}

static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
					  struct cma_req_info *req)
{}

static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{}

static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{}

static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{}

static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
		     const struct ib_cm_event *ib_event,
		     struct cma_req_info *req,
		     struct net_device **net_dev)
{}

static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{}

static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{}

static void cma_release_port(struct rdma_id_private *id_priv)
{}

static void destroy_mc(struct rdma_id_private *id_priv,
		       struct cma_multicast *mc)
{}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{}

static void _destroy_id(struct rdma_id_private *id_priv,
			enum rdma_cm_state state)
{}

/*
 * destroy an ID from within the handler_mutex. This ensures that no other
 * handlers can start running concurrently.
 */
/* Fix: the sparse __releases() annotation referenced the misspelled
 * identifier "idprv"; it must name the actual parameter, id_priv, so
 * static lock-context analysis tracks the handler_mutex release.
 */
static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
	__releases(&id_priv->handler_mutex)
{}

void rdma_destroy_id(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

static int cma_rep_recv(struct rdma_id_private *id_priv)
{}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   const struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{}

static int cma_cm_event_handler(struct rdma_id_private *id_priv,
				struct rdma_cm_event *event)
{}

static int cma_ib_handler(struct ib_cm_id *cm_id,
			  const struct ib_cm_event *ib_event)
{}

static struct rdma_id_private *
cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
		   const struct ib_cm_event *ib_event,
		   struct net_device *net_dev)
{}

static struct rdma_id_private *
cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
		  const struct ib_cm_event *ib_event,
		  struct net_device *net_dev)
{}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   const struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{}

static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
				    const struct ib_cm_event *ib_event)
{}

static int cma_ib_req_handler(struct ib_cm_id *cm_id,
			      const struct ib_cm_event *ib_event)
{}

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{}
EXPORT_SYMBOL();

void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
		    union ib_gid *dgid)
{}
EXPORT_SYMBOL();

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{}

static int cma_listen_on_dev(struct rdma_id_private *id_priv,
			     struct cma_device *cma_dev,
			     struct rdma_id_private **to_destroy)
{}

static int cma_listen_on_all(struct rdma_id_private *id_priv)
{}

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{}
EXPORT_SYMBOL();

/**
 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
 *                          with a connection identifier.
 * @id: Communication identifier to associate the ack timeout with.
 * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec.
 *
 * This function should be called before rdma_connect() on active side,
 * and on passive side before rdma_accept(). It is applicable to primary
 * path only. The timeout will affect the local side of the QP, it is not
 * negotiated with remote side and zero disables the timer. In case it is
 * set before rdma_resolve_route, the value will also be used to determine
 * PacketLifeTime for RoCE.
 *
 * Return: 0 for success
 */
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
{}
EXPORT_SYMBOL();

/**
 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
 *			      QP associated with a connection identifier.
 * @id: Communication identifier to associate the minimum RNR timer with.
 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
 *		   Timer Field" in the IBTA specification.
 *
 * This function should be called before rdma_connect() on active
 * side, and on passive side before rdma_accept(). The timer value
 * will be associated with the local QP. When it receives a send it is
 * not ready to handle, typically if the receive queue is empty, an RNR
 * Retry NAK is returned to the requester with the min_rnr_timer
 * encoded. The requester will then wait at least the time specified
 * in the NAK before retrying. The default is zero, which translates
 * to a minimum RNR Timer value of 655 ms.
 *
 * Return: 0 for success
 */
int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
{}
EXPORT_SYMBOL();

static int route_set_path_rec_inbound(struct cma_work *work,
				      struct sa_path_rec *path_rec)
{}

static int route_set_path_rec_outbound(struct cma_work *work,
				       struct sa_path_rec *path_rec)
{}

static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      unsigned int num_prs, void *context)
{}

static int cma_query_ib_route(struct rdma_id_private *id_priv,
			      unsigned long timeout_ms, struct cma_work *work)
{}

static void cma_iboe_join_work_handler(struct work_struct *work)
{}

static void cma_work_handler(struct work_struct *_work)
{}

static void cma_init_resolve_route_work(struct cma_work *work,
					struct rdma_id_private *id_priv)
{}

static void enqueue_resolve_addr_work(struct cma_work *work,
				      struct rdma_id_private *id_priv)
{}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
				unsigned long timeout_ms)
{}

static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{}

/*
 * cma_iboe_set_path_rec_l2_fields() is helper function which sets
 * path record type based on GID type.
 * It also sets up other L2 fields which includes destination mac address
 * netdev ifindex, of the path record.
 * It returns the netdev of the bound interface for this path record entry.
 */
static struct net_device *
cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
{}

int rdma_set_ib_path(struct rdma_cm_id *id,
		     struct sa_path_rec *path_rec)
{}
EXPORT_SYMBOL();

static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{}

static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{}

struct iboe_prio_tc_map {};

static int get_lower_vlan_dev_tc(struct net_device *dev,
				 struct netdev_nested_priv *priv)
{}

static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{}

static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
{}

static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{}

int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{}
EXPORT_SYMBOL();

static void cma_set_loopback(struct sockaddr *addr)
{}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{}

int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{}
EXPORT_SYMBOL();

int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{}
EXPORT_SYMBOL();

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{}

static int cma_alloc_port(enum rdma_ucm_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{}

static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{}

static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
			      struct rdma_id_private *id_priv)
{}

/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{}

static int cma_use_port(enum rdma_ucm_port_space ps,
			struct rdma_id_private *id_priv)
{}

static enum rdma_ucm_port_space
cma_select_inet_ps(struct rdma_id_private *id_priv)
{}

static enum rdma_ucm_port_space
cma_select_ib_ps(struct rdma_id_private *id_priv)
{}

static int cma_get_port(struct rdma_id_private *id_priv)
{}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{}
EXPORT_SYMBOL();

static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
			      struct sockaddr *addr, const struct sockaddr *daddr)
{}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 const struct sockaddr *dst_addr)
{}

/*
 * If required, resolve the source address for bind and leave the id_priv in
 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
 * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
 * ignored.
 */
static int resolve_prepare_src(struct rdma_id_private *id_priv,
			       struct sockaddr *src_addr,
			       const struct sockaddr *dst_addr)
{}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
{}
EXPORT_SYMBOL();

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{}
EXPORT_SYMBOL();

static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *ib_event)
{}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{}

/**
 * rdma_connect_locked - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Same as rdma_connect() but can only be called from the
 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
 */
int rdma_connect_locked(struct rdma_cm_id *id,
			struct rdma_conn_param *conn_param)
{}
EXPORT_SYMBOL();

/**
 * rdma_connect - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Users must have resolved a route for the rdma_cm_id to connect with by having
 * called rdma_resolve_route before calling this routine.
 *
 * This call will either connect to a remote QP or obtain remote QP information
 * for unconnected rdma_cm_id's.  The actual operation is based on the
 * rdma_cm_id's port space.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{}
EXPORT_SYMBOL();

/**
 * rdma_connect_ece - Initiate an active connection request with ECE data.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 * @ece: ECE parameters
 *
 * See rdma_connect() explanation.
 */
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		     struct rdma_ucm_ece *ece)
{}
EXPORT_SYMBOL();

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{}

static int cma_accept_iw(struct rdma_id_private *id_priv,
		  struct rdma_conn_param *conn_param)
{}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{}

/**
 * rdma_accept - Called to accept a connection request or response.
 * @id: Connection identifier associated with the request.
 * @conn_param: Information needed to establish the connection.  This must be
 *   provided if accepting a connection request.  If accepting a connection
 *   response, this parameter must be NULL.
 *
 * Typically, this routine is only called by the listener to accept a connection
 * request.  It must also be called on the active side of a connection if the
 * user is performing their own QP transitions.
 *
 * In the case of error, a reject message is sent to the remote side and the
 * state of the qp associated with the id is modified to error, such that any
 * previously posted receive buffers would be flushed.
 *
 * This function is for use by kernel ULPs and must be called from under the
 * handler callback.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{}
EXPORT_SYMBOL();

int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		    struct rdma_ucm_ece *ece)
{}
EXPORT_SYMBOL();

void rdma_lock_handler(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

void rdma_unlock_handler(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{}
EXPORT_SYMBOL();

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len, u8 reason)
{}
EXPORT_SYMBOL();

int rdma_disconnect(struct rdma_cm_id *id)
{}
EXPORT_SYMBOL();

static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
			      struct ib_sa_multicast *multicast,
			      struct rdma_cm_event *event,
			      struct cma_multicast *mc)
{}

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{}

static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{}
EXPORT_SYMBOL();

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{}
EXPORT_SYMBOL();

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{}

static void cma_netevent_work_handler(struct work_struct *_work)
{}

static int cma_netevent_callback(struct notifier_block *self,
				 unsigned long event, void *ctx)
{}

static struct notifier_block cma_nb =;

static struct notifier_block cma_netevent_cb =;

static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{}

static void cma_process_remove(struct cma_device *cma_dev)
{}

static bool cma_supported(struct ib_device *device)
{}

static int cma_add_one(struct ib_device *device)
{}

static void cma_remove_one(struct ib_device *device, void *client_data)
{}

static int cma_init_net(struct net *net)
{}

static void cma_exit_net(struct net *net)
{}

static struct pernet_operations cma_pernet_operations =;

static int __init cma_init(void)
{}

static void __exit cma_cleanup(void)
{}

module_init();
module_exit(cma_cleanup);