/* linux/drivers/infiniband/core/sa_query.c */

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

/*
 * Bounds (in msecs) for the tunable netlink local-service resolution
 * timeout, plus the retry policy for ClassPortInfo (CPI) queries.
 * Values restored from the upstream kernel source; the extraction had
 * elided them, leaving invalid C.
 */
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000	/* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

/*
 * NOTE(review): all struct/enum bodies below were elided in this
 * extraction ({}); purpose comments are inferred from the names and
 * should be confirmed against the original file.
 */

/* Reference-counted address handle to the subnet manager (members elided). */
struct ib_sa_sm_ah {};

/* Presumably discriminates IB vs. OPA ClassPortInfo layouts (values elided). */
enum rdma_class_port_info_type {};

/* ClassPortInfo data, tagged by the enum above (members elided). */
struct rdma_class_port_info {};

/* Per-port cache of ClassPortInfo query results (members elided). */
struct ib_sa_classport_cache {};

/* Per-port SA state (members elided). */
struct ib_sa_port {};

/* Per-device SA client state; presumably one ib_sa_port per port (members elided). */
struct ib_sa_device {};

/* Generic in-flight SA query context shared by all query types (members elided). */
struct ib_sa_query {};

/*
 * Flag bits for an ib_sa_query.  Values restored from the upstream
 * kernel source; the extraction had elided them, leaving invalid C.
 */
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

/*
 * Type-specific query wrappers.  Each presumably pairs a user callback
 * with the generic struct ib_sa_query — members elided in this view.
 */
struct ib_sa_path_query {};

struct ib_sa_guidinfo_query {};

struct ib_sa_classport_info_query {};

struct ib_sa_mcmember_query {};

/* State for path-record resolution via the RDMA netlink (LS) service. */
static LIST_HEAD(ib_nl_request_list);		/* requests awaiting a userspace response */
static DEFINE_SPINLOCK(ib_nl_request_lock);	/* protects ib_nl_request_list */
static atomic_t ib_nl_sa_request_seq;		/* netlink sequence-number generator */
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;	/* request-timeout handling */
/* NOTE(review): initializer elided in this extraction — "=;" is invalid C. */
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] =;


/* Forward declarations for the ib_client add/remove callbacks defined below. */
static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client =;

/* Outstanding queries indexed by query ID; IRQ-safe ID allocation. */
static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

/* Transaction-ID generator for outgoing SA MADs. */
static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

/*
 * ib_field pack/unpack tables describing the wire layout of the SA
 * attributes handled in this file (path record, OPA path record,
 * MCMemberRecord, ClassPortInfo, GuidInfoRecord).
 * NOTE(review): the macro bodies and every table initializer were
 * elided in this extraction ("=;" is invalid C) — restore them from
 * the original file before building.
 */
#define PATH_REC_FIELD(field)

static const struct ib_field path_rec_table[] =;

#define OPA_PATH_REC_FIELD(field)

static const struct ib_field opa_path_rec_table[] =;

#define MCMEMBER_REC_FIELD(field)

static const struct ib_field mcmember_rec_table[] =;

#define CLASSPORTINFO_REC_FIELD(field)

static const struct ib_field ib_classport_info_rec_table[] =;

#define OPA_CLASSPORTINFO_REC_FIELD(field)

static const struct ib_field opa_classport_info_rec_table[] =;

#define GUIDINFO_REC_FIELD(field)

static const struct ib_field guidinfo_rec_table[] =;

/* NOTE(review): value elided in this extraction. */
#define RDMA_PRIMARY_PATH_MAX_REC_NUM

/*
 * Netlink (ib_nl_*) local-service helpers.  All bodies are elided in
 * this view; comments describe intent inferred from the names — confirm
 * against the original file.
 */

/* Presumably clears IB_SA_ENABLE_LOCAL_SERVICE so the query goes straight to the SA. */
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{}

/* Presumably nonzero once ib_sa_cancel_query() has been requested on @query. */
static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{}

/* Presumably fills @skb with LS_NLA_* attributes from @query's path record. */
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{}

/* Presumably computes the netlink payload length implied by @comp_mask. */
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{}

/* Presumably builds and multicasts the resolve request to userspace. */
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{}

/* Presumably removes @query from the pending netlink request list. */
static int ib_nl_cancel_request(struct ib_sa_query *query)
{}

/* Forward declaration; the MAD send-completion handler is defined below. */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

/* Presumably unpacks a successful netlink resolve response into the query
 * (body elided in this view). */
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{}

/* Delayed-work handler: presumably times out stale netlink requests
 * (body elided in this view). */
static void ib_nl_request_timeout(struct work_struct *work)
{}

/* RDMA-netlink entry point: set the local-service timeout from userspace
 * (body elided in this view). */
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{}

/* Presumably nonzero when @nlh carries a usable resolve response. */
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{}

/* RDMA-netlink entry point: path-record resolve response from userspace
 * (body elided in this view). */
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{}

/* kref release for struct ib_sa_sm_ah (body elided in this view). */
static void free_sm_ah(struct kref *kref)
{}

/**
 * ib_sa_register_client - Register an SA client.
 * @client: SA client to register.
 *
 * (Body elided in this extraction.)
 */
void ib_sa_register_client(struct ib_sa_client *client)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_register_client);

/**
 * ib_sa_unregister_client - Deregister an SA client.
 * @client: SA client to deregister.
 *
 * (Body elided in this extraction.)
 */
void ib_sa_unregister_client(struct ib_sa_client *client)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
/* Contract documented in the kernel-doc comment above.  Body elided in
 * this extraction. */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_cancel_query);

/* Presumably returns the source-path-bits mask for @port_num
 * (body elided in this view). */
static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{}

/* Presumably fills the GRH portion of @ah_attr from @rec/@gid_attr
 * (body elided in this view). */
static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated ah attributes initialization.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialization from path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
 * when GRH is present for IB link layer.
 * (b) for RoCE link layer it contains a reference to SGID attribute.
 * User must invoke rdma_destroy_ah_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
/* Contract documented in the kernel-doc comment above.  Body elided in
 * this extraction. */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_init_ah_attr_from_path);

/* Presumably allocates the MAD send buffer for @query (body elided in this view). */
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{}

/* Presumably frees @query's MAD send buffer (body elided in this view). */
static void free_mad(struct ib_sa_query *query)
{}

/* Presumably fills in the common MAD header fields (body elided in this view). */
static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{}

/* Presumably assigns a query ID, posts the MAD, and arms the timeout
 * (body elided in this view). */
static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{}

/**
 * ib_sa_unpack_path - Convert a wire-format path record into @rec.
 * @attribute: packed (network-order) path record attribute.
 * @rec: host-format path record to fill in.
 *
 * (Body elided in this extraction; presumably uses path_rec_table.)
 */
void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_unpack_path);

/**
 * ib_sa_pack_path - Convert @rec into a wire-format path record.
 * @rec: host-format path record.
 * @attribute: buffer receiving the packed (network-order) attribute.
 *
 * (Body elided in this extraction; presumably uses path_rec_table.)
 */
void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_pack_path);

/* Presumably true when client and port both support OPA path records
 * (body elided in this view). */
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{}

/* Result codes for opa_pr_query_possible(); enumerator values elided
 * in this extraction — see the function comment below for their meaning. */
enum opa_pr_supported {};

/*
 * opa_pr_query_possible - Check if current PR query can be an OPA query.
 *
 * Retuns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{}

/* MAD completion callback for path-record queries (body elided in this view). */
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{}

/* Teardown for a path-record query's private state (body elided in this view). */
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
/* Contract documented in the kernel-doc comment above.  Body elided in
 * this extraction. */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_path_rec_get);

/* MAD completion callback for MCMemberRecord queries (body elided in this view). */
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{}

/* Teardown for an MCMemberRecord query's private state (body elided in this view). */
static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{}

/*
 * Start an MCMemberRecord query using SA @method (get/set/delete).
 * Same callback/cancel conventions as ib_sa_path_rec_get(); body elided
 * in this view.  Not exported — used within this module.
 */
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{}

/* Support GuidInfoRecord */

/* MAD completion callback for GuidInfoRecord queries (body elided in this view). */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{}

/* Teardown for a GuidInfoRecord query's private state (body elided in this view). */
static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{}

/*
 * Start a GuidInfoRecord query using SA @method.  Same callback/cancel
 * conventions as ib_sa_path_rec_get(); body elided in this extraction.
 */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u32 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{}
/* EXPORT_SYMBOL() requires the symbol being exported as its argument;
 * the extraction had dropped it. */
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

/* Context for a ClassPortInfo completion callback (members elided in this view). */
struct ib_classport_info_context {};

/* Presumably invokes the user's ClassPortInfo completion callback
 * (body elided in this view). */
static void ib_classportinfo_cb(void *context)
{}

/* MAD completion callback for ClassPortInfo queries; presumably also
 * updates the per-port cache (body elided in this view). */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status, struct ib_sa_mad *mad)
{}

/* Teardown for a ClassPortInfo query's private state (body elided in this view). */
static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{}

/* Start a ClassPortInfo query on @port (body elided in this view). */
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{}

/* Work handler: presumably refreshes the cached ClassPortInfo
 * (body elided in this view). */
static void update_ib_cpi(struct work_struct *work)
{}

/* MAD send-completion handler (declared earlier; body elided in this view). */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{}

/* MAD receive handler: presumably matches responses to queries and
 * dispatches their callbacks (body elided in this view). */
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{}

/* Work handler: presumably rebuilds the address handle to the subnet
 * manager after an SM change (body elided in this view). */
static void update_sm_ah(struct work_struct *work)
{}

/* IB event handler: presumably schedules SM-AH/CPI refresh on relevant
 * port events (body elided in this view). */
static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{}

/* ib_client add callback: per-device SA setup (body elided in this view). */
static int ib_sa_add_one(struct ib_device *device)
{}

/* ib_client remove callback: per-device SA teardown (body elided in this view). */
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{}

/* Module init for the SA subsystem (body elided in this view). */
int ib_sa_init(void)
{}

/* Module cleanup for the SA subsystem (body elided in this view). */
void ib_sa_cleanup(void)
{}