linux/drivers/infiniband/core/mad.c

/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014,2018 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>

#ifdef CONFIG_TRACEPOINTS
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
			  struct ib_mad_qp_info *qp_info,
			  struct trace_event_raw_ib_mad_send_template *entry)
{}
#endif

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, u32 port_num)
{}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port; unlike __ib_get_mad_port(), callers need not hold
 * ib_mad_port_list_lock.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, u32 port_num)
{}
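
/*
 * Illustrative sketch (not part of the driver): the locking pattern the
 * two helpers above describe.  A caller-facing lookup takes
 * ib_mad_port_list_lock and delegates to __ib_get_mad_port(), which
 * assumes the lock is already held.
 */
static inline struct ib_mad_port_private *
example_get_mad_port_locked(struct ib_device *device, u32 port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}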

static inline u8 convert_mgmt_class(u8 mgmt_class)
{}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{}

static int vendor_class_index(u8 mgmt_class)
{}

static int is_vendor_class(u8 mgmt_class)
{}

static int is_vendor_oui(char *oui)
{}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u32 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{}
EXPORT_SYMBOL(ib_register_mad_agent);
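
/*
 * Illustrative sketch (hypothetical client, not part of this file):
 * registering a GSI MAD agent for one management class and tearing it
 * down again.  The handler names, management class, and class version
 * are assumptions; the handler pointers stand in for the client's real
 * callbacks.
 */
static ib_mad_send_handler example_send_handler;	/* hypothetical */
static ib_mad_recv_handler example_recv_handler;	/* hypothetical */

static int example_open_agent(struct ib_device *device, u32 port_num)
{
	struct ib_mad_reg_req reg_req = {};
	struct ib_mad_agent *agent;

	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				      &reg_req, 0 /* no RMPP */,
				      example_send_handler,
				      example_recv_handler,
				      NULL /* context */, 0 /* flags */);
	if (IS_ERR(agent))
		return PTR_ERR(agent);

	/* ... issue MADs through the agent, then release it: */
	ib_unregister_mad_agent(agent);
	return 0;
}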

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u32 port_num, struct ib_wc *wc)
{}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{}
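
/*
 * Illustrative sketch of the return contract documented above: the
 * caller only posts the SMP when handle_outgoing_dr_smp() returns 0.
 * The wrapper below is hypothetical.
 */
static int example_dispatch_dr_smp(struct ib_mad_agent_private *agent_priv,
				   struct ib_mad_send_wr_private *send_wr)
{
	int ret = handle_outgoing_dr_smp(agent_priv, send_wr);

	if (ret < 0)		/* error, propagate to the caller */
		return ret;
	if (ret == 1)		/* consumed locally, nothing to post */
		return 0;
	return ib_send_mad(send_wr);	/* 0: hand it to the send queue */
}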

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
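
/*
 * Illustrative sketch: a sender can use ib_mad_kernel_rmpp_agent() to
 * decide whether to request kernel-side RMPP segmentation when
 * building a large send.  The helper and its parameters are
 * hypothetical.
 */
static struct ib_mad_send_buf *
example_create_rmpp_send(struct ib_mad_agent *agent, u32 remote_qpn,
			 u16 pkey_index, int hdr_len, int data_len)
{
	int rmpp_active = ib_mad_kernel_rmpp_agent(agent);

	return ib_create_send_mad(agent, remote_qpn, pkey_index,
				  rmpp_active, hdr_len, data_len,
				  GFP_KERNEL, IB_MGMT_BASE_VERSION);
}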

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active, int hdr_len,
					   int data_len, gfp_t gfp_mask,
					   u8 base_version)
{}
EXPORT_SYMBOL(ib_create_send_mad);
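
/*
 * Illustrative sketch (hypothetical caller): the usual send-buffer
 * lifecycle.  "agent" and "ah" are assumed to have been set up
 * elsewhere (e.g. via ib_register_mad_agent() and rdma_create_ah()).
 */
static int example_send_one_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
				u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0 /* no RMPP */, IB_MGMT_MAD_HDR,
				 IB_MGMT_MAD_DATA, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->ah = ah;
	msg->timeout_ms = 1000;		/* arbitrary example timeout */
	msg->retries = 3;

	/* A real caller would fill msg->mad with a valid MAD here. */

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);	/* not posted, free it ourselves */
	return ret;
}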

int ib_get_mad_data_offset(u8 mgmt_class)
{}
EXPORT_SYMBOL(ib_get_mad_data_offset);
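
/*
 * Illustrative sketch (hypothetical helper): the per-class data offset
 * splits a fixed-size (256-byte IB) MAD into the hdr_len/data_len pair
 * that ib_create_send_mad() expects.
 */
static void example_class_lengths(u8 mgmt_class, int *hdr_len, int *data_len)
{
	*hdr_len = ib_get_mad_data_offset(mgmt_class);
	*data_len = sizeof(struct ib_mad) - *hdr_len;
}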

int ib_is_mad_class_rmpp(u8 mgmt_class)
{}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{}
EXPORT_SYMBOL(ib_post_send_mad);
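
/*
 * Illustrative sketch: several send buffers may be chained through
 * their next pointers and posted with one call; on failure,
 * *bad_send_buf reports the buffer on which the error occurred so the
 * caller can unwind from there.  The helper below is hypothetical.
 */
static int example_post_chain(struct ib_mad_send_buf *first)
{
	struct ib_mad_send_buf *bad = NULL;
	int ret;

	ret = ib_post_send_mad(first, &bad);
	if (ret)
		pr_debug("post failed (%d) at send buffer %p\n", ret, bad);
	return ret;
}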

/*
 * ib_free_recv_mad - Returns the data buffers used to receive
 *  a MAD back to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{}
EXPORT_SYMBOL(ib_free_recv_mad);
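
/*
 * Illustrative sketch of a receive-side consumer (hypothetical helper):
 * look at the first receive buffer, then hand all buffers back to the
 * MAD layer with ib_free_recv_mad().
 */
static void example_consume_recv(struct ib_mad_recv_wc *mad_recv_wc)
{
	const struct ib_mad_hdr *hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	if (ib_response_mad(hdr))
		pr_debug("response MAD: class 0x%x, method 0x%x\n",
			 hdr->mgmt_class, hdr->method);

	/* The buffers belong to the MAD layer again after this call. */
	ib_free_recv_mad(mad_recv_wc);
}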

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{}

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{}

static inline int
rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_send_wr_private *wr,
		 const struct ib_mad_recv_wc *rwc)
{}

static inline int is_direct(u8 class)
{}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     u32 port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{}

static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{}

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       u32 port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   u32 port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{}

static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms)
{}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{}

static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{}

int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{}
EXPORT_SYMBOL(ib_modify_mad);
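
/*
 * Illustrative sketch: ib_modify_mad() adjusts the response timeout of
 * an already-posted send; a timeout of 0 asks for the send to be
 * cancelled and completed back through the client's send handler
 * (the ib_cancel_mad() helper in <rdma/ib_mad.h> builds on this call).
 */
static void example_cancel_send(struct ib_mad_send_buf *msg)
{
	/* Stop waiting for a response and cancel the outstanding send. */
	ib_modify_mad(msg, 0);
}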

static void local_completions(struct work_struct *work)
{}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{}

static void timeout_sends(struct work_struct *work)
{}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    u32 port_num)
{}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, u32 port_num)
{}

static int ib_mad_init_device(struct ib_device *device)
{}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{}

static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{}

void ib_mad_cleanup(void)
{}