/* linux/drivers/infiniband/ulp/srp/ib_srp.c */

/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* pr_fmt: prefix for pr_*() messages. NOTE(review): the expansion has been
 * elided in this snapshot — upstream prepends DRV_NAME ": " here; confirm
 * against the canonical ib_srp.c before building. */
#define pr_fmt(fmt)

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

/* NOTE(review): the macro values and MODULE_*() string arguments have been
 * elided in this snapshot (DRV_NAME/PFX are empty, the metadata macros have
 * no arguments). Restore them from the upstream ib_srp.c before compiling. */
#define DRV_NAME
#define PFX

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();

/*
 * Module parameters. NOTE(review): every "=;" below marks an initializer that
 * was elided from this snapshot, and every empty MODULE_PARM_DESC()/
 * module_param_cb() lost its arguments — as written these lines do not
 * compile. The parameter names, types and 0444/0644 permissions are the only
 * information preserved here; restore defaults and descriptions from the
 * upstream driver.
 */
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always =;
static bool never_register;
static int topspin_workarounds =;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC();

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC();

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC();

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC();

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC();

module_param(register_always, bool, 0444);
MODULE_PARM_DESC();

module_param(never_register, bool, 0444);
MODULE_PARM_DESC();

/* Custom get/set ops shared by the three transport-timeout parameters below. */
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay =;
module_param_cb();
MODULE_PARM_DESC();

static int srp_fast_io_fail_tmo =;
module_param_cb();
MODULE_PARM_DESC();

static int srp_dev_loss_tmo =;
module_param_cb();
MODULE_PARM_DESC();

static bool srp_use_imm_data =;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC();

static unsigned int srp_max_imm_data =;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC();

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC();

/*
 * Forward declarations for the ib_client callbacks and CM event handlers that
 * are referenced before their definitions, plus the file-scope transport and
 * workqueue handles. NOTE(review): the srp_client initializer ("=;") has been
 * elided in this snapshot.
 */
static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

/* SRP transport class template registered with the SCSI midlayer at init. */
static struct scsi_transport_template *ib_srp_transport_template;
/* Workqueue on which asynchronous target removal work is queued. */
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client =;

static struct ib_sa_client srp_sa_client;

/* NOTE(review): all function bodies in this region are elided ("{}") in this
 * snapshot; the comments below describe only the declared interfaces. */

/* Formatter for the tmo module parameters (reconnect/fast_io_fail/dev_loss). */
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{}

/* Parser/validator for the tmo module parameters. */
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{}

/* NOTE(review): initializer elided — presumably { .get = srp_tmo_get,
 * .set = srp_tmo_set }; confirm upstream. */
static const struct kernel_param_ops srp_tmo_ops =;

/* Translate a Scsi_Host into the srp_target_port that owns it. */
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{}

/* SCSI host template .info callback: human-readable target description. */
static const char *srp_target_info(struct Scsi_Host *host)
{}

/* Nonzero when the target needs the Topspin/Cisco vendor workarounds. */
static int srp_target_is_topspin(struct srp_target_port *target)
{}

/* Allocate and DMA-map an information unit buffer for @host. */
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{}

/* Unmap and free an IU previously obtained from srp_alloc_iu(). */
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{}

/* Asynchronous QP event callback (logging only — TODO confirm upstream). */
static void srp_qp_event(struct ib_event *event, void *context)
{}

/* Transition a freshly created IB QP into a usable state for @target. */
static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{}

/* Create a new IB CM id for @ch, replacing any existing one. */
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{}

/* Create a new RDMA CM id for @ch, replacing any existing one. */
static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{}

/* Dispatch to the IB or RDMA CM id constructor for @ch. */
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; kernel-doc headers below are preserved from the original. */

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{}

/* Convenience wrapper: build an FR pool sized for @target's needs. */
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; comments describe declared interfaces only. */

/* Allocate the CQs, QP and (if used) FR pool backing RDMA channel @ch. */
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{}

/* SA path record query completion callback; @ch_ptr is the srp_rdma_ch. */
static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    unsigned int num_paths, void *ch_ptr)
{}

/* Resolve the IB path to the target via an SA path record query. */
static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{}

/* Resolve address and route via the RDMA CM for @ch. */
static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{}

/* Dispatch to the IB or RDMA CM path lookup for @ch. */
static int srp_lookup_path(struct srp_rdma_ch *ch)
{}

/* Query the subnet timeout of the local port used by @host. */
static u8 srp_get_subnet_timeout(struct srp_host *host)
{}

/* Build and send the SRP_LOGIN_REQ for @ch; @multich selects multichannel. */
static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; comments describe declared interfaces only. */

/* Queue asynchronous removal of @target; returns whether work was queued. */
static bool srp_queue_remove_work(struct srp_target_port *target)
{}

/* Tear down the CM-level connections of all channels of @target. */
static void srp_disconnect_target(struct srp_target_port *target)
{}

/* scsi_host_template .exit_cmd_priv: release per-command SRP resources. */
static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{}

/* scsi_host_template .init_cmd_priv: set up per-command SRP resources. */
static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{}

/* Tear down and free @target and all of its channels and resources. */
static void srp_remove_target(struct srp_target_port *target)
{}

/* Work item wrapper around srp_remove_target(). */
static void srp_remove_work(struct work_struct *work)
{}

/* srp_function_template .rport_delete callback. */
static void srp_rport_delete(struct srp_rport *rport)
{}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{}

/* Perform path lookup, login and connection establishment for @ch. */
static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{}

/* Completion handler for a failed local-invalidate work request. */
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/* Post a local-invalidate WR for @rkey on @ch's queue pair. */
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; kernel-doc headers below are preserved from the original. */

/* Undo the memory registration/DMA mapping done by srp_map_data(). */
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{}

/* Complete an outstanding request with @result (e.g. on channel failure). */
static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{}

/* Context passed to srp_terminate_cmd() while iterating busy commands.
 * NOTE(review): members elided in this snapshot. */
struct srp_terminate_context {};

/* scsi_host_busy_iter callback: terminate one outstanding SCSI command. */
static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{}

/* srp_function_template .terminate_rport_io callback. */
static void srp_terminate_io(struct srp_rport *rport)
{}

/* Calculate maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments are preserved. */

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{}

/* Record one (addr, len, rkey) memory descriptor in the map state. */
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{}

/* Completion handler for a failed fast-registration work request. */
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{}

/* Map a scatterlist using fast registration memory regions. */
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{}

/* Map a scatterlist using plain DMA addresses (no memory registration). */
static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments are preserved. */

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{}

/* Debug-time sanity check comparing mapped lengths against the SG list. */
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{}

/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * return value.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments are preserved. */

/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/**
 * srp_post_send() - send an SRP information unit
 * @ch: RDMA channel over which to send the information unit.
 * @iu: Information unit to send.
 * @len: Length of the information unit excluding immediate data.
 */
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{}

/* Post a receive work request for @iu on @ch's queue pair. */
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{}

/* Handle an SRP_RSP IU: complete the matching SCSI command or TMF. */
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{}

/* Send a target-initiated request's response IU back to the target. */
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{}

/* Handle an SRP_CRED_REQ IU from the target (credit grant). */
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{}

/* Handle an SRP_AER_REQ (asynchronous event) IU from the target. */
static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{}

/* Receive completion handler: dispatch the incoming IU by its opcode. */
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments are preserved. */

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{}

/* Common handler for failed send/recv work completions on a channel. */
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{}

/* scsi_host_template .queuecommand: map, build and post an SRP_CMD IU. */
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{}

/* Derive the RQ retirement timeout from QP attributes after connect. */
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{}

/* Process a successful SRP_LOGIN_RSP carried in a CM REP event. */
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{}

/* Decode and log the reason for an IB CM REJ event on @ch. */
static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
				  const struct ib_cm_event *event,
				  struct srp_rdma_ch *ch)
{}

/* IB CM event dispatcher for a channel's connection state machine. */
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event)
{}

/* Decode and log the reason for an RDMA CM REJECTED event on @ch. */
static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
				    struct rdma_cm_event *event)
{}

/* RDMA CM event dispatcher for a channel's connection state machine. */
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{}

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments are preserved. */

/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{}

/* Send an SRP task management function and wait for its completion. */
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func, u8 *status)
{}

/* SCSI error handler: abort a single outstanding command. */
static int srp_abort(struct scsi_cmnd *scmnd)
{}

/* SCSI error handler: logical unit reset. */
static int srp_reset_device(struct scsi_cmnd *scmnd)
{}

/* SCSI error handler: reconnect the whole transport (host reset). */
static int srp_reset_host(struct scsi_cmnd *scmnd)
{}

/* scsi_host_template .target_alloc callback. */
static int srp_target_alloc(struct scsi_target *starget)
{}

/* scsi_host_template .slave_configure callback. */
static int srp_slave_configure(struct scsi_device *sdev)
{}

/*
 * Read-only sysfs attributes exported on each SRP SCSI host. Each *_show()
 * formats one field of the srp_target_port owning the Scsi_Host.
 * NOTE(review): all bodies are elided ("{}") in this snapshot, and the
 * srp_host_attrs[]/srp_template initializers have been removed.
 */
static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{}

static DEVICE_ATTR_RO(id_ext);

static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{}

static DEVICE_ATTR_RO(ioc_guid);

static ssize_t service_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(service_id);

static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{}

static DEVICE_ATTR_RO(pkey);

static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{}

static DEVICE_ATTR_RO(sgid);

static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{}

static DEVICE_ATTR_RO(dgid);

static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{}

static DEVICE_ATTR_RO(orig_dgid);

static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{}

static DEVICE_ATTR_RO(req_lim);

static ssize_t zero_req_lim_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(zero_req_lim);

static ssize_t local_ib_port_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(local_ib_port);

static ssize_t local_ib_device_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(local_ib_device);

static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{}

static DEVICE_ATTR_RO(ch_count);

static ssize_t comp_vector_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(comp_vector);

static ssize_t tl_retry_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(tl_retry_count);

static ssize_t cmd_sg_entries_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(cmd_sg_entries);

static ssize_t allow_ext_sg_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(allow_ext_sg);

/* Attribute array referenced by the host template's attribute groups. */
static struct attribute *srp_host_attrs[] =;

ATTRIBUTE_GROUPS();

/* SCSI host template tying together the callbacks defined above. */
static const struct scsi_host_template srp_template =;

/* NOTE(review): function bodies in this region are elided ("{}") in this
 * snapshot; pre-existing comments and initializers ("=;") are as stripped. */

/* Count the SCSI devices attached to @host. */
static int srp_sdev_count(struct Scsi_Host *host)
{}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{}

/* Device-model release callback for the srp_host embedded device. */
static void srp_release_dev(struct device *dev)
{}

static struct attribute *srp_class_attrs[];

ATTRIBUTE_GROUPS();

static struct class srp_class =;

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{}

/* NOTE(review): function bodies, the option-token enum and the token/
 * mandatory-option tables are elided in this snapshot; pre-existing comments
 * are preserved. */

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 * or
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
 *
 * to the add_target sysfs attribute.
 */
enum {};

static unsigned int srp_opt_mandatory[] =;

static const match_table_t srp_opt_tokens =;

/**
 * srp_parse_in - parse an IP address and port number combination
 * @net:	   [in]  Network namespace.
 * @sa:		   [out] Address family, IP address and port number.
 * @addr_port_str: [in]  IP address and port number.
 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
 *
 * Parse the following address formats:
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
 */
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
			const char *addr_port_str, bool *has_port)
{}

/* Parse the add_target option string into @target's fields. */
static int srp_parse_options(struct net *net, const char *buf,
			     struct srp_target_port *target)
{}

/* NOTE(review): function bodies and initializers in this region are elided
 * in this snapshot; comments describe declared interfaces only. */

/* sysfs add_target store: parse options, create and log in a target port. */
static ssize_t add_target_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{}

static DEVICE_ATTR_WO(add_target);

/* sysfs: name of the IB device backing this SRP host. */
static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{}

static DEVICE_ATTR_RO(ibdev);

/* sysfs: local IB port number of this SRP host. */
static ssize_t port_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{}

static DEVICE_ATTR_RO(port);

static struct attribute *srp_class_attrs[] =;

/* Create and register an srp_host for one port of @device. */
static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{}

/* ib_client .rename callback: propagate an IB device rename to sysfs. */
static void srp_rename_dev(struct ib_device *device, void *client_data)
{}

/* ib_client .add callback: set up per-device state and one host per port. */
static int srp_add_one(struct ib_device *device)
{}

/* ib_client .remove callback: tear down all hosts/targets on @device. */
static void srp_remove_one(struct ib_device *device, void *client_data)
{}

/* Transport-class callbacks registered with scsi_transport_srp.
 * NOTE(review): initializer elided in this snapshot. */
static struct srp_function_template ib_srp_transport_functions =;

/* Module entry: register transport template, class and IB client.
 * NOTE(review): body elided; the empty module_init() argument must be
 * restored to srp_init_module (cf. module_exit below). */
static int __init srp_init_module(void)
{}

/* Module exit: unregister everything registered by srp_init_module(). */
static void __exit srp_cleanup_module(void)
{}

module_init();
module_exit(srp_cleanup_module);