linux/drivers/nvme/target/rdma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)
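
/*
 * Worked example (assuming the 4KB page size noted just below): the default
 * inline allocation is a single page, and the ceiling is reached with
 * NVMET_RDMA_MAX_INLINE_SGE pages, i.e. 4 * 4KB = 16KB, which is where the
 * SZ_16K cap in NVMET_RDMA_MAX_INLINE_DATA_SIZE comes from.
 */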

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5
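
/*
 * Worked example (illustrative, not normative): MDTS is a power of two in
 * units of the minimum memory page size, so with a 4KB mpsmin an MDTS of 8
 * caps transfers at 4KB << 8 = 1MB, while the metadata-capable MDTS of 5
 * caps them at 4KB << 5 = 128KB.
 */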

#define NVMET_RDMA_BACKLOG	128

struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {};

enum {};

struct nvmet_rdma_rsp {};

enum nvmet_rdma_queue_state {};

struct nvmet_rdma_queue {};

struct nvmet_rdma_port {};

struct nvmet_rdma_srq {};

struct nvmet_rdma_device {};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
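
/*
 * Example usage (assumed module/parameter spelling, shown for illustration):
 *
 *   modprobe nvmet-rdma use_srq=Y srq_size=1024
 *
 * use_srq switches receive buffers from per-queue RQs to device-wide shared
 * receive queues; srq_size is validated by srq_size_set() before it is stored.
 */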

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{}
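
/*
 * A minimal sketch of what a custom kernel_param_ops setter like the one
 * above typically does (the >= 256 floor is an assumption here, not taken
 * from this listing): parse the value, bounds-check it, then fall back to
 * the stock integer setter.
 */
#if 0	/* illustrative only; the real body is elided in this listing */
static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}
#endif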

static int num_pages(int len)
{}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{}

static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
				     struct ib_sig_attrs *sig_attrs)
{}

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
				  struct ib_sig_attrs *sig_attrs)
{}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{}

static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{}

static void nvmet_rdma_free_dev(struct kref *ref)
{}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:      nvmet rdma queue (cm id qp_context)
 *
 * A DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to be unplugged. Note that this event can be generated on a normal
 * queue cm_id and/or a device-bound listener cm_id (in which case
 * queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In that case
 * we nullify the priv to prevent a double cm_id destruction and destroy
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{}
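
/*
 * A minimal sketch of the handling described above (the port->cm_id field is
 * an assumption here, since the struct bodies are elided in this listing):
 * queue cm_ids are left to the registered ib_client, while for a listener
 * cm_id the port's reference is cleared atomically and a non-zero return
 * lets the RDMA CM destroy the cm_id itself.
 */
#if 0	/* illustrative only; the real body is elided in this listing */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_port *port;

	if (queue) {
		/* Queue cm_ids are torn down by the registered ib_client. */
		return 0;
	}

	port = cm_id->context;

	/*
	 * Clear the listener reference atomically so a concurrent
	 * remove_port cannot destroy the same cm_id twice.
	 */
	if (xchg(&port->cm_id, NULL) != cm_id)
		return 0;

	/* Non-zero tells the RDMA CM core to destroy its own cm_id. */
	return 1;
}
#endif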

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{}

static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{}

static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{}

static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
{}

static void nvmet_rdma_repair_port_work(struct work_struct *w)
{}

static int nvmet_rdma_add_port(struct nvmet_port *nport)
{}

static void nvmet_rdma_remove_port(struct nvmet_port *nport)
{}

static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{}

static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len)
{}

static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{}

static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{}
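
/*
 * A minimal sketch of how the MDTS limits defined at the top of this file
 * are typically reported (the ctrl->pi_support field name is an assumption
 * here): controllers with protection information enabled get the smaller,
 * metadata-aware limit.
 */
#if 0	/* illustrative only; the real body is elided in this listing */
static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
	if (ctrl->pi_support)
		return NVMET_RDMA_MAX_METADATA_MDTS;
	return NVMET_RDMA_MAX_MDTS;
}
#endif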

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.msdbd			= 1,
	.flags			= NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
	.disc_traddr		= nvmet_rdma_disc_port_addr,
	.host_traddr		= nvmet_rdma_host_port_addr,
	.get_mdts		= nvmet_rdma_get_mdts,
	.get_max_queue_size	= nvmet_rdma_get_max_queue_size,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{}

static void __exit nvmet_rdma_exit(void)
{}
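
/*
 * A minimal sketch of the usual registration sequence for module init/exit
 * (error label and exact teardown order are illustrative): the ib_client
 * handles device removal for queues, and the fabrics ops plug this
 * transport into the nvmet core.
 */
#if 0	/* illustrative only; the real bodies are elided in this listing */
static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
}
#endif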

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_DESCRIPTION("NVMe over Fabrics RDMA target");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */