#define pr_fmt(fmt) …
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>
#include "nvme.h"
#include "fabrics.h"
#define NVME_RDMA_CM_TIMEOUT_MS …
#define NVME_RDMA_MAX_SEGMENTS …
#define NVME_RDMA_MAX_INLINE_SEGMENTS …
#define NVME_RDMA_DATA_SGL_SIZE …
#define NVME_RDMA_METADATA_SGL_SIZE …
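/*
 * Core data structures: an nvme_rdma_device per underlying IB device shared
 * by all controllers using it, a queue entry (nvme_rdma_qe) wrapping a
 * DMA-mapped capsule buffer, per-request state carrying the data/metadata
 * SGLs and registration resources, the per-queue state (CM id, QP, CQ), and
 * the controller itself.
 */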
struct nvme_rdma_device { … };
struct nvme_rdma_qe { … };
struct nvme_rdma_sgl { … };
struct nvme_rdma_queue;
struct nvme_rdma_request { … };
enum nvme_rdma_queue_flags { … };
struct nvme_rdma_queue { … };
struct nvme_rdma_ctrl { … };
static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{ … }
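/*
 * Global bookkeeping: all known RDMA devices and all active controllers,
 * each list protected by its own mutex.
 */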
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
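/*
 * When set, memory registration is used even for requests whose data fits
 * in a single segment.
 */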
static bool register_always = …;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(…) …;
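/* Forward declarations for callbacks referenced before their definitions. */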
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvme_rdma_complete_rq(struct request *rq);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
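/*
 * Queue helpers: a queue's index within the controller, whether it is a
 * polling queue, and how much inline data it can carry per command capsule.
 */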
static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{ … }
static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{ … }
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{ … }
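/*
 * Allocation, DMA mapping and teardown of queue entries (command/response
 * capsules) and of the per-queue receive ring.
 */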
static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
size_t capsule_size, enum dma_data_direction dir)
{ … }
static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
size_t capsule_size, enum dma_data_direction dir)
{ … }
static void nvme_rdma_free_ring(struct ib_device *ibdev,
struct nvme_rdma_qe *ring, size_t ib_queue_size,
size_t capsule_size, enum dma_data_direction dir)
{ … }
static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
size_t ib_queue_size, size_t capsule_size,
enum dma_data_direction dir)
{ … }
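/*
 * QP plumbing: logging of asynchronous QP events, waiting for the RDMA CM to
 * complete the current connection step, and QP creation sized from the queue
 * depth and a send-WR factor.
 */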
static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{ … }
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{ … }
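/*
 * blk-mq integration: per-request allocation/teardown of the command capsule
 * and SGL buffers, and binding of hardware contexts to their nvme_rdma_queue
 * for the I/O and admin tag sets.
 */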
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{ … }
static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{ … }
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{ … }
static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{ … }
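/*
 * Device management: nvme_rdma_device objects are reference counted and
 * shared; lookup by CM id returns an existing entry or allocates a new one.
 */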
static void nvme_rdma_free_dev(struct kref *ref)
{ … }
static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
{ … }
static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
{ … }
static struct nvme_rdma_device *
nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
{ … }
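/*
 * Per-queue IB resource setup and teardown: the completion queue, the queue
 * pair, and the MR pools used for memory registration (including signature
 * MRs when protection information is supported).
 */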
static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
{ … }
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{ … }
static int nvme_rdma_create_cq(struct ib_device *ibdev,
struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int idx, size_t queue_size)
{ … }
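/*
 * Queue lifecycle: allocation creates the CM id and starts address
 * resolution, start issues the fabrics connect, stop disconnects and drains
 * the QP, and free releases the CM id and queue memory.
 */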
static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{ … }
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{ … }
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{ … }
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{ … }
static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
{ … }
static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{ … }
static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
int first, int last)
{ … }
static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{ … }
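/*
 * Controller setup: tag-set allocation and construction/teardown of the
 * admin and I/O queues, used both on initial creation ("new") and on
 * reset/reconnect.
 */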
static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
{ … }
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{ … }
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool new)
{ … }
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{ … }
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{ … }
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{ … }
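/*
 * Controller teardown, reconnect and error recovery: on transport errors the
 * queues are torn down and reconnect attempts are scheduled until the
 * configured limit is exceeded, at which point the controller is removed.
 */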
static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{ … }
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{ … }
static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
int status)
{ … }
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{ … }
static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{ … }
static void nvme_rdma_error_recovery_work(struct work_struct *work)
{ … }
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{ … }
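/*
 * Completion-side helpers: ending a request once both the NVMe response and
 * any required local invalidation have completed, plus common error
 * reporting for failed work requests and rkey invalidation.
 */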
static void nvme_rdma_end_request(struct nvme_rdma_request *req)
{ … }
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
const char *op)
{ … }
static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req)
{ … }
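/*
 * Data mapping: request data is described to the target either inline in the
 * capsule, as a single SGE, or through a fast-registration MR; with
 * protection information enabled, data and metadata are registered via a
 * signature MR configured with the appropriate DIF domains and check masks.
 */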
static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
{ … }
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{ … }
static int nvme_rdma_set_sg_null(struct nvme_command *c)
{ … }
static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req, struct nvme_command *c,
int count)
{ … }
static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req, struct nvme_command *c)
{ … }
static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req, struct nvme_command *c,
int count)
{ … }
static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
struct nvme_command *cmd, struct ib_sig_domain *domain,
u16 control, u8 pi_type)
{ … }
static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
u8 pi_type)
{ … }
static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
{ … }
static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req, struct nvme_command *c,
int count, int pi_count)
{ … }
static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
int *count, int *pi_count)
{ … }
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{ … }
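/*
 * Work-request posting: sending command capsules (optionally chained behind
 * a registration WR), reposting receive buffers, and issuing the async event
 * request on the admin queue.
 */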
static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first)
{ … }
static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe)
{ … }
static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{ … }
static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{ … }
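/*
 * Receive path and connection management: received completions are matched
 * to requests by command id, remote invalidation is honoured or a local
 * invalidate is issued, and RDMA CM events drive address/route resolution,
 * connection establishment, rejection handling and error recovery.
 */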
static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
struct nvme_completion *cqe, struct ib_wc *wc)
{ … }
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
struct rdma_cm_event *ev)
{ … }
static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{ … }
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{ … }
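/*
 * blk-mq request path: timeouts either kick error recovery or force-complete
 * the request depending on controller state; queue_rq maps the data and
 * posts the send WR; poll services polling queues; complete_rq unmaps data
 * (and checks PI status) before completing the request.
 */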
static void nvme_rdma_complete_timed_out(struct request *rq)
{ … }
static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
{ … }
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{ … }
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{ … }
static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
{ … }
static void nvme_rdma_complete_rq(struct request *rq)
{ … }
static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{ … }
static const struct blk_mq_ops nvme_rdma_mq_ops = …;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops = …;
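/*
 * Controller lifecycle entry points: shutdown/delete/reset handling, the
 * nvme_ctrl_ops backing the NVMe core, duplicate-controller detection, and
 * controller allocation/creation from the parsed fabrics options.
 */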
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{ … }
static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{ … }
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{ … }
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = …;
static bool
nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
{ … }
static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{ … }
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{ … }
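/*
 * Module glue: nvmf transport registration, an ib_client callback that
 * removes controllers when their IB device goes away, and module init/exit.
 */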
static struct nvmf_transport_ops nvme_rdma_transport = …;
static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{ … }
static struct ib_client nvme_rdma_ib_client = …;
static int __init nvme_rdma_init_module(void)
{ … }
static void __exit nvme_rdma_cleanup_module(void)
{ … }
module_init(…) …;
module_exit(nvme_rdma_cleanup_module);
MODULE_DESCRIPTION(…) …;
MODULE_LICENSE(…) …;