linux/drivers/nvme/target/loop.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
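/*
 * Example use (names below are placeholders): create a subsystem and
 * namespace under /sys/kernel/config/nvmet/subsystems/, create a port with
 * addr_trtype set to "loop", link the subsystem into the port, and then
 * connect from the host side of the same machine, e.g. with nvme-cli:
 *
 *	nvme connect -t loop -n testnqn
 */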
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

/* per-request state, shared between the host and target halves */
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;
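/*
 * The loop transport implements both sides of the fabric in one module: a
 * host transport (nvme_loop_ctrl_ops/nvme_loop_transport) and a target
 * transport (nvme_loop_ops).  Commands submitted through blk-mq on the host
 * side are handed directly to the co-located nvmet core for execution, and
 * completions come back through nvme_loop_queue_response(), so no wire
 * protocol or data copy is involved.
 */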

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

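/*
 * Completion path: invoked by the nvmet core once the target side has
 * finished a request.  AEN completions are forwarded to
 * nvme_complete_async_event(); everything else completes the originating
 * host blk-mq request.
 */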
static void nvme_loop_queue_response(struct nvmet_req *req)
{}

static void nvme_loop_execute_work(struct work_struct *work)
{}

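/*
 * Submission path: the host-side ->queue_rq handler sets up the nvmet
 * request embedded in the iod (command, CQE and data scatterlist share the
 * host request's memory) and schedules iod->work so that
 * nvme_loop_execute_work() runs the command in the nvmet core.
 */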
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{}

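/*
 * Give the flush queue lock of loop hctxs its own lockdep class:
 * flush_end_io() can be called recursively through the loopback path, which
 * would otherwise raise false-positive lockdep warnings.
 */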
static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

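/*
 * Queue lifecycle: queue 0 is the admin queue with its own tag set, the
 * rest are I/O queues.  Each nvme_loop_queue pairs a host-side hw queue
 * with a target-side nvmet submission/completion queue; I/O queues are
 * connected through the usual fabrics Connect commands.
 */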
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{}

static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{}

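/*
 * Controller teardown: quiesce and destroy the I/O queues first, shut down
 * (or disable) the controller, then tear down the admin queue.
 */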
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{}

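/*
 * ->create_ctrl handler for the "loop" nvmf transport: allocate the
 * controller, bind it to a matching loop port, bring up the admin queue,
 * then create and connect the I/O queues before marking the controller
 * live.
 */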
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{}

static int nvme_loop_add_port(struct nvmet_port *port)
{}

static void nvme_loop_remove_port(struct nvmet_port *port)
{}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR | NVMF_OPT_QUEUE_SIZE,
};

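/*
 * Module init registers the target-side transport with the nvmet core and
 * the host-side transport with the nvmf core; cleanup unregisters both and
 * deletes any controllers still on nvme_loop_ctrl_list.
 */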
static int __init nvme_loop_init_module(void)
{}

static void __exit nvme_loop_cleanup_module(void)
{}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */