linux/drivers/nvme/target/core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"
#include "debugfs.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of these lists/structures, the write lock must be
 * held; when reading (populating the discovery log page or checking a
 * host-subsystem link), the read lock is taken to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
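
/*
 * Illustrative sketch, not part of the original file: the lock
 * discipline described above.  Writers that mutate any protected
 * resource take nvmet_config_sem for writing; readers such as the
 * discovery log page generator take it for reading.  The example_*
 * helpers are hypothetical.
 */
#if 0
static void example_config_update(struct nvmet_subsys *subsys)
{
	down_write(&nvmet_config_sem);
	/* mutate the subsystems list, allowed hosts list, etc. */
	nvmet_genctr++;
	up_write(&nvmet_config_sem);
}

static void example_build_disc_log(void)
{
	down_read(&nvmet_config_sem);
	/* walk ports/subsystems to populate discovery log entries */
	up_read(&nvmet_config_sem);
}
#endif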

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
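
/*
 * Illustrative sketch, modeled on the configfs ANA state handler:
 * updates to ANA group state are made with nvmet_ana_sem held for
 * writing, bump nvmet_ana_chgcnt (reported in the ANA log page header),
 * and are followed by an ANA change AEN.  The helper name is
 * hypothetical.
 */
#if 0
static void example_set_ana_state(struct nvmet_port *port, u32 grpid,
		enum nvme_ana_state state)
{
	down_write(&nvmet_ana_sem);
	port->ana_state[grpid] = state;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(port);
}
#endif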

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{}
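
/*
 * Illustrative sketch (abbreviated, assumes the NVME_SC_* constants
 * from linux/nvme.h): the kind of translation errno_to_nvme_status()
 * performs, mapping a kernel errno from the backing device to an NVMe
 * status code with the Do Not Retry bit set.
 */
#if 0
static u16 example_errno_to_status(int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EREMOTEIO:
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}
#endif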

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{}

static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{}
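
/*
 * nvmet_async_event_result() above packs the AEN completion dword 0 as
 * defined by the NVMe spec: event type in bits 07:00, event information
 * in bits 15:08, and the associated log page identifier in bits 23:16,
 * i.e. event_type | (event_info << 8) | (log_page << 16).
 */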

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{}

static void nvmet_async_event_work(struct work_struct *work)
{}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
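
/*
 * Illustrative sketch: the shape of a transport registration.  The
 * callback names follow struct nvmet_fabrics_ops in nvmet.h; the
 * example_* identifiers are hypothetical.
 */
#if 0
static const struct nvmet_fabrics_ops example_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_TCP,
	.add_port	= example_add_port,
	.remove_port	= example_remove_port,
	.queue_response	= example_queue_response,
	.delete_ctrl	= example_delete_ctrl,
};

static int __init example_transport_init(void)
{
	return nvmet_register_transport(&example_ops);
}
#endif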

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{}

int nvmet_enable_port(struct nvmet_port *port)
{}

void nvmet_disable_port(struct nvmet_port *port)
{}

static void nvmet_keep_alive_timer(struct work_struct *work)
{}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{}

void nvmet_put_namespace(struct nvmet_ns *ns)
{}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{}
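
/*
 * Illustrative fragment of the caller pattern required by the note
 * above (the surrounding code is hypothetical):
 */
#if 0
	mutex_lock(&ctrl->subsys->lock);
	nvmet_p2pmem_ns_add_p2p(ctrl, ns);
	mutex_unlock(&ctrl->subsys->lock);
#endif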

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{}

int nvmet_ns_enable(struct nvmet_ns *ns)
{}

void nvmet_ns_disable(struct nvmet_ns *ns)
{}

void nvmet_ns_free(struct nvmet_ns *ns)
{}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{}

static void nvmet_update_sq_head(struct nvmet_req *req)
{}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{}

int nvmet_sq_init(struct nvmet_sq *sq)
{}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
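
/*
 * Illustrative sketch of the SQ reference lifecycle implied by the
 * helpers above, assuming the ref/confirm_done/free_done fields of
 * struct nvmet_sq in nvmet.h: nvmet_sq_init() initializes a percpu_ref
 * released by nvmet_sq_free(), and nvmet_sq_destroy() kills it with
 * nvmet_confirm_sq() as the confirmation callback, then waits for all
 * in-flight requests to drain.
 */
#if 0
	/* in nvmet_sq_init() */
	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);

	/* in nvmet_sq_destroy() */
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
#endif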

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
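
/*
 * Illustrative usage: command handlers validate the transfer length
 * first and simply return on failure, since the helper has already
 * completed the request with an appropriate error.  The handler and
 * payload type are hypothetical.
 */
#if 0
static void example_execute_cmd(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, sizeof(struct example_payload)))
		return;

	/* ... perform the command, then complete it ... */
	nvmet_req_complete(req, NVME_SC_SUCCESS);
}
#endif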

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

static inline bool nvmet_cc_en(u32 cc)
{}

static inline u8 nvmet_cc_css(u32 cc)
{}

static inline u8 nvmet_cc_mps(u32 cc)
{}

static inline u8 nvmet_cc_ams(u32 cc)
{}

static inline u8 nvmet_cc_shn(u32 cc)
{}

static inline u8 nvmet_cc_iosqes(u32 cc)
{}

static inline u8 nvmet_cc_iocqes(u32 cc)
{}

static inline bool nvmet_css_supported(u8 cc_css)
{}
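
/*
 * Illustrative sketch: the accessors above extract Controller
 * Configuration (CC) register fields by shift and mask, presumably via
 * the NVME_CC_*_SHIFT constants from linux/nvme.h, e.g.:
 */
#if 0
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;		/* nvmet_cc_en() */
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;	/* nvmet_cc_css() */
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;	/* nvmet_cc_mps() */
#endif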

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{}

struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{}

u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{}

static void nvmet_fatal_error_handler(struct work_struct *work)
{}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{}

static void nvmet_ctrl_free(struct kref *ref)
{}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len)
{}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{}

static void nvmet_subsys_free(struct kref *ref)
{}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{}

static int __init nvmet_init(void)
{}

static void __exit nvmet_exit(void)
{}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");