linux/drivers/nvme/host/core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
#include <linux/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

/*
 * Older kernels didn't enable protection information if it was at an offset.
 * Newer kernels do, so reads of such formats created by prior kernels break
 * after an upgrade, since the metadata that was written did not contain a
 * valid checksum.
 */
static bool disable_pi_offsets = false;
module_param(disable_pi_offsets, bool, 0444);
MODULE_PARM_DESC(disable_pi_offsets,
	"disable protection information if it has an offset");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL();

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL();

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL();

static LIST_HEAD(nvme_subsystems);
DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
static const struct class nvme_class =;

static const struct class nvme_subsys_class =;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static const struct class nvme_ns_chr_class =;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();
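
/*
 * Minimal sketch of the scheduling step described above, assuming the
 * nvme_ctrl_state() helper and the ctrl->reset_work member; the in-tree
 * nvme_try_sched_reset() also rolls the controller state back when queueing
 * fails.
 */
static int __maybe_unused nvme_try_sched_reset_sketch(struct nvme_ctrl *ctrl)
{
	/* only a controller already set to RESETTING may queue reset_work */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
		return -EBUSY;
	/* queue_work() returns false if the work is already pending */
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}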

static void nvme_failfast_work(struct work_struct *work)
{}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{}


int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{}

static void nvme_delete_ctrl_work(struct work_struct *work)
{}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{}

static blk_status_t nvme_error_status(u16 status)
{}

static void nvme_retry_req(struct request *req)
{}

static void nvme_log_error(struct request *req)
{}

static void nvme_log_err_passthru(struct request *req)
{}

enum nvme_disposition {};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{}

static inline void nvme_end_req_zoned(struct request *req)
{}

static inline void __nvme_end_req(struct request *req)
{}

void nvme_end_req(struct request *req)
{}

void nvme_complete_rq(struct request *req)
{}
EXPORT_SYMBOL_GPL();

void nvme_complete_batch_req(struct request *req)
{}
EXPORT_SYMBOL_GPL();

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{}
EXPORT_SYMBOL_GPL();
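
/*
 * Illustrative transport-side usage of nvme_host_path_error(): after a failed
 * submission in ->queue_rq() the transport tears down whatever it allocated
 * for the request and passes the helper's return value straight back so the
 * multipath code can fail over.  The function below is hypothetical.
 */
static blk_status_t __maybe_unused nvme_example_queue_rq_unwind(struct request *req)
{
	/* ... transport specific unwinding of resources tied to @req ... */
	return nvme_host_path_error(req);
}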

bool nvme_cancel_request(struct request *req, void *data)
{}
EXPORT_SYMBOL_GPL();

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{}
EXPORT_SYMBOL_GPL();

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

static void nvme_free_ns_head(struct kref *ref)
{}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{}

void nvme_put_ns_head(struct nvme_ns_head *head)
{}

static void nvme_free_ns(struct kref *kref)
{}

bool nvme_get_ns(struct nvme_ns *ns)
{}

void nvme_put_ns(struct nvme_ns *ns)
{}
EXPORT_SYMBOL_NS_GPL();

static inline void nvme_clear_nvme_request(struct request *req)
{}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{}
EXPORT_SYMBOL_GPL();
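
/*
 * Sketch of the passthrough request life cycle around nvme_init_request():
 * allocate a request on a queue, attach the NVMe command, execute it
 * synchronously and free it.  Using the admin queue here is an assumption
 * for illustration; error handling is trimmed.
 */
static int __maybe_unused nvme_passthru_example(struct nvme_ctrl *ctrl,
		struct nvme_command *cmd)
{
	struct request *req;
	int ret;

	req = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(cmd), 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	nvme_init_request(req, cmd);
	ret = nvme_execute_rq(req, false);	/* >0: NVMe status, <0: errno */
	blk_mq_free_request(req);
	return ret;
}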

/*
 * For commands we're not in a state to send to the device, the default action
 * is to busy them and retry once the controller state has recovered.  However,
 * if the controller is deleting, or if the request is marked for failfast or
 * NVMe multipath, it is failed immediately.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{}
EXPORT_SYMBOL_GPL();

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state)
{}
EXPORT_SYMBOL_GPL();
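
/*
 * Sketch of how a transport's ->queue_rq() typically gates submission on the
 * two helpers above.  "queue_live" would come from transport state; the
 * surrounding names are illustrative only and the normal setup path is
 * elided.
 */
static blk_status_t __maybe_unused nvme_example_ready_gate(struct nvme_ctrl *ctrl,
		struct request *rq, bool queue_live)
{
	if (!nvme_check_ready(ctrl, rq, queue_live))
		return nvme_fail_nonready_command(ctrl, rq);

	/* ... regular command setup and submission ... */
	return BLK_STS_OK;
}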

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
			      struct request *req)
{}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{}

/*
 * NVMe does not support a dedicated command to issue an atomic write. A write
 * which does not adhere to the device atomic limits will silently be executed
 * non-atomically. The request issuer should ensure that the write is within
 * the queue atomic writes limits, but just validate this in case it is not.
 */
static bool nvme_valid_atomic_write(struct request *req)
{}
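
/*
 * Sketch of the validation described above, assuming the block layer's
 * queue_atomic_write_unit_max_bytes() and queue_atomic_write_boundary_bytes()
 * limits: the write must fit within the advertised unit and must not straddle
 * an atomic write boundary.  The in-tree nvme_valid_atomic_write() is the
 * authoritative version.
 */
static bool __maybe_unused nvme_valid_atomic_write_sketch(struct request *req)
{
	struct request_queue *q = req->q;
	u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);

	if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
		return false;

	if (boundary_bytes) {
		u64 mask = boundary_bytes - 1, imask = ~mask;
		u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
		u64 end = start + blk_rq_bytes(req) - 1;

		/* a write larger than the boundary must cross it */
		if (blk_rq_bytes(req) > boundary_bytes)
			return false;

		/* start and end must fall within the same boundary window */
		if ((start & imask) != (end & imask))
			return false;
	}

	return true;
}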

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{}

void nvme_cleanup_cmd(struct request *req)
{}
EXPORT_SYMBOL_GPL();

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{}
EXPORT_SYMBOL_GPL();

/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
int nvme_execute_rq(struct request *rq, bool at_head)
{}
EXPORT_SYMBOL_NS_GPL();

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags)
{}
EXPORT_SYMBOL_GPL();

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{}
EXPORT_SYMBOL_GPL();
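
/*
 * Example caller of nvme_submit_sync_cmd(): issue Identify Controller on the
 * admin queue and return the 4KB result buffer.  This mirrors what
 * nvme_identify_ctrl() further down does; it is reproduced here only as an
 * illustration of the synchronous submission API.
 */
static int __maybe_unused nvme_identify_ctrl_example(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kzalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}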

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{}
EXPORT_SYMBOL_NS_GPL();

u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{}
EXPORT_SYMBOL_NS_GPL();

void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{}
EXPORT_SYMBOL_NS_GPL();

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{}
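
/*
 * Numeric sketch of the "half of KATO" rule quoted above: ctrl->kato is in
 * seconds, so the keep-alive work is re-queued roughly every kato * HZ / 2
 * jiffies.  The in-tree nvme_keep_alive_work_period() additionally halves
 * this when traffic-based keep-alive (TBKAS) is in effect.
 */
static unsigned long __maybe_unused nvme_keep_alive_delay_sketch(struct nvme_ctrl *ctrl)
{
	return ctrl->kato * HZ / 2;
}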

static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{}

static void nvme_keep_alive_finish(struct request *rq,
		blk_status_t status, struct nvme_ctrl *ctrl)
{}

static void nvme_keep_alive_work(struct work_struct *work)
{}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{}

static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns)
{}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{}

int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_id_ns **id)
{}

static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{}

static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{}
EXPORT_SYMBOL_GPL();

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{}
EXPORT_SYMBOL_GPL();

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{}
EXPORT_SYMBOL_GPL();

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{}

static int nvme_ns_open(struct nvme_ns *ns)
{}

static void nvme_ns_release(struct nvme_ns *ns)
{}

static int nvme_open(struct gendisk *disk, blk_mode_t mode)
{}

static void nvme_release(struct gendisk *disk)
{}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{}

static bool nvme_init_integrity(struct nvme_ns_head *head,
		struct queue_limits *lim, struct nvme_ns_info *info)
{}

static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{}

static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns_nvm **nvmp)
{}

static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
		struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
{}

static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head, struct nvme_id_ns *id,
		struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info)
{}


static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
			struct nvme_id_ns *id, struct queue_limits *lim,
			u32 bs, u32 atomic_bs)
{}

static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
{}

static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
		struct queue_limits *lim)
{}

static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
		struct queue_limits *lim)
{}

static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
{}

static inline bool nvme_first_scan(struct gendisk *disk)
{}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
		struct queue_limits *lim)
{}

static int nvme_update_ns_info_generic(struct nvme_ns *ns,
		struct nvme_ns_info *info)
{}

static int nvme_update_ns_info_block(struct nvme_ns *ns,
		struct nvme_ns_info *info)
{}

static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
{}

int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
		enum blk_unique_id type)
{}

static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{}

#ifdef CONFIG_BLK_SED_OPAL
static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{}

static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
{}
#else
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
{
}
#endif /* CONFIG_BLK_SED_OPAL */

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_bdev_ops =;

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
		u32 timeout, const char *op)
{}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{}
EXPORT_SYMBOL_GPL();

int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{}

static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
{}

/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
		u64 *transition_time, unsigned *last_index)
{}

/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{}
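
/*
 * Numeric sketch of the default 2% heuristic described above: if at most 2%
 * of the time may be spent transitioning, a state must be idle for
 * 50 * (enlat + exlat) microseconds before being entered.  The latencies come
 * from the Identify Controller power state descriptors; this helper is
 * illustrative only and skips the clamping the real table construction does.
 */
static u64 __maybe_unused nvme_apst_idle_time_us_sketch(const struct nvme_id_power_state *ps)
{
	u64 total_latency_us = le32_to_cpu(ps->entry_lat) +
			       le32_to_cpu(ps->exit_lat);

	return 50 * total_latency_us;
}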

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{}

struct nvme_core_quirk_entry {};

static const struct nvme_core_quirk_entry core_quirks[] =;

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{}
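
/*
 * Sketch of the comparison described by the comment above: @match must be a
 * prefix of @idstr and the remainder of the @len-byte field must be space
 * padding.  This closely follows the in-tree helper but is reproduced only
 * as an illustration.
 */
static bool __maybe_unused string_matches_sketch(const char *idstr,
		const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	if (matchlen > len || memcmp(idstr, match, matchlen))
		return false;

	/* everything after the match up to @len must be blanks */
	return strspn(idstr + matchlen, " ") == len - matchlen;
}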

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{}

static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{}

static void nvme_release_subsystem(struct device *dev)
{}

static void nvme_destroy_subsystem(struct kref *ref)
{}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{}

static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{}

static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{}

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset)
{}

static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
{}

static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{}

static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
{}

static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{}

static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{}

static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{}

static int nvme_init_identify(struct nvme_ctrl *ctrl)
{}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
{}
EXPORT_SYMBOL_GPL();

static int nvme_dev_open(struct inode *inode, struct file *file)
{}

static int nvme_dev_release(struct inode *inode, struct file *file)
{}

static const struct file_operations nvme_dev_fops =;

static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid)
{}

static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
{}

static void nvme_cdev_rel(struct device *dev)
{}

void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{}

int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner)
{}

static int nvme_ns_chr_open(struct inode *inode, struct file *file)
{}

static int nvme_ns_chr_release(struct inode *inode, struct file *file)
{}

static const struct file_operations nvme_ns_chr_fops =;

static int nvme_add_ns_cdev(struct nvme_ns *ns)
{}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{}

static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{}

static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{}

struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{}
EXPORT_SYMBOL_NS_GPL();

/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{}
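
/*
 * Sketch of the ordered insertion above: walk the per-controller namespace
 * list backwards and link the new entry after the first namespace with a
 * smaller NSID, falling back to the list head.  The real helper uses the RCU
 * list primitives and holds the appropriate lock; both are omitted here.
 */
static void __maybe_unused nvme_ns_add_sorted_sketch(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}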

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{}

static void nvme_ns_remove(struct nvme_ns *ns)
{}

static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{}

static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{}

static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{}

/**
 * struct async_scan_info - keeps track of controller & NSIDs to scan
 * @ctrl:	Controller on which namespaces are being scanned
 * @next_nsid:	Index of next NSID to scan in ns_list
 * @ns_list:	Pointer to list of NSIDs to scan
 *
 * Note: There is a single async_scan_info structure shared by all instances
 * of nvme_scan_ns_async() scanning a given controller, so the atomic
 * operations on next_nsid are critical to ensure each instance scans a unique
 * NSID.
 */
struct async_scan_info {
	struct nvme_ctrl *ctrl;
	atomic_t next_nsid;
	__le32 *ns_list;
};

static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
{}
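
/*
 * Sketch of the NSID claiming step described in the kernel-doc above: each
 * async instance atomically takes the next index from the shared
 * async_scan_info so that no two instances scan the same namespace.  Bounds
 * checking and the async_synchronize machinery are elided.
 */
static void __maybe_unused nvme_scan_ns_async_sketch(struct async_scan_info *scan_info)
{
	u32 idx = atomic_fetch_inc(&scan_info->next_nsid);
	u32 nsid = le32_to_cpu(scan_info->ns_list[idx]);

	if (nsid)
		nvme_scan_ns(scan_info->ctrl, nsid);
}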

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{}

static void nvme_scan_work(struct work_struct *work)
{}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
{}

static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{}

static void nvme_async_event_work(struct work_struct *work)
{}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{}

static void nvme_fw_act_work(struct work_struct *work)
{}

static u32 nvme_aer_type(u32 result)
{}

static u32 nvme_aer_subtype(u32 result)
{}

static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{}

static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{}
EXPORT_SYMBOL_GPL();

int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size)
{}
EXPORT_SYMBOL_GPL();

void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size)
{}
EXPORT_SYMBOL_GPL();

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{}

static void nvme_free_ctrl(struct device *dev)
{}

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 *
 * On success, the caller must use nvme_put_ctrl() to release this when
 * needed, which also invokes the ops->free_ctrl() callback.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{}
EXPORT_SYMBOL_GPL();
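
/*
 * Sketch of the contract described above, as seen from a transport probe
 * path: the transport embeds a struct nvme_ctrl, initializes it, registers
 * it, and drops its reference on failure.  The surrounding transport code is
 * hypothetical; only the core calls reflect the documented API.
 */
static int __maybe_unused nvme_example_probe(struct nvme_ctrl *ctrl,
		struct device *dev, const struct nvme_ctrl_ops *ops)
{
	int ret;

	ret = nvme_init_ctrl(ctrl, dev, ops, 0 /* quirks */);
	if (ret)
		return ret;

	ret = nvme_add_ctrl(ctrl);
	if (ret) {
		/* drops the reference from nvme_init_ctrl(), ends in ->free_ctrl() */
		nvme_put_ctrl(ctrl);
		return ret;
	}

	/* ... transport specific setup, then nvme_start_ctrl() ... */
	return 0;
}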

/*
 * On success, returns with an elevated controller reference and the caller
 * must use nvme_uninit_ctrl() to properly free resources associated with the
 * ctrl.
 */
int nvme_add_ctrl(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{}
EXPORT_SYMBOL_GPL();

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{}
EXPORT_SYMBOL_GPL();

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{}
EXPORT_SYMBOL_NS_GPL();

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{}
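
/*
 * A few representative checks of the kind _nvme_check_size() performs,
 * expressed as compile-time assertions: submission queue entries are 64
 * bytes, completion queue entries 16 bytes, and the Identify data structures
 * exactly NVME_IDENTIFY_DATA_SIZE (4KB).
 */
static inline void __maybe_unused _nvme_check_size_sketch(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_completion) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
}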


static int __init nvme_core_init(void)
{}

static void __exit nvme_core_exit(void)
{}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);