linux/drivers/nvme/host/pci.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kstrtox.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)
#define CQ_SIZE(q)

#define SGES_PER_PAGE

/*
 * These can be higher, but we need to ensure that no command requires an
 * sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ
#define NVME_MAX_SEGS
#define NVME_MAX_NR_ALLOCATIONS
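
/*
 * Illustrative sketch of the sizing rule above (hypothetical helper, not part
 * of the driver): with 4 KiB pages and a 32-byte struct scatterlist on 64-bit,
 * 128 segments fill exactly one page, so a single page-sized allocation always
 * covers the per-command scatterlist.
 */
static inline bool nvme_example_segs_fit_in_page(unsigned int nr_segs)
{
	return nr_segs * sizeof(struct scatterlist) <= PAGE_SIZE;
}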

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when average request segment size is larger or equal to this size. Use 0 to disable SGLs.");

#define NVME_PCI_MIN_QUEUE_SIZE
#define NVME_PCI_MAX_QUEUE_SIZE
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = { .set = io_queue_depth_set, .get = param_get_uint };

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{}

static const struct kernel_param_ops io_queue_count_ops = { .set = io_queue_count_set, .get = param_get_uint };

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {};
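
/*
 * Sketch of representative nvme_dev fields (illustrative only; the actual
 * layout is elided above): the shared controller state, the queue array, the
 * mapped BAR with its doorbell registers, and the blk-mq tag sets.
 */
struct nvme_dev_example_sketch {
	struct nvme_ctrl ctrl;			/* generic NVMe core state */
	struct nvme_queue *queues;		/* admin queue + I/O queues */
	void __iomem *bar;			/* BAR 0: controller registers */
	u32 __iomem *dbs;			/* doorbell array within the BAR */
	struct blk_mq_tag_set tagset;		/* I/O queue tag set */
	struct blk_mq_tag_set admin_tagset;	/* admin queue tag set */
};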

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {};
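
/*
 * Sketch of the ring state an nvme_queue carries (illustrative only; the real
 * layout is elided above): a submission ring written by the host, a completion
 * ring written by the controller, the doorbell pointer, and head/tail/phase
 * bookkeeping.
 */
struct nvme_queue_example_sketch {
	void *sq_cmds;			/* SQ entries (host memory or CMB) */
	struct nvme_completion *cqes;	/* CQ entries, consumed by phase bit */
	u32 __iomem *q_db;		/* SQ tail doorbell */
	u16 q_depth;			/* entries per ring */
	u16 sq_tail;			/* next free SQ slot */
	u16 cq_head;			/* next CQ entry to consume */
	u8 cq_phase;			/* expected phase bit, flips on wrap */
};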

union nvme_descriptor {};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {};
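
/*
 * Sketch of what an iod tracks per request (illustrative only; field names
 * are assumptions since the real struct is elided above).
 */
struct nvme_iod_example_sketch {
	struct nvme_command cmd;	/* command built for this request */
	struct sg_table sgt;		/* mapped scatterlist */
	dma_addr_t first_dma;		/* first PRP list / SGL segment */
	dma_addr_t meta_dma;		/* mapped integrity metadata, if any */
	int nr_descriptors;		/* PRP/SGL chunk allocations in use */
};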

static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{}

static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
					      volatile __le32 *dbbuf_ei)
{}
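
/*
 * Illustrative sketch of the wraparound-safe "need event" test used with the
 * shadow doorbells (same idea as virtio's vring_need_event); the helper name
 * is hypothetical, the driver's real check lives in the helpers above.
 */
static inline bool nvme_example_db_need_event(u16 event_idx, u16 new_idx,
					      u16 old_idx)
{
	/* true iff event_idx falls in (old_idx, new_idx], modulo 2^16 */
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old_idx);
}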

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{}
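
/*
 * Illustrative arithmetic behind the overestimate (hypothetical helper; the
 * parameters stand in for NVME_MAX_KB_SZ and NVME_CTRL_PAGE_SIZE): a
 * worst-case transfer plus one page of misalignment is split into page-sized
 * chunks needing one 8-byte PRP entry each, and every PRP-list page gives up
 * its last slot to chain to the next list page.
 */
static inline unsigned int nvme_example_npages_prp(unsigned int max_kb,
						   unsigned int page_size)
{
	unsigned int nprps = DIV_ROUND_UP(max_kb * 1024 + page_size, page_size);

	return DIV_ROUND_UP(8 * nprps, page_size - 8);
}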

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{}

static int queue_irq_offset(struct nvme_dev *dev)
{}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{}
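
/*
 * Sketch of the "ring now or defer" decision described above (illustrative;
 * parameter names are assumptions since struct nvme_queue is elided): the SQ
 * tail doorbell is written when the caller asks for it (end of a batch) or
 * when deferring one more submission would wrap the tail back onto the last
 * value written, so back-to-back submissions can share a single MMIO write.
 */
static inline bool nvme_example_should_ring_sq_db(bool write_sq, u16 next_tail,
						  u16 last_written_tail)
{
	return write_sq || next_tail == last_written_tail;
}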

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{}

static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{}

static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd)
{}

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{}

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{}

static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
{}

static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
{}

static void nvme_queue_rqs(struct request **rqlist)
{}

static __always_inline void nvme_pci_unmap_rq(struct request *req)
{}

static void nvme_pci_complete_rq(struct request *req)
{}

static void nvme_pci_complete_batch(struct io_comp_batch *iob)
{}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{}
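
/*
 * Sketch of the phase-bit test referred to above (hypothetical helper; the
 * queue fields are elided in this file): bit 0 of the CQE status word is
 * toggled by the controller each time the CQ wraps, so an entry is new only
 * while its phase bit matches the phase the host currently expects.
 */
static inline bool nvme_example_cqe_is_new(struct nvme_completion *cqe,
					   u8 expected_phase)
{
	return (le16_to_cpu(READ_ONCE(cqe->status)) & 1) == expected_phase;
}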

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{}

static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
			       struct io_comp_batch *iob)
{}

static irqreturn_t nvme_irq(int irq, void *data)
{}

static irqreturn_t nvme_irq_check(int irq, void *data)
{}

/*
 * Poll for completions for any interrupt-driven queue.
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{}
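
/*
 * Illustrative sketch of one way to poll an interrupt-driven queue from any
 * context (hypothetical helper; assumes the queue's IRQ is resolved with
 * pci_irq_vector()): mask the vector so the hard IRQ handler cannot run
 * concurrently, reap whatever completions are already posted, then unmask.
 */
static inline void nvme_example_poll_one_vector(struct pci_dev *pdev, int vec)
{
	disable_irq(pci_irq_vector(pdev, vec));
	/* ... consume pending CQ entries for the queue bound to this vector ... */
	enable_irq(pci_irq_vector(pdev, vec));
}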

static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{}

static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
{}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{}

static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{}

static enum blk_eh_timer_return nvme_timeout(struct request *req)
{}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{}

static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
{}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{}

/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced, except
 * nvme_poll(). This is the last chance for the driver to see a natural
 * completion before nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid)
{}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{}

static int queue_request_irq(struct nvme_queue *nvmeq)
{}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{}

/*
 * Try getting shutdown_lock while setting up IO queues.
 */
static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
{}
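
/*
 * Illustrative sketch of the trylock pattern (hypothetical helper): queue
 * setup backs off with an error instead of blocking when the shutdown path
 * (nvme_dev_disable) already holds the lock to tear the controller down.
 */
static inline int nvme_example_trylock_or_bail(struct mutex *shutdown_lock)
{
	if (!mutex_trylock(shutdown_lock))
		return -ENODEV;
	return 0;
}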

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{}

static const struct blk_mq_ops nvme_mq_admin_ops = {};

static const struct blk_mq_ops nvme_mq_ops = {};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{}

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{}

static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{}

static int nvme_create_io_queues(struct nvme_dev *dev)
{}

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{}

static void nvme_map_cmb(struct nvme_dev *dev)
{}

static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{}

static void nvme_free_host_mem(struct nvme_dev *dev)
{}

static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{}

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{}

static int nvme_setup_host_mem(struct nvme_dev *dev)
{}

static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{}
static DEVICE_ATTR_RO(cmb);

static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{}
static DEVICE_ATTR_RO(cmbloc);

static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{}
static DEVICE_ATTR_RO(cmbsz);

static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{}

static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{}
static DEVICE_ATTR_RW(hmb);

static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{}

static struct attribute *nvme_pci_attrs[] = {};

static const struct attribute_group nvme_pci_dev_attrs_group = {};

static const struct attribute_group *nvme_pci_dev_attr_groups[] = {};

static void nvme_update_attrs(struct nvme_dev *dev)
{}

/*
 * nirqs is the number of interrupts available for write and read
 * queues. The core already reserved an interrupt for the admin queue.
 */
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{}
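
/*
 * Illustrative sketch of the default/read interrupt split (hypothetical
 * helper; the driver's actual policy lives in nvme_calc_irq_sets() above):
 * with no dedicated write queues requested, or only one vector available,
 * everything lands in a single set; otherwise the read set gets whatever
 * remains after the requested write queues.
 */
static inline unsigned int nvme_example_nr_read_queues(unsigned int nrirqs,
					unsigned int nr_write_queues)
{
	if (!nr_write_queues || nrirqs == 1)
		return 0;
	if (nr_write_queues >= nrirqs)
		return 1;
	return nrirqs - nr_write_queues;
}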

static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{}

static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
{}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{}

static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
					     blk_status_t error)
{}

static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
					  blk_status_t error)
{}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{}

static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode)
{}

static void nvme_delete_io_queues(struct nvme_dev *dev)
{}

static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
{}

static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{}

static int nvme_pci_enable(struct nvme_dev *dev)
{}

static void nvme_dev_unmap(struct nvme_dev *dev)
{}

static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
{}

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{}

static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
{}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{}

static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{}

static void nvme_free_tagset(struct nvme_dev *dev)
{}

/* pairs with nvme_pci_alloc_dev */
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{}

static void nvme_reset_work(struct work_struct *work)
{}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{}

static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{}

static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{}

static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
{}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {};

static int nvme_dev_map(struct nvme_dev *dev)
{}

static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{}

static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
		const struct pci_device_id *id)
{}

static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{}

static void nvme_reset_prepare(struct pci_dev *pdev)
{}

static void nvme_reset_done(struct pci_dev *pdev)
{}

static void nvme_shutdown(struct pci_dev *pdev)
{}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{}

#ifdef CONFIG_PM_SLEEP
static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
{}

static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
{}

static int nvme_resume(struct device *dev)
{}

static int nvme_suspend(struct device *dev)
{}

static int nvme_simple_suspend(struct device *dev)
{}

static int nvme_simple_resume(struct device *dev)
{}

static const struct dev_pm_ops nvme_dev_pm_ops = {};
#endif /* CONFIG_PM_SLEEP */

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{}

static void nvme_error_resume(struct pci_dev *pdev)
{}

static const struct pci_error_handlers nvme_err_handler = {};

static const struct pci_device_id nvme_id_table[] = {};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {};

static int __init nvme_init(void)
{}

static void __exit nvme_exit(void)
{}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);