linux/drivers/nvme/host/apple.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Apple ANS NVM Express device driver
 * Copyright The Asahi Linux Contributors
 *
 * Based on the pci.c NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 * and on the rdma.c NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/once.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/soc/apple/sart.h>
#include <linux/reset.h>
#include <linux/time64.h>

#include "nvme.h"

#define APPLE_ANS_BOOT_TIMEOUT
#define APPLE_ANS_MAX_QUEUE_DEPTH

#define APPLE_ANS_COPROC_CPU_CONTROL
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN

#define APPLE_ANS_ACQ_DB
#define APPLE_ANS_IOCQ_DB

#define APPLE_ANS_MAX_PEND_CMDS_CTRL

#define APPLE_ANS_BOOT_STATUS
#define APPLE_ANS_BOOT_STATUS_OK

#define APPLE_ANS_UNKNOWN_CTRL
#define APPLE_ANS_PRP_NULL_CHECK

#define APPLE_ANS_LINEAR_SQ_CTRL
#define APPLE_ANS_LINEAR_SQ_EN

#define APPLE_ANS_LINEAR_ASQ_DB
#define APPLE_ANS_LINEAR_IOSQ_DB

#define APPLE_NVMMU_NUM_TCBS
#define APPLE_NVMMU_ASQ_TCB_BASE
#define APPLE_NVMMU_IOSQ_TCB_BASE
#define APPLE_NVMMU_TCB_INVAL
#define APPLE_NVMMU_TCB_STAT

/*
 * This controller is a bit weird in the way command tags work: both the
 * admin and the IO queue share the same tag space. Additionally, tags
 * cannot be higher than 0x40, which effectively limits the combined
 * queue depth to 0x40. Instead of wasting half of that on the admin queue,
 * which gets much less traffic, we reduce its size here.
 * The controller also doesn't support async events, so no space needs to
 * be reserved for NVME_NR_AEN_COMMANDS.
 */
#define APPLE_NVME_AQ_DEPTH
#define APPLE_NVME_AQ_MQ_TAG_DEPTH
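
/*
 * A minimal sketch of how that split could look, assuming the depth helper
 * only has to distinguish the admin queue from the IO queue; this is an
 * illustration, not the apple_nvme_queue_depth() implementation elided below.
 */
static unsigned int example_queue_depth(bool is_adminq)
{
	/* keep the admin queue tiny; the IO queue gets the rest of the 0x40 tags */
	if (is_adminq)
		return APPLE_NVME_AQ_DEPTH;

	return APPLE_ANS_MAX_QUEUE_DEPTH - APPLE_NVME_AQ_DEPTH;
}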

/*
 * These could be higher, but we need to ensure that no command requires an
 * sg allocation needing more than a page of data.
 */
#define NVME_MAX_KB_SZ
#define NVME_MAX_SEGS
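
/*
 * A hedged sketch of how these limits would typically be handed to the NVMe
 * core. The struct nvme_ctrl fields are standard, but that this driver
 * applies them exactly like this is an assumption for illustration.
 */
static void example_apply_transfer_limits(struct nvme_ctrl *ctrl)
{
	/* NVME_MAX_KB_SZ kilobytes expressed in 512-byte sectors */
	ctrl->max_hw_sectors = NVME_MAX_KB_SZ << 1;
	ctrl->max_segments = NVME_MAX_SEGS;
}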

/*
 * This controller comes with an embedded IOMMU known as NVMMU.
 * The NVMMU is pointed at an array of TCBs indexed by the command tag.
 * Each command must be configured in its TCB before it's allowed to execute,
 * including commands that don't require any DMA transfers.
 *
 * Apple's vendor-specific commands (opcode 0xD8 on the admin queue) are an
 * exception: those must still be added to the NVMMU, but their DMA buffers
 * cannot be represented as PRPs and must instead be allowed using SART.
 *
 * Programming the PRPs to the same values as those in the submission queue
 * looks rather silly at first. This hardware is however designed for a kernel
 * that runs the NVMMU code in a higher exception level than the NVMe driver.
 * In that setting the NVMe driver first programs the submission queue entry
 * and then executes a hypercall to the code that is allowed to program the
 * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
 * verifying that they don't point to kernel text, data, pagetables, or similar
 * protected areas before programming the TCB to point to this shadow copy.
 * Since Linux doesn't do any of that, we may as well just point both the
 * queue entry and the TCB PRP pointers at the same memory.
 */
struct apple_nvmmu_tcb {};
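
/*
 * A sketch of the TCB/SQE mirroring described above. The TCB field names
 * (opcode, command_id, prp1, prp2) are assumptions here since the struct
 * body is elided; locking and the DMA direction flags are omitted.
 */
static void example_fill_tcb(struct apple_nvmmu_tcb *tcbs, struct nvme_command *cmd)
{
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
	struct apple_nvmmu_tcb *tcb = &tcbs[tag];

	memset(tcb, 0, sizeof(*tcb));
	tcb->opcode = cmd->common.opcode;
	tcb->command_id = tag;
	/* mirror the PRPs: queue entry and TCB end up pointing at the same memory */
	tcb->prp1 = cmd->common.dptr.prp1;
	tcb->prp2 = cmd->common.dptr.prp2;
}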

/*
 * The Apple NVMe controller only supports a single admin and a single IO
 * queue, which are both limited to 64 entries and share a single interrupt.
 *
 * The completion queue works as usual. The submission "queue", however, is
 * an array indexed by the command tag on this hardware. Commands must also
 * be present in the NVMMU's TCB array. They are triggered by writing their
 * tag to an MMIO register.
 */
struct apple_nvme_queue {};
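
/*
 * A sketch of the submission flow just described: place the SQE in the array
 * slot for its tag, set up the matching TCB (see the sketch above), and kick
 * the controller by writing the tag. The sq_cmds/tcbs/sq_db fields are
 * assumed for illustration and locking is omitted; apple_nvme_submit_cmd()
 * further down presumably does this for real.
 */
static void example_submit(struct apple_nvme_queue *q, struct nvme_command *cmd)
{
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);

	example_fill_tcb(q->tcbs, cmd);
	memcpy(&q->sq_cmds[tag], cmd, sizeof(*cmd));	/* "queue" slot == tag */
	writel(tag, q->sq_db);				/* doorbell takes the tag itself */
}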

/*
 * The apple_nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct apple_nvme_iod {};
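
/*
 * A sketch of that layout, in the style of the pci.c driver this is based
 * on: the per-request PDU stores the scatterlist first and the array of PRP
 * chunk pointers directly behind it. The iod->sg field is an assumption
 * since the struct body is elided; apple_nvme_iod_list() below is presumably
 * the real helper.
 */
static void **example_iod_prp_list(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	/* the PRP chunk pointers live right after the scatterlist entries */
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}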

struct apple_nvme {};

static_assert();
static_assert();

static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{}

static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
{}

static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{}

static void apple_nvme_rtkit_crashed(void *cookie)
{}

static int apple_nvme_sart_dma_setup(void *cookie,
				     struct apple_rtkit_shmem *bfr)
{}

static void apple_nvme_sart_dma_destroy(void *cookie,
					struct apple_rtkit_shmem *bfr)
{}

static const struct apple_rtkit_ops apple_nvme_rtkit_ops =;

static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{}

static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{}

/*
 * From pci.c:
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static inline size_t apple_nvme_iod_alloc_size(void)
{}
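
/*
 * A hedged sketch of that calculation, following the approach used in pci.c;
 * the rounding below is an assumption for illustration, not the elided
 * apple_nvme_iod_alloc_size() implementation above.
 */
static inline size_t example_iod_alloc_size(void)
{
	/* worst case: one 64-bit PRP entry per controller page, plus one for misalignment */
	const unsigned int nprps =
		DIV_ROUND_UP(NVME_MAX_KB_SZ * 1024, NVME_CTRL_PAGE_SIZE) + 1;
	/* each PRP-list page holds PAGE_SIZE / 8 entries, one of which chains to the next page */
	const unsigned int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);

	return sizeof(__le64 *) * npages +
	       sizeof(struct scatterlist) * NVME_MAX_SEGS;
}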

static void **apple_nvme_iod_list(struct request *req)
{}

static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{}

static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{}

static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
{}

static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
					  struct request *req,
					  struct nvme_rw_command *cmnd)
{}

static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
						struct request *req,
						struct nvme_rw_command *cmnd,
						struct bio_vec *bv)
{}

static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
					struct request *req,
					struct nvme_command *cmnd)
{}

static __always_inline void apple_nvme_unmap_rq(struct request *req)
{}

static void apple_nvme_complete_rq(struct request *req)
{}

static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{}

static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{}

static inline struct blk_mq_tags *
apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
{}

static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
					 struct io_comp_batch *iob, u16 idx)
{}

static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
{}

static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
			       struct io_comp_batch *iob)
{}

static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{}

static irqreturn_t apple_nvme_irq(int irq, void *data)
{}

static int apple_nvme_create_cq(struct apple_nvme *anv)
{}

static int apple_nvme_remove_cq(struct apple_nvme *anv)
{}

static int apple_nvme_create_sq(struct apple_nvme *anv)
{}

static int apple_nvme_remove_sq(struct apple_nvme *anv)
{}

static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{}

static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{}

static int apple_nvme_init_request(struct blk_mq_tag_set *set,
				   struct request *req, unsigned int hctx_idx,
				   unsigned int numa_node)
{}

static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{}

static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{}

static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
			   struct io_comp_batch *iob)
{}

static const struct blk_mq_ops apple_nvme_mq_admin_ops =;

static const struct blk_mq_ops apple_nvme_mq_ops =;

static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{}

static void apple_nvme_reset_work(struct work_struct *work)
{}

static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{}

static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{}

static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{}

static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{}

static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{}

static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{}

static const struct nvme_ctrl_ops nvme_ctrl_ops =;

static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
{}

static void devm_apple_nvme_put_tag_set(void *data)
{}

static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{}

static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{}

static void apple_nvme_detach_genpd(struct apple_nvme *anv)
{}

static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{}

static void devm_apple_nvme_mempool_destroy(void *data)
{}

static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{}

static int apple_nvme_probe(struct platform_device *pdev)
{}

static void apple_nvme_remove(struct platform_device *pdev)
{}

static void apple_nvme_shutdown(struct platform_device *pdev)
{}

static int apple_nvme_resume(struct device *dev)
{}

static int apple_nvme_suspend(struct device *dev)
{}

static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);

static const struct of_device_id apple_nvme_of_match[] =;
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);

static struct platform_driver apple_nvme_driver =;
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();