/* linux/drivers/mmc/core/block.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS();
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX

/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
/* Pull the 8-bit EXT_CSD byte index/value out of a MMC_IOC_CMD argument */
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors =;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

struct mmc_blk_busy_data {};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static const struct bus_type mmc_rpmb_bus_type =;

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC();

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static int mmc_blk_busy_cb(void *cb_data, bool *busy);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{}

static inline int mmc_get_devidx(struct gendisk *disk)
{}

static void mmc_blk_kref_release(struct kref *ref)
{}

static void mmc_blk_put(struct mmc_blk_data *md)
{}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}

/*
 * Mode is deliberately 0 here; presumably mmc_disk_attrs_is_visible()
 * grants the real permissions only when the card supports power-on write
 * protection — TODO confirm (its body is not visible in this view).
 */
static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		power_ro_lock_show, power_ro_lock_store);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);

static struct attribute *mmc_disk_attrs[] =;

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
		struct attribute *a, int n)
{}

static const struct attribute_group mmc_disk_attr_group =;

static const struct attribute_group *mmc_disk_attr_groups[] =;

static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{}

static void mmc_blk_release(struct gendisk *disk)
{}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{}

struct mmc_blk_ioc_data {};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data **idatas, int i)
{}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{}

static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
	unsigned int cmd, unsigned long arg)
{}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
	unsigned int cmd, unsigned long arg)
{}
#endif

static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{}

static const struct block_device_operations mmc_bdops =;

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{}

static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{}

/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{}

static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
{}

/*
 * The non-block commands come back from the block layer after it queued it and
 * processed it with all other requests and then they get issued in this
 * function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{}

static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
				   int type, unsigned int erase_arg)
{}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{}

/* R1 status bits that indicate a failed command, excluding out-of-range */
#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{}

#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{}

void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{}

static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{}

static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{}

static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{}

static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq)
{}

#define MMC_MAX_RETRIES		5
#define MMC_DATA_RETRIES	2
/* One more than the maximum, so "no retries" can never be reached */
#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)

static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{}

static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{}

#define MMC_READ_SINGLE_RETRIES	2

/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{}

static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{}

static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{}

/*
 * Check for errors the host controller driver might not have seen such as
 * response mode errors or invalid card state.
 */
static bool mmc_blk_status_error(struct request *req, u32 status)
{}

static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{}

/*
 * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
 * policy:
 * 1. A request that has transferred at least some data is considered
 * successful and will be requeued if there is remaining data to
 * transfer.
 * 2. Otherwise the number of retries is incremented and the request
 * will be requeued if there are remaining retries.
 * 3. Otherwise the request will be errored out.
 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
 * mqrq->retries. So there are only 4 possible actions here:
 *	1. do not accept the bytes_xfered value i.e. set it to zero
 *	2. change mqrq->retries to determine the number of retries
 *	3. try to reset the card
 *	4. read one sector at a time
 */
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{}

static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{}

static int mmc_spi_err_check(struct mmc_card *card)
{}

static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{}

static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{}

static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
					    struct request *req)
{}

static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
{}

static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
					struct mmc_queue_req *mqrq)
{}

static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
				 struct mmc_queue_req *mqrq)
{}

static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
{}

void mmc_blk_mq_complete(struct request *req)
{}

static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
				       struct request *req)
{}

static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{}

static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
				bool can_sleep)
{}

void mmc_blk_mq_recovery(struct mmc_queue *mq)
{}

static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
					 struct request **prev_req)
{}

void mmc_blk_mq_complete_work(struct work_struct *work)
{}

static void mmc_blk_mq_req_done(struct mmc_request *mrq)
{}

static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
{}

static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
{}

static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
				  struct request *req)
{}

static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
{}

enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{}

static inline int mmc_blk_readonly(struct mmc_card *card)
{}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type,
					      unsigned int part_type)
{}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{}

/**
 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
 * @filp: the character device file
 * @cmd: the ioctl() command
 * @arg: the argument from userspace
 *
 * This will essentially just redirect the ioctl()s coming in over to
 * the main block device spawning the RPMB character device.
 */
static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{}

#ifdef CONFIG_COMPAT
static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
			      unsigned long arg)
{}
#endif

static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
{}

static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
{}

static const struct file_operations mmc_rpmb_fileops =;

static void mmc_blk_rpmb_device_release(struct device *dev)
{}

static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
				   struct mmc_blk_data *md,
				   unsigned int part_index,
				   sector_t size,
				   const char *subname)
{}

static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)

{}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{}

#ifdef CONFIG_DEBUG_FS

static int mmc_dbg_card_status_get(void *data, u64 *val)
{}
DEFINE_DEBUGFS_ATTRIBUTE();

/* That is two digits * 512 + 1 for newline */
#define EXT_CSD_STR_LEN 1025

static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{}

static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{}

static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{}

static const struct file_operations mmc_dbg_ext_csd_fops =;

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{}

#else

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{
}

#endif /* CONFIG_DEBUG_FS */

static int mmc_blk_probe(struct mmc_card *card)
{}

static void mmc_blk_remove(struct mmc_card *card)
{}

static int _mmc_blk_suspend(struct mmc_card *card)
{}

static void mmc_blk_shutdown(struct mmc_card *card)
{}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{}

static int mmc_blk_resume(struct device *dev)
{}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver =;

static int __init mmc_blk_init(void)
{}

static void __exit mmc_blk_exit(void)
{}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE();
MODULE_DESCRIPTION();