linux/drivers/target/target_core_user.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <[email protected]>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are
 * too complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
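
/*
 * For reference, a rough sketch of the userspace side of this protocol,
 * based on the UAPI in include/uapi/linux/target_core_user.h and
 * Documentation/target/tcmu-design.rst. Illustrative only: error
 * handling, the data-area accesses and the actual SCSI emulation are
 * omitted, and the mmap size would come from the UIO map info.
 *
 *	#include <linux/target_core_user.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	static void handle_ring(const char *uio_dev, size_t mmap_size)
 *	{
 *		int fd = open(uio_dev, O_RDWR);
 *		struct tcmu_mailbox *mb = mmap(NULL, mmap_size,
 *					       PROT_READ | PROT_WRITE,
 *					       MAP_SHARED, fd, 0);
 *		void *cmdr = (char *)mb + mb->cmdr_off;
 *		uint32_t buf;
 *
 *		for (;;) {
 *			read(fd, &buf, 4);	// wait for the kernel's kick
 *
 *			while (mb->cmd_tail != mb->cmd_head) {
 *				struct tcmu_cmd_entry *ent =
 *					(void *)((char *)cmdr + mb->cmd_tail);
 *
 *				if (tcmu_hdr_get_op(ent->hdr.len_op) ==
 *				    TCMU_OP_CMD) {
 *					// req.cdb_off and req.iov[] point
 *					// back into this same mmap'ed area.
 *					// ... emulate the SCSI command ...
 *					ent->rsp.scsi_status = 0;
 *				}
 *				// TCMU_OP_PAD entries are simply skipped.
 *				mb->cmd_tail = (mb->cmd_tail +
 *					tcmu_hdr_get_len(ent->hdr.len_op)) %
 *					mb->cmdr_size;
 *			}
 *			write(fd, &buf, 4);	// tell the kernel we're done
 *		}
 *	}
 */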

#define TCMU_TIME_OUT

/* For the mailbox plus cmd ring, the size is fixed at 8 MB */
#define MB_CMDR_SIZE_DEF
/* The offset of the cmd ring is the size of the mailbox */
#define CMDR_OFF
#define CMDR_SIZE_DEF

/*
 * For the data area, the default block size is PAGE_SIZE and
 * the default total size is 256K * PAGE_SIZE.
 */
#define DATA_PAGES_PER_BLK_DEF
#define DATA_AREA_PAGES_DEF

#define TCMU_MBS_TO_PAGES(_mbs)
#define TCMU_PAGES_TO_MBS(_pages)
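
/*
 * A minimal sketch of the MB <-> pages conversion the two macros above
 * are expected to implement (assuming 1 MB = 2^20 bytes; illustrative
 * only, since the definitions are not shown here):
 *
 *	pages = (size_t)mbs << (20 - PAGE_SHIFT);
 *	mbs   = pages >> (20 - PAGE_SHIFT);
 */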

/*
 * Default limit on the number of global data pages (512K * PAGE_SIZE)
 * above which the unmap thread is started.
 */
#define TCMU_GLOBAL_MAX_PAGES_DEF

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {};

#define TCMU_CONFIG_LEN

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {};

struct tcmu_dev {};

#define TCMU_DEV(_se_dev)

struct tcmu_cmd {};

struct tcmu_tmr {};

/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);
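
/*
 * A minimal sketch of the required nesting when iterating over all
 * devices (hypothetical helper for illustration only; it assumes
 * tcmu_dev carries a list node, here called "node", linking it into
 * root_udev):
 *
 *	static void tcmu_for_each_dev_example(void)
 *	{
 *		struct tcmu_dev *udev;
 *
 *		mutex_lock(&root_udev_mutex);
 *		list_for_each_entry(udev, &root_udev, node) {
 *			mutex_lock(&udev->cmdr_lock);
 *			// ... inspect or reclaim ring resources ...
 *			mutex_unlock(&udev->cmdr_lock);
 *		}
 *		mutex_unlock(&root_udev_mutex);
 *	}
 */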

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_page_count =;
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_pages =;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{}

static const struct kernel_param_ops tcmu_global_max_data_area_op =;

module_param_cb();
MODULE_PARM_DESC();

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{}

static const struct kernel_param_ops tcmu_block_netlink_op =;

module_param_cb();
MODULE_PARM_DESC();

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{}

static const struct kernel_param_ops tcmu_reset_netlink_op =;

module_param_cb();
MODULE_PARM_DESC();

/* multicast group */
enum tcmu_multicast_groups {};

static const struct genl_multicast_group tcmu_mcgrps[] =;

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] =;

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{}

static const struct genl_small_ops tcmu_genl_ops[] =;

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init =;

#define tcmu_cmd_set_dbi_cur(cmd, index)
#define tcmu_cmd_reset_dbi_cur(cmd)
#define tcmu_cmd_set_dbi(cmd, index)
#define tcmu_cmd_get_dbi(cmd)

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{}

static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
				       struct tcmu_cmd *tcmu_cmd,
				       int prev_dbi, int length, int *iov_cnt)
{}

static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
				 struct tcmu_cmd *tcmu_cmd, int length)
{}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{}

static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{}

static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int prev_dbi, int len)
{}

static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int data_length)
{}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{}

/*
 * Some ring helper functions. We don't assume the ring size is a power
 * of 2, so we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{}

static inline size_t head_to_end(size_t head, size_t size)
{}

#define UPDATE_HEAD(head, used, size)
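
/*
 * A minimal sketch of the arithmetic spc_used()/spc_free()/head_to_end()
 * implement for a ring whose size need not be a power of two (assuming
 * the usual "keep one byte unused" convention so that full and empty can
 * be told apart; illustrative only):
 *
 *	used(head, tail, size)  = head >= tail ? head - tail
 *					       : size - (tail - head);
 *	free(head, tail, size)  = size - used(head, tail, size) - 1;
 *	head_to_end(head, size) = size - head;	// contiguous space before wrap
 */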

#define TCMU_SG_TO_DATA_AREA
#define TCMU_DATA_AREA_TO_SG

static inline void tcmu_copy_data(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd, uint32_t direction,
				  struct scatterlist *sg, unsigned int sg_nents,
				  struct iovec **iov, size_t data_len)
{}

static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			      struct iovec **iov)
{}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			     bool bidi, uint32_t read_len)
{}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{}

/*
 * We can't queue a command until we have space available on the cmd ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{}

/*
 * We have to allocate data buffers before we can queue a command.
 * Returns -1 on error (not enough space) or the number of iovs needed on
 * success.
 *
 * Called with ring lock held.
 */
static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				  int *iov_bidi_cnt)
{}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{}

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{}

static void tcmu_unplug_device(struct se_dev_plug *se_plug)
{}

static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code, set if -1 is returned.
 *
 * Returns:
 * -1 if we cannot queue internally or to the ring.
 *  0 on success.
 *  1 if internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{}
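
/*
 * A minimal sketch of how a caller might act on queue_cmd_ring()'s
 * return value (illustrative only; tcmu_free_cmd() is the local helper
 * declared above):
 *
 *	sense_reason_t scsi_ret;
 *	int ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
 *
 *	if (ret < 0)		// could not queue at all
 *		tcmu_free_cmd(tcmu_cmd);
 *	// ret == 0: handed to userspace via the ring
 *	// ret == 1: parked on the qfull queue until ring space frees up
 *	return scsi_ret;
 */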

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 on success.
 *  1 if internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
		struct list_head *cmd_list)
{}

static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
				   struct tcmu_cmd_entry *entry, bool keep_buf)
{}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{}

static bool tcmu_handle_completions(struct tcmu_dev *udev)
{}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{}

static void tcmu_cmd_timedout(struct timer_list *t)
{}

static void tcmu_qfull_timedout(struct timer_list *t)
{}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{}

static void tcmu_detach_hba(struct se_hba *hba)
{}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{}

static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
				unsigned long last)
{}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{}

static void tcmu_dev_kref_release(struct kref *kref)
{}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and these helpers must come along with it.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{}

static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{}

static void tcmu_vma_open(struct vm_area_struct *vma)
{}

static void tcmu_vma_close(struct vm_area_struct *vma)
{}

static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct tcmu_vm_ops =;

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{}

static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{}

static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{}

static int tcmu_netlink_event_init(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff **buf, void **hdr)
{}

static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{}

static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{}

static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{}

static int tcmu_configure_device(struct se_device *dev)
{}

static void tcmu_free_device(struct se_device *dev)
{}

static void tcmu_destroy_device(struct se_device *dev)
{}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{}

static void tcmu_block_dev(struct tcmu_dev *udev)
{}

static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{}

enum {};

static match_table_t tokens =;

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{}

static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{}

static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
{}

static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
{}

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{}

static sector_t tcmu_get_blocks(struct se_device *dev)
{}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{}

static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{}
CONFIGFS_ATTR_RO();

static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
					    char *page)
{}
CONFIGFS_ATTR_RO();

static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
{}
CONFIGFS_ATTR_RO();

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{}

static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{}


static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{}

static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
		char *page)
{}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
		const char *page, size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{}

static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
{}

static ssize_t tcmu_tmr_notification_store(struct config_item *item,
					   const char *page, size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{}

static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{}
CONFIGFS_ATTR();

static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{}
CONFIGFS_ATTR_WO();

static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
					size_t count)
{}
CONFIGFS_ATTR_WO();

static struct configfs_attribute *tcmu_attrib_attrs[] =;

static struct configfs_attribute **tcmu_attrs;

static struct configfs_attribute *tcmu_action_attrs[] =;

static struct target_backend_ops tcmu_ops =;

static void find_free_blocks(void)
{}

static void check_timedout_devices(void)
{}

static void tcmu_unmap_work_fn(struct work_struct *work)
{}

static int __init tcmu_module_init(void)
{}

static void __exit tcmu_module_exit(void)
{}

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_LICENSE();

module_init();
module_exit(tcmu_module_exit);