linux/drivers/block/null_blk/main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <[email protected]> and
 * Shaohua Li <[email protected]>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
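/*
 * For example, with mbps = 100 and TICKS_PER_SEC = 50, each 20ms tick
 * admits (1 << 20) / 50 * 100 bytes, roughly 2 MiB, i.e. 100 MiB/s overall.
 */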

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED = 0,
	NULLB_DEV_FL_UP = 1,
	NULLB_DEV_FL_THROTTLED = 2,
	NULLB_DEV_FL_CACHE = 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap records which sectors in the page hold data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are reserved for special purposes. LOCK
 * means the cache page is being flushed to storage. FREE means the cache
 * page has been freed and should be skipped when flushing to storage. Please
 * see null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
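/*
 * Worked example, assuming a 4K PAGE_SIZE and 512-byte sectors: MAP_SZ is
 * (4096 >> 9) + 2 = 10, so bits 0-7 track the eight sectors in the page,
 * bit 8 (NULLB_PAGE_FREE) marks a freed cache page, and bit 9
 * (NULLB_PAGE_LOCK) marks a cache page that is currently being flushed.
 */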

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE = 0,
	NULL_IRQ_SOFTIRQ = 1,
	NULL_IRQ_TIMER = 2,
};

static bool g_virt_boundary;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");

static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
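/*
 * Example (hypothetical values): loading the module with
 * timeout="1,100,0,-1" times out every request (interval 1, probability
 * 100%, no space, unlimited times), following the generic
 * <interval>,<probability>,<space>,<times> syntax above.
 */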
#endif

/*
 * Historic queue modes.
 *
 * These days nothing but NULL_Q_MQ is actually supported, but we keep the
 * enum for error reporting.
 */
enum {
	NULL_Q_BIO = 0,
	NULL_Q_RQ = 1,
	NULL_Q_MQ = 2,
};

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set = null_set_queue_mode,
	.get = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool g_shared_tags;
module_param_named(shared_tags, g_shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set = null_set_irqmode,
	.get = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler, 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
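/*
 * Example (hypothetical values): loading with irqmode=2 and
 * completion_nsec=1000000 completes each request from an hrtimer after
 * 1ms, emulating a device with a fixed 1ms latency.
 */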

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_memory_backed;
module_param_named(memory_backed, g_memory_backed, bool, 0444);
MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");

static bool g_discard;
module_param_named(discard, g_discard, bool, 0444);
MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");

static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");

static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");

static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");

static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");

static int g_zone_append_max_sectors = INT_MAX;
module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
MODULE_PARM_DESC(zone_append_max_sectors, "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");

static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
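/*
 * Example (hypothetical values): zoned=1 gb=4 zone_size=64 zone_nr_conv=4
 * at load time creates a 4GB host-managed zoned device with 64MB zones,
 * the first four of which are conventional.
 */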

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)
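/*
 * The macro body is elided here. A sketch of the expected expansion
 * (abridged, not verbatim): NULLB_DEVICE_ATTR(size, ulong, NULL) generates
 * nullb_device_size_show()/nullb_device_size_store() wrappers around the
 * typed helpers above and registers them via CONFIGFS_ATTR(nullb_device_,
 * size). When APPLY is non-NULL, the store path calls it to validate and
 * apply the new value; otherwise it rejects changes with -EBUSY once
 * NULLB_DEV_FL_CONFIGURED is set.
 */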

static int nullb_update_nr_hw_queues(struct nullb_device *dev,
				     unsigned int submit_queues,
				     unsigned int poll_queues)
{}

static int nullb_apply_submit_queues(struct nullb_device *dev,
				     unsigned int submit_queues)
{}

static int nullb_apply_poll_queues(struct nullb_device *dev,
				   unsigned int poll_queues)
{}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(zone_full, bool, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{}
CONFIGFS_ATTR(nullb_device_, badblocks);
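/*
 * Example: writing "+0-1023" to the badblocks attribute marks sectors
 * 0-1023 bad, and writing "-0-1023" clears the same range again.
 */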

static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
						const char *page, size_t count)
{}
CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);

static ssize_t nullb_device_zone_offline_store(struct config_item *item,
					       const char *page, size_t count)
{}
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_poll_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_max_sectors,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_capacity,
	&nullb_device_attr_zone_nr_conv,
	&nullb_device_attr_zone_max_open,
	&nullb_device_attr_zone_max_active,
	&nullb_device_attr_zone_append_max_sectors,
	&nullb_device_attr_zone_readonly,
	&nullb_device_attr_zone_offline,
	&nullb_device_attr_zone_full,
	&nullb_device_attr_virt_boundary,
	&nullb_device_attr_no_sched,
	&nullb_device_attr_shared_tags,
	&nullb_device_attr_shared_tag_bitmap,
	&nullb_device_attr_fua,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{}

static struct configfs_item_operations nullb_device_ops = {
	.release = nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops = &nullb_device_ops,
	.ct_attrs = nullb_device_attrs,
	.ct_owner = THIS_MODULE,
};

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION

static void nullb_add_fault_config(struct nullb_device *dev)
{}

#else

static void nullb_add_fault_config(struct nullb_device *dev)
{
}

#endif

static struct config_group *nullb_group_make_group(struct config_group *group,
						   const char *name)
{}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_group = nullb_group_make_group,
	.drop_item = nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops = &nullb_group_ops,
	.ct_attrs = nullb_group_attrs,
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};
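/*
 * Typical configfs flow (hypothetical shell session), with configfs
 * mounted at /sys/kernel/config:
 *
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *   echo 1 > /sys/kernel/config/nullb/nullb0/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power
 *
 * The power write brings the device up via null_add_dev() below; removing
 * the directory tears it down again through nullb_group_drop_item().
 */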

static inline int null_cache_active(struct nullb *nullb)
{}

static struct nullb_device *null_alloc_dev(void)
{}

static void null_free_dev(struct nullb_device *dev)
{}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{}

static void null_complete_rq(struct request *rq)
{}

static struct nullb_page *null_alloc_page(void)
{}

static void null_free_page(struct nullb_page *t_page)
{}

static bool null_page_empty(struct nullb_page *page)
{}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
			       unsigned int len, unsigned int off)
{}

blk_status_t null_handle_discard(struct nullb_device *dev,
				 sector_t sector, sector_t nr_sectors)
{}

static blk_status_t null_handle_flush(struct nullb *nullb)
{}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{}

static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
{}

static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{}

static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
						 sector_t sector,
						 sector_t nr_sectors)
{}

static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_op op,
						     sector_t sector,
						     sector_t nr_sectors)
{}

static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{}

static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{}

blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors)
{}

static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
			    sector_t nr_sectors, enum req_op op)
{}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{}

static void nullb_setup_bwtimer(struct nullb *nullb)
{}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION

static bool should_timeout_request(struct request *rq)
{}

static bool should_requeue_request(struct request *rq)
{}

static bool should_init_hctx_fail(struct nullb_device *dev)
{}

#else

static bool should_timeout_request(struct request *rq)
{
	return false;
}

static bool should_requeue_request(struct request *rq)
{
	return false;
}

static bool should_init_hctx_fail(struct nullb_device *dev)
{
	return false;
}

#endif

static void null_map_queues(struct blk_mq_tag_set *set)
{}

static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{}

static void null_queue_rqs(struct request **rqlist)
{}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			  unsigned int hctx_idx)
{}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq = null_queue_rq,
	.queue_rqs = null_queue_rqs,
	.complete = null_complete_rq,
	.timeout = null_timeout_rq,
	.poll = null_poll,
	.map_queues = null_map_queues,
	.init_hctx = null_init_hctx,
};

static void null_del_dev(struct nullb *nullb)
{}

static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
{}

static const struct block_device_operations null_ops = {
	.owner = THIS_MODULE,
	.report_zones = null_report_zones,
};

static int setup_queues(struct nullb *nullb)
{}

static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
{}

static int null_init_global_tag_set(void)
{}

static int null_setup_tagset(struct nullb *nullb)
{}

static int null_validate_conf(struct nullb_device *dev)
{}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{}
#endif

static bool null_setup_fault(void)
{}

static int null_add_dev(struct nullb_device *dev)
{}

static struct nullb *null_find_dev_by_name(const char *name)
{}

static int null_create_dev(void)
{}

static void null_destroy_dev(struct nullb *nullb)
{}

static int __init null_init(void)
{}

static void __exit null_exit(void)
{}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <[email protected]>");
MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");