// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/md/dm.c
 *
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/bio-integrity.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
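
/*
 * Illustrative sketch (not part of dm.c proper): how a cookie is typically
 * encoded as a uevent environment variable when the event is emitted.  The
 * helper name below is hypothetical; dm_kobject_uevent() further down is the
 * real consumer of these macros.
 */
static void example_add_cookie_uevent(struct mapped_device *md,
				      enum kobject_action action,
				      unsigned int cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	/* Encode the cookie as DM_COOKIE=<value> so udev can match it. */
	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, cookie);
	kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
}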

/*
 * For a REQ_POLLED fs bio, this flag is set when the mapped underlying
 * dm_io instances are linked into one list, reusing bio->bi_private as the
 * list head. The original ->bi_private is restored before the fs bio is ended.
 */
#define REQ_DM_POLL_LIST	REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major;
static unsigned int _major;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
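
/*
 * Illustrative usage sketch, not part of dm.c: a target that reserves per-bio
 * data via ti->per_io_data_size in its constructor and retrieves it in .map().
 * The struct and function names are hypothetical.
 */
struct example_per_bio_data {
	sector_t original_sector;	/* hypothetical per-bio bookkeeping */
};

static int example_target_map(struct dm_target *ti, struct bio *bio)
{
	/* Assumes the .ctr set ti->per_io_data_size to sizeof(*pb). */
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pb->original_sector = bio->bi_iter.bi_sector;
	/* ... remap the bio to the underlying device here ... */
	return DM_MAPIO_REMAPPED;
}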

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{}

struct table_device {};

/*
 * Number of reserved IOs in bio-based DM's mempools, settable by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{}
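
/*
 * Assumed sketch of the clamping these helpers perform (bodies elided above):
 * read the module parameter once, fall back to the default when it is unset
 * and cap it at the given maximum.  The function name is hypothetical.
 */
static unsigned int example_get_module_param(unsigned int *module_param,
					     unsigned int def, unsigned int max)
{
	unsigned int param = READ_ONCE(*module_param);

	if (!param)		/* unset: use the compiled-in default */
		param = def;
	else if (param > max)	/* clamp user-supplied values */
		param = max;

	return param;
}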

unsigned int dm_get_reserved_bio_based_ios(void)
{}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned int dm_get_numa_node(void)
{}

static int __init local_init(void)
{}

static void local_exit(void)
{}

static int (*_inits[])(void) __initdata =;

static void (*_exits[])(void) =;

static int __init dm_init(void)
{}

static void __exit dm_exit(void)
{}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{}

static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{}

static void dm_blk_close(struct gendisk *disk)
{}

int dm_open_count(struct mapped_device *md)
{}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{}

int dm_cancel_deferred_remove(struct mapped_device *md)
{}

static void do_deferred_remove(struct work_struct *w)
{}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{}

static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned int cmd, unsigned long arg)
{}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static inline bool bio_is_flush_with_data(struct bio *bio)
{}

static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{}

static void dm_io_acct(struct dm_io *io, bool end)
{}

static void __dm_start_io_acct(struct dm_io *io)
{}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{}

static void dm_end_io_acct(struct dm_io *io)
{}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
{}

static void free_io(struct dm_io *io)
{}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{}

static void free_tio(struct bio *clone)
{}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{}

/*
 * Everyone (including functions in this file) should use this function to
 * access the md->map field, and make sure they call dm_put_live_table()
 * when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{}
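
/*
 * Minimal usage sketch of the pattern described above; the surrounding
 * function is hypothetical.  Whatever dm_get_live_table() returns stays
 * valid only until the matching dm_put_live_table().
 */
static sector_t example_report_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t nr_sectors = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		nr_sectors = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return nr_sectors;
}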

void dm_sync_table(struct mapped_device *md)
{}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
		dev_t dev, blk_mode_t mode)
{}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      blk_mode_t mode)
{}

int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
			struct dm_dev **result)
{}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{}

static int __noflush_suspending(struct mapped_device *md)
{}

static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{}

static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with the error if requeue is disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{}

static void __dm_io_complete(struct dm_io *io, bool first_stage)
{}

static void dm_wq_requeue_work(struct work_struct *work)
{}

/*
 * Two staged requeue:
 *
 * 1) io->orig_bio points to the real original bio, and the part mapped to
 *    this io must be requeued, instead of other parts of the original bio.
 *
 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
 */
static void dm_io_complete(struct dm_io *io)
{}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'. We deliberately skip verifying that here to avoid
 * an atomic_read().
 */
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{}

void disable_discard(struct mapped_device *md)
{}

void disable_write_zeroes(struct mapped_device *md)
{}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{}

static void clone_endio(struct bio *bio)
{}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{}
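
/*
 * Assumed arithmetic behind max_io_len_target_boundary() (body elided above):
 * target_offset is relative to ti->begin, so the room left before the target
 * boundary is simply the remainder of the target's length.  Hypothetical
 * stand-alone helper for illustration only.
 */
static sector_t example_len_to_target_boundary(struct dm_target *ti,
					       sector_t target_offset)
{
	return ti->len - target_offset;
}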

static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
			     unsigned int max_granularity,
			     unsigned int max_sectors)
{}

static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
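
/*
 * Sketch of how a chunked target's constructor might cap I/O size so that
 * split points land on chunk boundaries.  The target type, argument parsing
 * and chunk size are hypothetical.
 */
static int example_chunked_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	sector_t chunk_sectors = 8;	/* hypothetical fixed 4KiB chunks */
	int r;

	/* Ask DM core never to hand this target a bio crossing a chunk. */
	r = dm_set_target_max_io_len(ti, chunk_sectors);
	if (r)
		return r;

	return 0;
}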

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
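
/*
 * Illustrative .map() showing the intended use of dm_accept_partial_bio():
 * accept only the sectors up to a boundary and let DM core resubmit the rest
 * (region 3 in the diagram above) as a new bio.  The target and its 128-sector
 * boundary are hypothetical.
 */
static int example_bounded_map(struct dm_target *ti, struct bio *bio)
{
	sector_t boundary = 128;
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	sector_t remaining = boundary - (offset & (boundary - 1));

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	/* ... remap the accepted part to the underlying device ... */
	return DM_MAPIO_REMAPPED;
}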

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Targets should also set ti->accounts_remapped_io to true.
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
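
/*
 * Sketch of the deferred-submission pattern this helper supports: .map() hands
 * the already remapped bio to the target's own worker and returns
 * DM_MAPIO_SUBMITTED; the worker later calls dm_submit_bio_remap(), which
 * starts I/O accounting and submits the bio.  example_queue_for_worker() and
 * both functions below are hypothetical, and the target's .ctr is assumed to
 * set ti->accounts_remapped_io = true.
 */
static void example_queue_for_worker(void *target_ctx, struct bio *bio);

static int example_deferring_map(struct dm_target *ti, struct bio *bio)
{
	/* The bio is assumed to be remapped (bio_set_dev etc.) by now. */
	example_queue_for_worker(ti->private, bio);
	return DM_MAPIO_SUBMITTED;
}

static void example_worker_submit(struct bio *bio)
{
	/* A NULL tgt_clone submits the clone itself after accounting starts. */
	dm_submit_bio_remap(bio, NULL);
}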

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{}

static void __map_bio(struct bio *clone)
{}

static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned int num_bios,
				unsigned *len, gfp_t gfp_flag)
{}

static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
					  unsigned int num_bios, unsigned int *len,
					  gfp_t gfp_flag)
{}

static void __send_empty_flush(struct clone_info *ci)
{}

static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
			       unsigned int num_bios, unsigned int max_granularity,
			       unsigned int max_sectors)
{}

static bool is_abnormal_io(struct bio *bio)
{}

static blk_status_t __process_abnormal_io(struct clone_info *ci,
					  struct dm_target *ti)
{}

/*
 * Reuse ->bi_private as the dm_io list head for storing all dm_io instances
 * associated with this bio; the bio's original bi_private is saved in
 * dm_io->data before the reuse.
 *
 * bio->bi_private is owned by the fs or upper layer, so the block layer won't
 * touch it after splitting, and nobody else changes it once the bio has been
 * submitted.  This makes the reuse safe.
 */
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{}
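
/*
 * Assumed shape of the helper above (body elided): the storage of
 * ->bi_private is simply treated as the head of the dm_io list.
 */
static inline struct dm_io **example_poll_list_head(struct bio *bio)
{
	return (struct dm_io **)&bio->bi_private;
}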

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{}

static void init_clone_info(struct clone_info *ci, struct dm_io *io,
			    struct dm_table *map, struct bio *bio, bool is_abnormal)
{}

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
					   struct bio *bio)
{}
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{}

static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
						   struct dm_target *ti)
{}

static void __send_zone_reset_all_native(struct clone_info *ci,
					 struct dm_target *ti)
{}

static blk_status_t __send_zone_reset_all(struct clone_info *ci)
{}

#else
static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
					   struct bio *bio)
{
	return false;
}
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
	return false;
}
static blk_status_t __send_zone_reset_all(struct clone_info *ci)
{
	return BLK_STS_NOTSUPP;
}
#endif

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{}

static void dm_submit_bio(struct bio *bio)
{}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{}

static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{}

/*
 *---------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------
 */
static void free_minor(int minor)
{}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{}

static int next_free_minor(int *minor)
{}
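
/*
 * Plausible sketch of reserving a specific minor in the IDR declared above,
 * assuming the usual idr_preload()/idr_alloc() pattern under _minor_lock and
 * MINOR_ALLOCED as a placeholder until the mapped_device exists.  The function
 * name is hypothetical; error handling is trimmed.
 */
static int example_reserve_minor(int minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	/* Reserve exactly [minor, minor + 1) with a placeholder value. */
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();

	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}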

static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

static void cleanup_mapped_device(struct mapped_device *md)
{}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{}

void dm_unlock_md_type(struct mapped_device *md)
{}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{}

struct mapped_device *dm_get_md(dev_t dev)
{}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{}

void dm_get(struct mapped_device *md)
{}

int dm_hold(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{}

void dm_destroy(struct mapped_device *md)
{}

void dm_destroy_immediate(struct mapped_device *md)
{}

void dm_put(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_put);

static bool dm_in_flight_bios(struct mapped_device *md)
{}

static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
{}

static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
{}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{}

static void dm_queue_flush(struct mapped_device *md)
{}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{}

static void unlock_fs(struct mapped_device *md)
{}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned int suspend_flags, unsigned int task_state,
			int dmf_suspended_flag)
{}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
{}

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{}

int dm_resume(struct mapped_device *md)
{}
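
/*
 * Sketch of the sequence a caller (e.g. the ioctl layer) follows to replace a
 * live table, per the comments above dm_suspend(): suspend to quiesce and
 * defer I/O, swap the table, then resume.  The function is illustrative only
 * and error handling is trimmed; the choice of DM_SUSPEND_LOCKFS_FLAG is an
 * assumption.
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *new_map)
{
	struct dm_table *old_map;
	int r;

	/* Quiesce: flush in-flight bios, defer new ones to md->deferred. */
	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	/* Install the new table; the old one comes back for us to destroy. */
	old_map = dm_swap_table(md, new_map);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}
	if (old_map)
		dm_table_destroy(old_map);

	/* Restart I/O processing, replaying anything that was deferred. */
	return dm_resume(md);
}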

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
{}

static void __dm_internal_resume(struct mapped_device *md)
{}

void dm_internal_suspend_noflush(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*
 *---------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------
 */
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned int cookie, bool need_resize_uevent)
{}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{}

uint32_t dm_get_event_nr(struct mapped_device *md)
{}

int dm_wait_event(struct mapped_device *md, int event_nr)
{}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{}

int dm_suspended_md(struct mapped_device *md)
{}

static int dm_post_suspending_md(struct mapped_device *md)
{}

int dm_suspended_internally_md(struct mapped_device *md)
{}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{}

int dm_suspended(struct dm_target *ti)
{}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_post_suspending(struct dm_target *ti)
{}
EXPORT_SYMBOL_GPL(dm_post_suspending);

int dm_noflush_suspending(struct dm_target *ti)
{}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

void dm_free_md_mempools(struct dm_md_mempools *pools)
{}

struct dm_pr {};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      struct dm_pr *pr)
{}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{}


static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data)
{}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{}

/*
 * If there is a non-All Registrants type of reservation, the release must be
 * sent down the holding path. For the cases where there is no reservation, or
 * where the path is not the holder, the device will also return success, so we
 * must try each path to make sure the release reaches the holder.
 */
static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data)
{}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{}

static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data)
{}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{}

static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
			     sector_t start, sector_t len, void *data)
{}

static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
{}

static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
				    sector_t start, sector_t len, void *data)
{}

static int dm_pr_read_reservation(struct block_device *bdev,
				  struct pr_held_reservation *rsv)
{}

static const struct pr_ops dm_pr_ops =;

static const struct block_device_operations dm_blk_dops =;

static const struct block_device_operations dm_rq_blk_dops =;

static const struct dax_operations dm_dax_ops =;

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");