#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"
#include <linux/bio-integrity.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#define DM_MSG_PREFIX …
#define DM_COOKIE_ENV_VAR_NAME …
#define DM_COOKIE_LENGTH …
#define REQ_DM_POLL_LIST …
static const char *_name = …;
static unsigned int major;
static unsigned int _major;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
static void do_deferred_remove(struct work_struct *w);
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = …;
DECLARE_WAIT_QUEUE_HEAD(…);
void dm_issue_global_event(void)
{ … }
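/*
 * Static keys that keep rarely used work (stats, swap-bio throttling,
 * zoned support) out of the fast path until a table actually needs it.
 */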
DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);
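/*
 * One of these is allocated (on-stack) per original bio.
 */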
struct clone_info { … };
static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{ … }
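/*
 * Returns the per-bio data area that a target reserved by setting
 * ti->per_io_data_size in its constructor; it is laid out immediately
 * in front of the clone bio.
 *
 * Hypothetical target usage (sketch only; my_ctx, my_map and my_dev are
 * illustrative names, not part of this file):
 *
 *      struct my_ctx { sector_t orig_sector; };
 *
 *      static int my_map(struct dm_target *ti, struct bio *bio)
 *      {
 *              struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(*ctx));
 *
 *              ctx->orig_sector = bio->bi_iter.bi_sector;
 *              bio_set_dev(bio, my_dev->bdev);
 *              return DM_MAPIO_REMAPPED;
 *      }
 */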
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{ … }
EXPORT_SYMBOL_GPL(…);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{ … }
EXPORT_SYMBOL_GPL(…);
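/*
 * Placeholder stored in _minor_idr while the mapped_device is still
 * being set up; replaced with the real md pointer once allocation
 * succeeds.
 */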
#define MINOR_ALLOCED …
#define DM_NUMA_NODE …
static int dm_numa_node = …;
#define DEFAULT_SWAP_BIOS …
static int swap_bios = …;
static int get_swap_bios(void)
{ … }
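/*
 * One of these is allocated per underlying device opened through
 * dm_get_table_device(); reference counted so several tables and
 * targets can share a single open of the same device.
 */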
struct table_device { … };
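/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */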
#define RESERVED_BIO_BASED_IOS …
static unsigned int reserved_bio_based_ios = …;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{ … }
unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{ … }
unsigned int dm_get_reserved_bio_based_ios(void)
{ … }
EXPORT_SYMBOL_GPL(…);
static unsigned int dm_get_numa_node(void)
{ … }
static int __init local_init(void)
{ … }
static void local_exit(void)
{ … }
static int (*_inits[])(void) __initdata = …;
static void (*_exits[])(void) = …;
static int __init dm_init(void)
{ … }
static void __exit dm_exit(void)
{ … }
int dm_deleting_md(struct mapped_device *md)
{ … }
static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{ … }
static void dm_blk_close(struct gendisk *disk)
{ … }
int dm_open_count(struct mapped_device *md)
{ … }
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{ … }
int dm_cancel_deferred_remove(struct mapped_device *md)
{ … }
static void do_deferred_remove(struct work_struct *w)
{ … }
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{ … }
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
{ … }
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{ … }
static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{ … }
u64 dm_start_time_ns_from_clone(struct bio *bio)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline bool bio_is_flush_with_data(struct bio *bio)
{ … }
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{ … }
static void dm_io_acct(struct dm_io *io, bool end)
{ … }
static void __dm_start_io_acct(struct dm_io *io)
{ … }
static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{ … }
static void dm_end_io_acct(struct dm_io *io)
{ … }
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
{ … }
static void free_io(struct dm_io *io)
{ … }
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{ … }
static void free_tio(struct bio *clone)
{ … }
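/*
 * Add the bio to the list of deferred io.
 */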
static void queue_io(struct mapped_device *md, struct bio *bio)
{ … }
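/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 *
 * Typical pattern (sketch):
 *
 *      int srcu_idx;
 *      struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *      if (map)
 *              ... use the table ...
 *      dm_put_live_table(md, srcu_idx);
 */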
struct dm_table *dm_get_live_table(struct mapped_device *md,
int *srcu_idx) __acquires(md->io_barrier)
{ … }
void dm_put_live_table(struct mapped_device *md,
int srcu_idx) __releases(md->io_barrier)
{ … }
void dm_sync_table(struct mapped_device *md)
{ … }
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{ … }
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{ … }
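/*
 * Claim marker passed as the holder when DM opens an underlying device
 * exclusively.
 */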
static char *_dm_claim_ptr = …;
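/*
 * Open a table device so we can use it as a map destination.
 */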
static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{ … }
static void close_table_device(struct table_device *td, struct mapped_device *md)
{ … }
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
blk_mode_t mode)
{ … }
int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
struct dm_dev **result)
{ … }
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{ … }
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{ … }
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{ … }
static int __noflush_suspending(struct mapped_device *md)
{ … }
static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{ … }
static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{ … }
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{ … }
static void __dm_io_complete(struct dm_io *io, bool first_stage)
{ … }
static void dm_wq_requeue_work(struct work_struct *work)
{ … }
static void dm_io_complete(struct dm_io *io)
{ … }
static inline void __dm_io_dec_pending(struct dm_io *io)
{ … }
static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{ … }
static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{ … }
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{ … }
void disable_discard(struct mapped_device *md)
{ … }
void disable_write_zeroes(struct mapped_device *md)
{ … }
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{ … }
static void clone_endio(struct bio *bio)
{ … }
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
sector_t target_offset)
{ … }
static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
unsigned int max_granularity,
unsigned int max_sectors)
{ … }
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{ … }
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{ … }
EXPORT_SYMBOL_GPL(…);
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
sector_t sector, int *srcu_idx)
__acquires(md->io_barrier)
{ … }
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn)
{ … }
static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{ … }
static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{ … }
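/*
 * A target may call dm_accept_partial_bio() only from its map routine.
 * It informs the core that the target wants to process only the first
 * n_sectors of the bio; the remainder is resubmitted as a new bio.  It
 * must not be used for REQ_PREFLUSH, zone management operations, zone
 * append writes, or bios cloned via __send_duplicate_bios().
 *
 * Typical use from a target's map function (sketch; max_sectors is an
 * illustrative target-specific limit):
 *
 *      if (bio_sectors(bio) > max_sectors)
 *              dm_accept_partial_bio(bio, max_sectors);
 */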
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{ … }
EXPORT_SYMBOL_GPL(…);
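/*
 * @clone: clone bio that DM core passed to target's ->map function
 * @tgt_clone: clone of @clone that the target wants submitted
 *
 * Targets should use this interface to submit bios they take ownership
 * of when returning DM_MAPIO_SUBMITTED, and should set
 * ti->accounts_remapped_io so the core accounts the remapped I/O.
 */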
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{ … }
EXPORT_SYMBOL_GPL(…);
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{ … }
static void __map_bio(struct bio *clone)
{ … }
static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{ … }
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
struct dm_target *ti, unsigned int num_bios,
unsigned int *len, gfp_t gfp_flag)
{ … }
static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned int num_bios, unsigned int *len,
gfp_t gfp_flag)
{ … }
static void __send_empty_flush(struct clone_info *ci)
{ … }
static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
unsigned int num_bios, unsigned int max_granularity,
unsigned int max_sectors)
{ … }
static bool is_abnormal_io(struct bio *bio)
{ … }
static blk_status_t __process_abnormal_io(struct clone_info *ci,
struct dm_target *ti)
{ … }
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{ … }
static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{ … }
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{ … }
static void init_clone_info(struct clone_info *ci, struct dm_io *io,
struct dm_table *map, struct bio *bio, bool is_abnormal)
{ … }
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
struct bio *bio)
{ … }
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{ … }
static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
struct dm_target *ti)
{ … }
static void __send_zone_reset_all_native(struct clone_info *ci,
struct dm_target *ti)
{ … }
static blk_status_t __send_zone_reset_all(struct clone_info *ci)
{ … }
#else
static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
struct bio *bio)
{
return false;
}
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
return false;
}
static blk_status_t __send_zone_reset_all(struct clone_info *ci)
{
return BLK_STS_NOTSUPP;
}
#endif
static void dm_split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{ … }
static void dm_submit_bio(struct bio *bio)
{ … }
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
unsigned int flags)
{ … }
static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
unsigned int flags)
{ … }
static void free_minor(int minor)
{ … }
static int specific_minor(int minor)
{ … }
static int next_free_minor(int *minor)
{ … }
static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{ … }
#else
static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif
static void cleanup_mapped_device(struct mapped_device *md)
{ … }
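/*
 * Allocate and initialise a blank device with a given minor.
 */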
static struct mapped_device *alloc_dev(int minor)
{ … }
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{ … }
static void event_callback(void *context)
{ … }
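/*
 * Returns old map, which caller must destroy.
 */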
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{ … }
static struct dm_table *__unbind(struct mapped_device *md)
{ … }
int dm_create(int minor, struct mapped_device **result)
{ … }
void dm_lock_md_type(struct mapped_device *md)
{ … }
void dm_unlock_md_type(struct mapped_device *md)
{ … }
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{ … }
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{ … }
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{ … }
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{ … }
struct mapped_device *dm_get_md(dev_t dev)
{ … }
EXPORT_SYMBOL_GPL(…);
void *dm_get_mdptr(struct mapped_device *md)
{ … }
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{ … }
void dm_get(struct mapped_device *md)
{ … }
int dm_hold(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
const char *dm_device_name(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __dm_destroy(struct mapped_device *md, bool wait)
{ … }
void dm_destroy(struct mapped_device *md)
{ … }
void dm_destroy_immediate(struct mapped_device *md)
{ … }
void dm_put(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool dm_in_flight_bios(struct mapped_device *md)
{ … }
static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
{ … }
static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
{ … }
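/*
 * Process the deferred bios.
 */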
static void dm_wq_work(struct work_struct *work)
{ … }
static void dm_queue_flush(struct mapped_device *md)
{ … }
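/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */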
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{ … }
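/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */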
static int lock_fs(struct mapped_device *md)
{ … }
static void unlock_fs(struct mapped_device *md)
{ … }
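/*
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */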
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
unsigned int suspend_flags, unsigned int task_state,
int dmf_suspended_flag)
{ … }
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
{ … }
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{ … }
int dm_resume(struct mapped_device *md)
{ … }
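/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target
 * drivers. It may be used only from the kernel.
 */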
static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
{ … }
static void __dm_internal_resume(struct mapped_device *md)
{ … }
void dm_internal_suspend_noflush(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
void dm_internal_resume(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
void dm_internal_suspend_fast(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
void dm_internal_resume_fast(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned int cookie, bool need_resize_uevent)
{ … }
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{ … }
uint32_t dm_get_event_nr(struct mapped_device *md)
{ … }
int dm_wait_event(struct mapped_device *md, int event_nr)
{ … }
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{ … }
struct gendisk *dm_disk(struct mapped_device *md)
{ … }
EXPORT_SYMBOL_GPL(…);
struct kobject *dm_kobject(struct mapped_device *md)
{ … }
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{ … }
int dm_suspended_md(struct mapped_device *md)
{ … }
static int dm_post_suspending_md(struct mapped_device *md)
{ … }
int dm_suspended_internally_md(struct mapped_device *md)
{ … }
int dm_test_deferred_remove_flag(struct mapped_device *md)
{ … }
int dm_suspended(struct dm_target *ti)
{ … }
EXPORT_SYMBOL_GPL(…);
int dm_post_suspending(struct dm_target *ti)
{ … }
EXPORT_SYMBOL_GPL(…);
int dm_noflush_suspending(struct dm_target *ti)
{ … }
EXPORT_SYMBOL_GPL(…);
void dm_free_md_mempools(struct dm_md_mempools *pools)
{ … }
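/*
 * Persistent reservation support: dm_call_pr() fans a PR operation out
 * to every data device in the live table, with struct dm_pr carrying
 * the parameters and collecting the result.
 */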
struct dm_pr { … };
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
struct dm_pr *pr)
{ … }
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags)
{ … }
static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
u32 flags)
{ … }
static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{ … }
static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{ … }
static int dm_pr_clear(struct block_device *bdev, u64 key)
{ … }
static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
{ … }
static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_pr_read_reservation(struct block_device *bdev,
struct pr_held_reservation *rsv)
{ … }
static const struct pr_ops dm_pr_ops = …;
static const struct block_device_operations dm_blk_dops = …;
static const struct block_device_operations dm_rq_blk_dops = …;
static const struct dax_operations dm_dax_ops = …;
module_init(…);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(…);
module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(…);
module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(…);
module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(…);
MODULE_DESCRIPTION(…);
MODULE_AUTHOR(…);
MODULE_LICENSE(…);