#include "dm-thin-metadata.h"
#include "dm-bio-prison-v1.h"
#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#define DM_MSG_PREFIX …
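/*
 * Tunable constants: mempool sizes for the endio hooks and new mappings,
 * how often the worker commits outstanding metadata, and how long a pool
 * may queue I/O while out of data space before the timeout fires.
 */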
#define ENDIO_HOOK_POOL_SIZE …
#define MAPPING_POOL_SIZE …
#define COMMIT_PERIOD …
#define NO_SPACE_TIMEOUT_SECS …
static unsigned int no_space_timeout_secs = …;
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(…) …;
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS …
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS …
#define MAX_DEV_ID …
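/*
 * Key building.  Prison cells are keyed either by virtual block (a block
 * within a thin device) or by physical block (a block within the pool's
 * data device), as selected by the lock_space below.
 */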
enum lock_space { … };
static bool build_key(struct dm_thin_device *td, enum lock_space ls,
dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{ … }
static void build_data_key(struct dm_thin_device *td, dm_block_t b,
struct dm_cell_key *key)
{ … }
static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
struct dm_cell_key *key)
{ … }
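/*
 * Simple back-pressure for the worker.  Submitters take the throttle for
 * read; once the worker has been running for longer than
 * THROTTLE_THRESHOLD it takes it for write, holding off new submissions
 * until the current batch of work completes.
 */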
#define THROTTLE_THRESHOLD …
struct throttle { … };
static void throttle_init(struct throttle *t)
{ … }
static void throttle_work_start(struct throttle *t)
{ … }
static void throttle_work_update(struct throttle *t)
{ … }
static void throttle_work_complete(struct throttle *t)
{ … }
static void throttle_lock(struct throttle *t)
{ … }
static void throttle_unlock(struct throttle *t)
{ … }
struct dm_thin_new_mapping;
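/*
 * The pool runs in a number of modes, ordered from fully functional down
 * to failed: writable, out of data space, read-only (no metadata changes)
 * and fail (all I/O errors).  get_pool_mode() and set_pool_mode() below
 * manage the transitions.
 */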
enum pool_mode { … };
struct pool_features { … };
struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
#define CELL_SORT_ARRAY_SIZE …
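/*
 * A pool ties together a metadata device and a data device and owns the
 * worker that services deferred bios, cells and prepared mappings on
 * behalf of all thin devices stacked on it.
 */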
struct pool { … };
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
static enum pool_mode get_pool_mode(struct pool *pool)
{ … }
static void notify_of_pool_mode_change(struct pool *pool)
{ … }
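/*
 * Target context for a pool.
 */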
struct pool_c { … };
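/*
 * Target context for a thin.
 */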
struct thin_c { … };
static bool block_size_is_power_of_two(struct pool *pool)
{ … }
static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{ … }
struct discard_op { … };
static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
{ … }
static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{ … }
static void end_discard(struct discard_op *op, int r)
{ … }
static void wake_worker(struct pool *pool)
{ … }
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
struct dm_bio_prison_cell **cell_result)
{ … }
static void cell_release(struct pool *pool,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{ … }
static void cell_visit_release(struct pool *pool,
void (*fn)(void *, struct dm_bio_prison_cell *),
void *context,
struct dm_bio_prison_cell *cell)
{ … }
static void cell_release_no_holder(struct pool *pool,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{ … }
static void cell_error_with_code(struct pool *pool,
struct dm_bio_prison_cell *cell, blk_status_t error_code)
{ … }
static blk_status_t get_pool_io_error_code(struct pool *pool)
{ … }
static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{ … }
static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{ … }
static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{ … }
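/*
 * A global list of pools that uses a struct mapped_device as a key.
 */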
static struct dm_thin_pool_table { … } dm_thin_pool_table;
static void pool_table_init(void)
{ … }
static void pool_table_exit(void)
{ … }
static void __pool_table_insert(struct pool *pool)
{ … }
static void __pool_table_remove(struct pool *pool)
{ … }
static struct pool *__pool_table_lookup(struct mapped_device *md)
{ … }
static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{ … }
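/*
 * Per-bio state hooked on in thin_hook_bio(): the owning thin device,
 * deferred-entry bookkeeping for shared reads and in-flight I/O, and any
 * overwrite mapping that thin_endio() must complete.
 */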
struct dm_thin_endio_hook { … };
static void error_bio_list(struct bio_list *bios, blk_status_t error)
{ … }
static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
blk_status_t error)
{ … }
static void requeue_deferred_cells(struct thin_c *tc)
{ … }
static void requeue_io(struct thin_c *tc)
{ … }
static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
{ … }
static void error_retry_list(struct pool *pool)
{ … }
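/*
 * The code below implements the I/O path for a thin device.  It relies
 * heavily on pool resources (lists, workqueues, the bio prison) but is
 * mostly driven from the thin target rather than the thin-pool target.
 */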
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{ … }
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
dm_block_t *begin, dm_block_t *end)
{ … }
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{ … }
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{ … }
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{ … }
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{ … }
static void issue(struct thin_c *tc, struct bio *bio)
{ … }
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{ … }
static void remap_and_issue(struct thin_c *tc, struct bio *bio,
dm_block_t block)
{ … }
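/*
 * Describes a data block that is being provisioned, copied or zeroed.
 * Once preparation completes, the mapping is queued on the pool's
 * prepared list and the worker inserts it into the metadata.
 */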
struct dm_thin_new_mapping { … };
static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{ … }
static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{ … }
static void copy_complete(int read_err, unsigned long write_err, void *context)
{ … }
static void overwrite_endio(struct bio *bio)
{ … }
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
struct remap_info { … };
static void __inc_remap_and_issue_cell(void *context,
struct dm_bio_prison_cell *cell)
{ … }
static void inc_remap_and_issue_cell(struct thin_c *tc,
struct dm_bio_prison_cell *cell,
dm_block_t block)
{ … }
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{ … }
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
{ … }
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{ … }
static void free_discard_mapping(struct dm_thin_new_mapping *m)
{ … }
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{ … }
static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{ … }
static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{ … }
static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
struct bio *discard_parent)
{ … }
static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
{ … }
static void passdown_endio(struct bio *bio)
{ … }
static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{ … }
static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
{ … }
static void process_prepared(struct pool *pool, struct list_head *head,
process_mapping_fn *fn)
{ … }
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{ … }
static int io_overwrites_block(struct pool *pool, struct bio *bio)
{ … }
static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
bio_end_io_t *fn)
{ … }
static int ensure_next_mapping(struct pool *pool)
{ … }
static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{ … }
static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
sector_t begin, sector_t end)
{ … }
static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
dm_block_t data_begin,
struct dm_thin_new_mapping *m)
{ … }
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct dm_dev *origin, dm_block_t data_origin,
dm_block_t data_dest,
struct dm_bio_prison_cell *cell, struct bio *bio,
sector_t len)
{ … }
static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_origin, dm_block_t data_dest,
struct dm_bio_prison_cell *cell, struct bio *bio)
{ … }
static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_block, struct dm_bio_prison_cell *cell,
struct bio *bio)
{ … }
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_dest,
struct dm_bio_prison_cell *cell, struct bio *bio)
{ … }
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void requeue_bios(struct pool *pool);
static bool is_read_only_pool_mode(enum pool_mode mode)
{ … }
static bool is_read_only(struct pool *pool)
{ … }
static void check_for_metadata_space(struct pool *pool)
{ … }
static void check_for_data_space(struct pool *pool)
{ … }
static int commit(struct pool *pool)
{ … }
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{ … }
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{ … }
static void retry_on_resume(struct bio *bio)
{ … }
static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{ … }
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{ … }
static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{ … }
static void process_discard_cell_no_passdown(struct thin_c *tc,
struct dm_bio_prison_cell *virt_cell)
{ … }
static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
struct bio *bio)
{ … }
static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{ … }
static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{ … }
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
struct dm_cell_key *key,
struct dm_thin_lookup_result *lookup_result,
struct dm_bio_prison_cell *cell)
{ … }
static void __remap_and_issue_shared_cell(void *context,
struct dm_bio_prison_cell *cell)
{ … }
static void remap_and_issue_shared_cell(struct thin_c *tc,
struct dm_bio_prison_cell *cell,
dm_block_t block)
{ … }
static void process_shared_bio(struct thin_c *tc, struct bio *bio,
dm_block_t block,
struct dm_thin_lookup_result *lookup_result,
struct dm_bio_prison_cell *virt_cell)
{ … }
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
struct dm_bio_prison_cell *cell)
{ … }
static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static void process_bio(struct thin_c *tc, struct bio *bio)
{ … }
static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
struct dm_bio_prison_cell *cell)
{ … }
static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{ … }
static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static void process_bio_success(struct thin_c *tc, struct bio *bio)
{ … }
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{ … }
static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static int need_commit_due_to_time(struct pool *pool)
{ … }
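/*
 * Deferred bios for a thin device are kept sorted by sector in an rbtree
 * so that the worker issues them to the data device in ascending order.
 */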
#define thin_pbd(node) …
#define thin_bio(pbd) …
static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{ … }
static void __extract_sorted_bios(struct thin_c *tc)
{ … }
static void __sort_thin_deferred_bios(struct thin_c *tc)
{ … }
static void process_thin_deferred_bios(struct thin_c *tc)
{ … }
static int cmp_cells(const void *lhs, const void *rhs)
{ … }
static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
{ … }
static void process_thin_deferred_cells(struct thin_c *tc)
{ … }
static void thin_get(struct thin_c *tc);
static void thin_put(struct thin_c *tc);
static struct thin_c *get_first_thin(struct pool *pool)
{ … }
static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
{ … }
static void process_deferred_bios(struct pool *pool)
{ … }
static void do_worker(struct work_struct *ws)
{ … }
static void do_waker(struct work_struct *ws)
{ … }
static void do_no_space_timeout(struct work_struct *ws)
{ … }
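/*
 * Helpers for running a function on the pool's worker and waiting for it
 * to complete.  The noflush variants are used to flip a thin device into
 * and out of requeue mode during suspend without racing the worker.
 */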
struct pool_work { … };
static struct pool_work *to_pool_work(struct work_struct *ws)
{ … }
static void pool_work_complete(struct pool_work *pw)
{ … }
static void pool_work_wait(struct pool_work *pw, struct pool *pool,
void (*fn)(struct work_struct *))
{ … }
struct noflush_work { … };
static struct noflush_work *to_noflush(struct work_struct *ws)
{ … }
static void do_noflush_start(struct work_struct *ws)
{ … }
static void do_noflush_stop(struct work_struct *ws)
{ … }
static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{ … }
static void set_discard_callbacks(struct pool *pool)
{ … }
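/*
 * Central place for switching the pool between write, out-of-data-space,
 * read-only and fail modes.  It updates the pool's process_* function
 * pointers to match the new mode and notifies userspace of the change.
 */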
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{ … }
static void abort_transaction(struct pool *pool)
{ … }
static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{ … }
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{ … }
static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
{ … }
static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{ … }
static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{ … }
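/*
 * Non-blocking function called from the thin target's map function.
 */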
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{ … }
static void requeue_bios(struct pool *pool)
{ … }
static bool is_factor(sector_t block_size, uint32_t n)
{ … }
static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{ … }
static int bind_control_target(struct pool *pool, struct dm_target *ti)
{ … }
static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{ … }
static void pool_features_init(struct pool_features *pf)
{ … }
static void __pool_destroy(struct pool *pool)
{ … }
static struct kmem_cache *_new_mapping_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
struct block_device *data_dev,
unsigned long block_size,
int read_only, char **error)
{ … }
static void __pool_inc(struct pool *pool)
{ … }
static void __pool_dec(struct pool *pool)
{ … }
static struct pool *__pool_find(struct mapped_device *pool_md,
struct block_device *metadata_dev,
struct block_device *data_dev,
unsigned long block_size, int read_only,
char **error, int *created)
{ … }
static void pool_dtr(struct dm_target *ti)
{ … }
static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
struct dm_target *ti)
{ … }
static void metadata_low_callback(void *context)
{ … }
static int metadata_pre_commit_callback(void *context)
{ … }
static sector_t get_dev_size(struct block_device *bdev)
{ … }
static void warn_if_metadata_device_too_big(struct block_device *bdev)
{ … }
static sector_t get_metadata_dev_size(struct block_device *bdev)
{ … }
static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{ … }
static dm_block_t calc_metadata_threshold(struct pool_c *pt)
{ … }
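/*
 * thin-pool <metadata dev> <data dev>
 *	     <data block size (sectors)>
 *	     <low water mark (blocks)>
 *	     [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 *	     read_only: Don't allow any changes to be made to the pool metadata.
 *	     error_if_no_space: error IOs, instead of queueing, if no space.
 */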
static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ … }
static int pool_map(struct dm_target *ti, struct bio *bio)
{ … }
static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{ … }
static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{ … }
static int pool_preresume(struct dm_target *ti)
{ … }
static void pool_suspend_active_thins(struct pool *pool)
{ … }
static void pool_resume_active_thins(struct pool *pool)
{ … }
static void pool_resume(struct dm_target *ti)
{ … }
static void pool_presuspend(struct dm_target *ti)
{ … }
static void pool_presuspend_undo(struct dm_target *ti)
{ … }
static void pool_postsuspend(struct dm_target *ti)
{ … }
static int check_arg_count(unsigned int argc, unsigned int args_required)
{ … }
static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{ … }
static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{ … }
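/*
 * Messages supported:
 *   create_thin	<dev_id>
 *   create_snap	<dev_id> <origin_id>
 *   delete		<dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */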
static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{ … }
static void emit_flags(struct pool_features *pf, char *result,
unsigned int sz, unsigned int maxlen)
{ … }
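/*
 * Status line is roughly:
 *    <transaction id> <used metadata>/<total metadata>
 *    <used data>/<total data> <held metadata root>
 *    <pool mode> <discard config> <no space config> <needs_check config>
 */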
static void pool_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{ … }
static int pool_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{ … }
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{ … }
static struct target_type pool_target = …;
static void thin_get(struct thin_c *tc)
{ … }
static void thin_put(struct thin_c *tc)
{ … }
static void thin_dtr(struct dm_target *ti)
{ … }
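/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the
 * thin device as well.
 */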
static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ … }
static int thin_map(struct dm_target *ti, struct bio *bio)
{ … }
static int thin_endio(struct dm_target *ti, struct bio *bio,
blk_status_t *err)
{ … }
static void thin_presuspend(struct dm_target *ti)
{ … }
static void thin_postsuspend(struct dm_target *ti)
{ … }
static int thin_preresume(struct dm_target *ti)
{ … }
static void thin_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{ … }
static int thin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{ … }
static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{ … }
static struct target_type thin_target = …;
static int __init dm_thin_init(void)
{ … }
static void dm_thin_exit(void)
{ … }
module_init(…) …;
module_exit(dm_thin_exit);
module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(…) …;
MODULE_DESCRIPTION(…) …;
MODULE_AUTHOR(…) …;
MODULE_LICENSE(…) …;