#include "dm.h"
#include "dm-bio-prison-v2.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include "dm-io-tracker.h"
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#define DM_MSG_PREFIX …
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(…) …;
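/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *            either direction
 */

/*
 * A continuation bundles a work_struct with the status of the operation it
 * is waiting on, letting the stages of a migration be chained together as
 * separate work items.
 */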
struct continuation { … };
static inline void init_continuation(struct continuation *k,
void (*fn)(struct work_struct *))
{ … }
static inline void queue_continuation(struct workqueue_struct *wq,
struct continuation *k)
{ … }
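/*
 * The batcher collects together pieces of work that need a particular
 * operation to occur before they can proceed (typically a metadata commit).
 */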
struct batcher { … };
static void __commit(struct work_struct *_ws)
{ … }
static void batcher_init(struct batcher *b,
blk_status_t (*commit_op)(void *),
void *commit_context,
void (*issue_op)(struct bio *bio, void *),
void *issue_context,
struct workqueue_struct *wq)
{ … }
static void async_commit(struct batcher *b)
{ … }
static void continue_after_commit(struct batcher *b, struct continuation *k)
{ … }
static void issue_after_commit(struct batcher *b, struct bio *bio)
{ … }
static void schedule_commit(struct batcher *b)
{ … }
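/*
 * There are times when a bio is allowed to run, but some work must happen
 * before its endio is called.  This is done by temporarily hooking a
 * different bi_end_io/bi_private into the bio and restoring the originals
 * afterwards.
 */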
struct dm_hook_info { … };
static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
bio_end_io_t *bi_end_io, void *bi_private)
{ … }
static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{ … }
#define MIGRATION_POOL_SIZE …
#define COMMIT_PERIOD …
#define MIGRATION_COUNT_WINDOW …
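/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */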
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS …
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS …
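/*
 * Metadata modes: read/write as normal, read-only after a metadata failure,
 * or fail (all I/O errored).  The I/O mode selects writeback, writethrough
 * or passthrough behaviour for the cache.
 */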
enum cache_metadata_mode { … };
enum cache_io_mode { … };
struct cache_features { … };
struct cache_stats { … };
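/*
 * struct cache holds the per-target state: the metadata, origin and cache
 * devices, the replacement policy, the dirty and discard bitsets, deferred
 * bio lists, and the workqueue and batcher used to drive migrations and
 * commits.
 */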
struct cache { … };
struct per_bio_data { … };
struct dm_cache_migration { … };
static bool writethrough_mode(struct cache *cache)
{ … }
static bool writeback_mode(struct cache *cache)
{ … }
static inline bool passthrough_mode(struct cache *cache)
{ … }
static void wake_deferred_bio_worker(struct cache *cache)
{ … }
static void wake_migration_worker(struct cache *cache)
{ … }
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{ … }
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
{ … }
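/*
 * Migration covers moving data from the origin device to the cache
 * (promotion), or vice versa (demotion).
 */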
static struct dm_cache_migration *alloc_migration(struct cache *cache)
{ … }
static void free_migration(struct dm_cache_migration *mg)
{ … }
static inline dm_oblock_t oblock_succ(dm_oblock_t b)
{ … }
static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
{ … }
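/*
 * There are two lock levels: level 0, which only prevents WRITEs, and
 * level 1, which prevents both READs and WRITEs.
 */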
#define WRITE_LOCK_LEVEL …
#define READ_WRITE_LOCK_LEVEL …
static unsigned int lock_level(struct bio *bio)
{ … }
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{ … }
static struct per_bio_data *init_per_bio_data(struct bio *bio)
{ … }
static void defer_bio(struct cache *cache, struct bio *bio)
{ … }
static void defer_bios(struct cache *cache, struct bio_list *bios)
{ … }
static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{ … }
static bool is_dirty(struct cache *cache, dm_cblock_t b)
{ … }
static void set_dirty(struct cache *cache, dm_cblock_t cblock)
{ … }
static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
{ … }
static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
{ … }
static bool block_size_is_power_of_two(struct cache *cache)
{ … }
static dm_block_t block_div(dm_block_t b, uint32_t n)
{ … }
static dm_block_t oblocks_per_dblock(struct cache *cache)
{ … }
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{ … }
static void set_discard(struct cache *cache, dm_dblock_t b)
{ … }
static void clear_discard(struct cache *cache, dm_dblock_t b)
{ … }
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{ … }
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{ … }
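/*
 * Remapping: point a bio at either the origin device or the corresponding
 * block on the cache device, clearing discard state and marking blocks
 * dirty as required.
 */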
static void remap_to_origin(struct cache *cache, struct bio *bio)
{ … }
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{ … }
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{ … }
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
dm_oblock_t oblock)
{ … }
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{ … }
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{ … }
static bool accountable_bio(struct cache *cache, struct bio *bio)
{ … }
static void accounted_begin(struct cache *cache, struct bio *bio)
{ … }
static void accounted_complete(struct cache *cache, struct bio *bio)
{ … }
static void accounted_request(struct cache *cache, struct bio *bio)
{ … }
static void issue_op(struct bio *bio, void *context)
{ … }
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{ … }
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{ … }
static const char *cache_device_name(struct cache *cache)
{ … }
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{ … }
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{ … }
static void abort_transaction(struct cache *cache)
{ … }
static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{ … }
static void load_stats(struct cache *cache)
{ … }
static void save_stats(struct cache *cache)
{ … }
static void update_stats(struct cache_stats *stats, enum policy_operation op)
{ … }
static void inc_io_migrations(struct cache *cache)
{ … }
static void dec_io_migrations(struct cache *cache)
{ … }
static bool discard_or_flush(struct bio *bio)
{ … }
static void calc_discard_block_range(struct cache *cache, struct bio *bio,
dm_dblock_t *b, dm_dblock_t *e)
{ … }
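/*
 * Background (migration) work is gated by an rw_semaphore:
 * prevent_background_work() takes it for write (e.g. during suspend), while
 * each unit of background work holds it for read.
 */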
static void prevent_background_work(struct cache *cache)
{ … }
static void allow_background_work(struct cache *cache)
{ … }
static bool background_work_begin(struct cache *cache)
{ … }
static void background_work_end(struct cache *cache)
{ … }
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{ … }
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{ … }
static void quiesce(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{ … }
static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{ … }
static void copy_complete(int read_err, unsigned long write_err, void *context)
{ … }
static void copy(struct dm_cache_migration *mg, bool promote)
{ … }
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{ … }
static void overwrite_endio(struct bio *bio)
{ … }
static void overwrite(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{ … }
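/*
 * Migrations run as a chain of continuations on the cache workqueue.
 * Broadly: mg_start takes the cell locks, the block is copied with kcopyd
 * (or, for a promotion triggered by a whole-block write, simply overwritten
 * in place), the metadata is updated, and mg_complete releases the cell and
 * requeues any bios that were deferred against it.
 */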
static void mg_complete(struct dm_cache_migration *mg, bool success)
{ … }
static void mg_success(struct work_struct *ws)
{ … }
static void mg_update_metadata(struct work_struct *ws)
{ … }
static void mg_update_metadata_after_copy(struct work_struct *ws)
{ … }
static void mg_upgrade_lock(struct work_struct *ws)
{ … }
static void mg_full_copy(struct work_struct *ws)
{ … }
static void mg_copy(struct work_struct *ws)
{ … }
static int mg_lock_writes(struct dm_cache_migration *mg)
{ … }
static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
{ … }
static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{ … }
static void invalidate_completed(struct work_struct *ws)
{ … }
static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{ … }
static void invalidate_remove(struct work_struct *ws)
{ … }
static int invalidate_lock(struct dm_cache_migration *mg)
{ … }
static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
dm_oblock_t oblock, struct bio *bio)
{ … }
enum busy { … };
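/*
 * spare_migration_bandwidth() decides whether the cache is idle or busy,
 * based on recent I/O seen by the io tracker and the volume of migrations
 * already in flight; the migration worker uses this to throttle background
 * work.
 */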
static enum busy spare_migration_bandwidth(struct cache *cache)
{ … }
static void inc_hit_counter(struct cache *cache, struct bio *bio)
{ … }
static void inc_miss_counter(struct cache *cache, struct bio *bio)
{ … }
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bool *commit_needed)
{ … }
static bool process_bio(struct cache *cache, struct bio *bio)
{ … }
static int commit(struct cache *cache, bool clean_shutdown)
{ … }
static blk_status_t commit_op(void *context)
{ … }
static bool process_flush_bio(struct cache *cache, struct bio *bio)
{ … }
static bool process_discard_bio(struct cache *cache, struct bio *bio)
{ … }
static void process_deferred_bios(struct work_struct *ws)
{ … }
static void requeue_deferred_bios(struct cache *cache)
{ … }
static void do_waker(struct work_struct *ws)
{ … }
static void check_migrations(struct work_struct *ws)
{ … }
static void destroy(struct cache *cache)
{ … }
static void cache_dtr(struct dm_target *ti)
{ … }
static sector_t get_dev_size(struct dm_dev *dev)
{ … }
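/*
 * Construct a cache device mapping:
 *
 *   cache <metadata dev> <cache dev> <origin dev> <block size>
 *         <#feature args> [<feature arg>]*
 *         <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev : fast device holding the persistent metadata
 * cache dev    : fast device holding cached data blocks
 * origin dev   : slow device holding the original data blocks
 * block size   : cache unit size in sectors
 *
 * feature args : e.g. writethrough, passthrough, metadata2,
 *                no_discard_passdown (the default is writeback)
 * policy       : the replacement policy to use, e.g. smq
 * policy args  : key/value pairs passed to the policy,
 *                e.g. 'migration_threshold 2048'
 */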
struct cache_args { … };
static void destroy_cache_args(struct cache_args *ca)
{ … }
static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{ … }
static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static void init_features(struct cache_features *cf)
{ … }
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{ … }
static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
char **error)
{ … }
static struct kmem_cache *migration_cache;
#define NOT_CORE_OPTION …
static int process_config_option(struct cache *cache, const char *key, const char *value)
{ … }
static int set_config_value(struct cache *cache, const char *key, const char *value)
{ … }
static int set_config_values(struct cache *cache, int argc, const char **argv)
{ … }
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{ … }
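/*
 * The discard bitset tracks discards at a coarser granularity than cache
 * blocks: the discard block size is scaled up from the cache block size so
 * that the bitset for a large origin stays a manageable size.
 */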
#define MAX_DISCARD_BLOCKS …
static bool too_many_discard_blocks(sector_t discard_block_size,
sector_t origin_size)
{ … }
static sector_t calculate_discard_block_size(sector_t cache_block_size,
sector_t origin_size)
{ … }
static void set_cache_size(struct cache *cache, dm_cblock_t size)
{ … }
#define DEFAULT_MIGRATION_THRESHOLD …
static int cache_create(struct cache_args *ca, struct cache **result)
{ … }
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{ … }
static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ … }
static int cache_map(struct dm_target *ti, struct bio *bio)
{ … }
static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{ … }
static int write_dirty_bitset(struct cache *cache)
{ … }
static int write_discard_bitset(struct cache *cache)
{ … }
static int write_hints(struct cache *cache)
{ … }
static bool sync_metadata(struct cache *cache)
{ … }
static void cache_postsuspend(struct dm_target *ti)
{ … }
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
bool dirty, uint32_t hint, bool hint_valid)
{ … }
struct discard_load_info { … };
static void discard_load_info_init(struct cache *cache,
struct discard_load_info *li)
{ … }
static void set_discard_range(struct discard_load_info *li)
{ … }
static int load_discard(void *context, sector_t discard_block_size,
dm_dblock_t dblock, bool discard)
{ … }
static dm_cblock_t get_cache_dev_size(struct cache *cache)
{ … }
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{ … }
static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{ … }
static int cache_preresume(struct dm_target *ti)
{ … }
static void cache_resume(struct dm_target *ti)
{ … }
static void emit_flags(struct cache *cache, char *result,
unsigned int maxlen, ssize_t *sz_ptr)
{ … }
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{ … }
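/*
 * A cache block range for the invalidate_cblocks message can take two
 * forms:
 *
 * i) A single cblock, e.g. '3456'
 * ii) A begin and end cblock with a dash between, e.g. '3456-4567'
 *
 * For example (assuming a cache device named 'my_cache'):
 *   dmsetup message my_cache 0 invalidate_cblocks 2345 3456-4567
 */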
struct cblock_range { … };
static int parse_cblock_range(struct cache *cache, const char *str,
struct cblock_range *result)
{ … }
static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{ … }
static inline dm_cblock_t cblock_succ(dm_cblock_t b)
{ … }
static int request_invalidation(struct cache *cache, struct cblock_range *range)
{ … }
static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
const char **cblock_ranges)
{ … }
static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{ … }
static int cache_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{ … }
static void disable_passdown_if_not_supported(struct cache *cache)
{ … }
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{ … }
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{ … }
static struct target_type cache_target = …;
static int __init dm_cache_init(void)
{ … }
static void __exit dm_cache_exit(void)
{ … }
module_init(…) …;
module_exit(dm_cache_exit);
MODULE_DESCRIPTION(…) …;
MODULE_AUTHOR(…) …;
MODULE_LICENSE(…) …;