linux/drivers/md/dm-cache-target.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include "dm-io-tracker.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM();

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

/*
 * Represents a chunk of future work.  'input' allows continuations to pass
 * values between themselves, typically error values.
 */
struct continuation {};

static inline void init_continuation(struct continuation *k,
				     void (*fn)(struct work_struct *))
{}

static inline void queue_continuation(struct workqueue_struct *wq,
				      struct continuation *k)
{}
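
/*
 * A minimal sketch of what such a continuation can hold, using assumed
 * 'example_' names rather than the (elided) definitions above: a
 * work_struct to run the next step, plus the 'input' value mentioned in
 * the comment.
 */
struct example_continuation {
	struct work_struct ws;
	blk_status_t input;
};

static inline void example_init_continuation(struct example_continuation *k,
					     void (*fn)(struct work_struct *))
{
	INIT_WORK(&k->ws, fn);
	k->input = 0;
}

static inline void example_queue_continuation(struct workqueue_struct *wq,
					      struct example_continuation *k)
{
	queue_work(wq, &k->ws);
}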

/*----------------------------------------------------------------*/

/*
 * The batcher collects together pieces of work that need a particular
 * operation to occur before they can proceed (typically a commit).
 */
struct batcher {};

static void __commit(struct work_struct *_ws)
{}

static void batcher_init(struct batcher *b,
			 blk_status_t (*commit_op)(void *),
			 void *commit_context,
			 void (*issue_op)(struct bio *bio, void *),
			 void *issue_context,
			 struct workqueue_struct *wq)
{}

static void async_commit(struct batcher *b)
{}

static void continue_after_commit(struct batcher *b, struct continuation *k)
{}

/*
 * Bios are errored if commit failed.
 */
static void issue_after_commit(struct batcher *b, struct bio *bio)
{}

/*
 * Call this if some urgent work is waiting for the commit to complete.
 */
static void schedule_commit(struct batcher *b)
{}
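
/*
 * A minimal sketch of the batching idea described above, using assumed
 * 'example_' names rather than the (elided) definitions in this file:
 * bios and continuations are parked on lists under a lock, and a single
 * commit work item processes them once the commit has happened.
 */
struct example_batcher {
	blk_status_t (*commit_op)(void *context);
	void *commit_context;

	void (*issue_op)(struct bio *bio, void *context);
	void *issue_context;

	struct workqueue_struct *wq;

	spinlock_t lock;
	struct list_head work_items;	/* continuations waiting for the commit */
	struct bio_list bios;		/* bios waiting for the commit */
	struct work_struct commit_work;
	bool commit_scheduled;
};

static void example_issue_after_commit(struct example_batcher *b, struct bio *bio)
{
	bool commit_scheduled;

	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	bio_list_add(&b->bios, bio);
	spin_unlock_irq(&b->lock);

	/* a commit has already been requested, so kick the commit work now */
	if (commit_scheduled)
		queue_work(b->wq, &b->commit_work);
}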

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{}
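
/*
 * A minimal sketch of the hook/unhook pattern described above, with
 * assumed 'example_' names: stash the bio's current endio and private
 * pointer, install our own, and restore the originals before the real
 * endio is finally called.
 */
struct example_hook_info {
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void example_hook_bio(struct example_hook_info *h, struct bio *bio,
			     bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;
	h->bi_private = bio->bi_private;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void example_unhook_bio(struct example_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
	bio->bi_private = h->bi_private;
}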

/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE
#define COMMIT_PERIOD
#define MIGRATION_COUNT_WINDOW

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS

enum cache_metadata_mode {};

enum cache_io_mode {};

struct cache_features {};

struct cache_stats {};

struct cache {};

struct per_bio_data {};

struct dm_cache_migration {};

/*----------------------------------------------------------------*/

static bool writethrough_mode(struct cache *cache)
{}

static bool writeback_mode(struct cache *cache)
{}

static inline bool passthrough_mode(struct cache *cache)
{}

/*----------------------------------------------------------------*/

static void wake_deferred_bio_worker(struct cache *cache)
{}

static void wake_migration_worker(struct cache *cache)
{}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
{}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{}

static void free_migration(struct dm_cache_migration *mg)
{}

/*----------------------------------------------------------------*/

static inline dm_oblock_t oblock_succ(dm_oblock_t b)
{}

static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
{}

/*
 * We have two lock levels: level 0, which is used to prevent WRITEs, and
 * level 1, which prevents *both* READs and WRITEs.
 */
#define WRITE_LOCK_LEVEL
#define READ_WRITE_LOCK_LEVEL

static unsigned int lock_level(struct bio *bio)
{}
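
/*
 * One plausible mapping from a bio to the shared lock level it should be
 * detained at, with assumed 'example_' names (the real values are elided
 * above): a WRITE is taken at the write level, so either exclusive lock
 * excludes it, while a READ only has to respect the stronger
 * read/write-level exclusive lock.
 */
#define EXAMPLE_WRITE_LOCK_LEVEL 0
#define EXAMPLE_READ_WRITE_LOCK_LEVEL 1

static unsigned int example_lock_level(struct bio *bio)
{
	return bio_data_dir(bio) == WRITE ?
		EXAMPLE_WRITE_LOCK_LEVEL :
		EXAMPLE_READ_WRITE_LOCK_LEVEL;
}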

/*
 *--------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------
 */

static struct per_bio_data *get_per_bio_data(struct bio *bio)
{}

static struct per_bio_data *init_per_bio_data(struct bio *bio)
{}

/*----------------------------------------------------------------*/

static void defer_bio(struct cache *cache, struct bio *bio)
{}

static void defer_bios(struct cache *cache, struct bio_list *bios)
{}

/*----------------------------------------------------------------*/

static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{}

static void set_dirty(struct cache *cache, dm_cblock_t cblock)
{}

/*
 * These two are called when setting the dirty state after migrations, to
 * force the policy and the dirty bitset to be in sync.
 */
static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
{}

static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
{}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{}

static dm_block_t block_div(dm_block_t b, uint32_t n)
{}

static dm_block_t oblocks_per_dblock(struct cache *cache)
{}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{}

static void set_discard(struct cache *cache, dm_dblock_t b)
{}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{}

/*
 *--------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------
 */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{}

static void accounted_begin(struct cache *cache, struct bio *bio)
{}

static void accounted_complete(struct cache *cache, struct bio *bio)
{}

static void accounted_request(struct cache *cache, struct bio *bio)
{}

static void issue_op(struct bio *bio, void *context)
{}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and the origin device.  Clone the bio and send the two
 * in parallel.
 */
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
				      dm_oblock_t oblock, dm_cblock_t cblock)
{}

/*
 *--------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------
 */
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{}

static const char *cache_device_name(struct cache *cache)
{}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{}

static void abort_transaction(struct cache *cache)
{}

static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{}

static void save_stats(struct cache *cache)
{}

static void update_stats(struct cache_stats *stats, enum policy_operation op)
{}

/*
 *---------------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *---------------------------------------------------------------------
 */
static void inc_io_migrations(struct cache *cache)
{}

static void dec_io_migrations(struct cache *cache)
{}

static bool discard_or_flush(struct bio *bio)
{}

static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{}

/*----------------------------------------------------------------*/

static void prevent_background_work(struct cache *cache)
{}

static void allow_background_work(struct cache *cache)
{}

static bool background_work_begin(struct cache *cache)
{}

static void background_work_end(struct cache *cache)
{}

/*----------------------------------------------------------------*/

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{}

static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{}

static void quiesce(struct dm_cache_migration *mg,
		    void (*continuation)(struct work_struct *))
{}

static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{}

static void copy(struct dm_cache_migration *mg, bool promote)
{}

static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{}

static void overwrite_endio(struct bio *bio)
{}

static void overwrite(struct dm_cache_migration *mg,
		      void (*continuation)(struct work_struct *))
{}

/*
 * Migration steps:
 *
 * 1) exclusive lock preventing WRITEs
 * 2) quiesce
 * 3) copy or issue overwrite bio
 * 4) upgrade to exclusive lock preventing READs and WRITEs
 * 5) quiesce
 * 6) update metadata and commit
 * 7) unlock
 */
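
/*
 * Illustrative sketch of how steps 2 and 5 above can hand off to the next
 * work function once all bios held back by the cell have drained, reusing
 * the 'example_' continuation helpers sketched near the top of this file.
 * The migration layout below is an assumption for the sketch, not this
 * file's (elided) struct dm_cache_migration.
 */
struct example_migration {
	struct example_continuation k;
	struct dm_bio_prison_v2 *prison;
	struct dm_bio_prison_cell_v2 *cell;
};

static void example_quiesce_then(struct example_migration *mg,
				 void (*next_step)(struct work_struct *))
{
	/* next_step runs once the cell has quiesced */
	example_init_continuation(&mg->k, next_step);
	dm_cell_quiesce_v2(mg->prison, mg->cell, &mg->k.ws);
}
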
static void mg_complete(struct dm_cache_migration *mg, bool success)
{}

static void mg_success(struct work_struct *ws)
{}

static void mg_update_metadata(struct work_struct *ws)
{}

static void mg_update_metadata_after_copy(struct work_struct *ws)
{}

static void mg_upgrade_lock(struct work_struct *ws)
{}

static void mg_full_copy(struct work_struct *ws)
{}

static void mg_copy(struct work_struct *ws)
{}

static int mg_lock_writes(struct dm_cache_migration *mg)
{}

static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
{}

/*
 *--------------------------------------------------------------
 * Invalidation processing
 *--------------------------------------------------------------
 */

static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{}

static void invalidate_completed(struct work_struct *ws)
{}

static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{}

static void invalidate_remove(struct work_struct *ws)
{}

static int invalidate_lock(struct dm_cache_migration *mg)
{}

static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
			    dm_oblock_t oblock, struct bio *bio)
{}

/*
 *--------------------------------------------------------------
 * Bio processing
 *--------------------------------------------------------------
 */

enum busy {};

static enum busy spare_migration_bandwidth(struct cache *cache)
{}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{}

/*----------------------------------------------------------------*/

static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
		   bool *commit_needed)
{}

static bool process_bio(struct cache *cache, struct bio *bio)
{}

/*
 * A non-zero return indicates read_only or fail_io mode.
 */
static int commit(struct cache *cache, bool clean_shutdown)
{}

/*
 * Used by the batcher.
 */
static blk_status_t commit_op(void *context)
{}

/*----------------------------------------------------------------*/

static bool process_flush_bio(struct cache *cache, struct bio *bio)
{}

static bool process_discard_bio(struct cache *cache, struct bio *bio)
{}

static void process_deferred_bios(struct work_struct *ws)
{}

/*
 *--------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------
 */
static void requeue_deferred_bios(struct cache *cache)
{}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{}
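
/*
 * A sketch of the periodic kick described above, with assumed 'example_'
 * names: do the periodic work (e.g. schedule a commit), then requeue
 * ourselves so it happens again roughly once per EXAMPLE_COMMIT_PERIOD.
 */
#define EXAMPLE_COMMIT_PERIOD HZ	/* about once a second */

struct example_waker {
	struct delayed_work waker;
	struct workqueue_struct *wq;
};

static void example_do_waker(struct work_struct *ws)
{
	struct example_waker *w = container_of(to_delayed_work(ws),
					       struct example_waker, waker);

	/* ...schedule a commit / poke the policy here... */

	queue_delayed_work(w->wq, &w->waker, EXAMPLE_COMMIT_PERIOD);
}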

static void check_migrations(struct work_struct *ws)
{}

/*
 *--------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------
 */

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{}

static void cache_dtr(struct dm_target *ti)
{}

static sector_t get_dev_size(struct dm_dev *dev)
{}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev    : fast device holding the persistent metadata
 * cache dev	   : fast device holding cached data blocks
 * origin dev	   : slow device holding original data blocks
 * block size	   : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy	   : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *		     to key/value pairs passed to the policy
 * policy args	   : key/value pairs passed to the policy
 *		     E.g. 'sequential_threshold 1024'
 *		     See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough  : write through caching that prohibits cache block
 *		     content from being different from origin block content.
 *		     Without this argument, the default behaviour is to write
 *		     back cache block contents later for performance reasons,
 *		     so they may differ from the corresponding origin blocks.
 */
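
/*
 * Example table line (the start/length, device names and the policy name
 * below are illustrative only):
 *
 * 0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-data \
 *	/dev/mapper/slow 512 1 writethrough default 2 sequential_threshold 1024
 */
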
struct cache_args {};

static void destroy_cache_args(struct cache_args *ca)
{}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{}

static void init_features(struct cache_features *cf)
{}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION

static int process_config_option(struct cache *cache, const char *key, const char *value)
{}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{}

/*
 * We want the discard block size to be at least the cache block size and
 * to have no more than 2^14 discard blocks across the origin.
 */
#define MAX_DISCARD_BLOCKS

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{}
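
/*
 * Illustrative sketch of how a discard block size could be chosen under
 * the constraint above, with assumed 'example_' names: start at the cache
 * block size and keep doubling until no more than 2^14 discard blocks
 * span the origin.
 */
#define EXAMPLE_MAX_DISCARD_BLOCKS ((1 << 14))

static bool example_too_many_discard_blocks(sector_t discard_block_size,
					    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > EXAMPLE_MAX_DISCARD_BLOCKS;
}

static sector_t example_calculate_discard_block_size(sector_t cache_block_size,
						     sector_t origin_size)
{
	sector_t discard_block_size = cache_block_size;

	if (origin_size)
		while (example_too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}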

static void set_cache_size(struct cache *cache, dm_cblock_t size)
{}

#define DEFAULT_MIGRATION_THRESHOLD

static int cache_create(struct cache_args *ca, struct cache **result)
{}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{}

static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

/*----------------------------------------------------------------*/

static int cache_map(struct dm_target *ti, struct bio *bio)
{}

static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{}

static int write_dirty_bitset(struct cache *cache)
{}

static int write_discard_bitset(struct cache *cache)
{}

static int write_hints(struct cache *cache)
{}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{}

static void cache_postsuspend(struct dm_target *ti)
{}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{}

/*
 * The discard block size in the on-disk metadata is not necessarily the
 * same as the one we're currently using, so we have to be careful to only
 * set the discarded attribute if we know it covers a complete block of
 * the new size.
 */
struct discard_load_info {};

static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{}

static void set_discard_range(struct discard_load_info *li)
{}
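
/*
 * Illustrative sketch of the rounding the comment above calls for, with
 * assumed 'example_' names: given a discarded range in sectors, work out
 * which blocks of the *new* discard block size are wholly covered.  begin
 * is rounded up and end rounded down, so partially covered blocks at
 * either edge are left unmarked.
 */
static void example_covered_dblocks(sector_t begin, sector_t end,
				    sector_t new_block_size,
				    sector_t *result_b, sector_t *result_e)
{
	sector_t e = end;

	*result_b = dm_sector_div_up(begin, new_block_size);
	(void) sector_div(e, new_block_size);	/* round down */
	*result_e = e;

	/* blocks *result_b .. (*result_e - 1), if any, may be marked discarded */
}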

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{}

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{}

static int cache_preresume(struct dm_target *ti)
{}

static void cache_resume(struct dm_target *ti)
{}

static void emit_flags(struct cache *cache, char *result,
		       unsigned int maxlen, ssize_t *sz_ptr)
{}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{}

/*
 * Defines a range of cblocks: begin to (end - 1) are in the range; end is
 * the one-past-the-end value.
 */
struct cblock_range {};

/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, e.g. '3456'
 * ii) A begin and end cblock with a dash between, e.g. '123-234'
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{}
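
/*
 * Illustrative sketch of parsing the two forms above, with assumed
 * 'example_' names: a trailing %c conversion rejects garbage after the
 * number(s).  Treating the second number of a dashed range as the
 * (exclusive) one-past-the-end value is a choice of this sketch, matching
 * the cblock_range convention above.
 */
static int example_parse_cblock_range(const char *str,
				      unsigned long long *b,
				      unsigned long long *e)
{
	char dummy;

	/* Form ii: "123-234" */
	if (sscanf(str, "%llu-%llu%c", b, e, &dummy) == 2)
		return 0;

	/* Form i: a single cblock, e.g. "3456" */
	if (sscanf(str, "%llu%c", b, &dummy) == 1) {
		*e = *b + 1;
		return 0;
	}

	return -EINVAL;
}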

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{}

static inline dm_cblock_t cblock_succ(dm_cblock_t b)
{}

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
					      const char **cblock_ranges)
{}

/*
 * Supports
 *	"<key> <value>"
 * and
 *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{}

/*
 * If discard_passdown was enabled, verify that the origin device
 * supports discards.  Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct cache *cache)
{}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

/*----------------------------------------------------------------*/

static struct target_type cache_target =;

static int __init dm_cache_init(void)
{}

static void __exit dm_cache_exit(void)
{}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();