/* linux/drivers/md/dm-clone-target.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

/* Log-message prefix used by the DMERR()/DMWARN()/DMINFO() macros; must be a string literal. */
#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes (in 512-byte sectors).
 * NOTE(review): values restored from the upstream dm-clone target.
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM();

/* Slab cache for struct dm_clone_region_hydration */
/* NOTE(review): presumably created in dm_clone_init() and destroyed in dm_clone_exit() — bodies are stripped; confirm. */
static struct kmem_cache *_hydration_cache;

/*
 * dm-clone metadata modes.
 * NOTE(review): the enumerators were missing in this extraction (an
 * empty enum is invalid C); restored from the upstream dm-clone target.
 */
enum clone_metadata_mode {
	CM_WRITE,	/* metadata may be changed */
	CM_READ_ONLY,	/* metadata may not be changed */
	CM_FAIL,	/* all metadata I/O fails */
};

/* Forward declaration; the bucket type is defined with the hash table code below. */
struct hash_table_bucket;

/*
 * Per-target state for a clone device.
 * NOTE(review): the member list appears to have been stripped during
 * extraction — restore it from the upstream source before building.
 */
struct clone {};

/*
 * dm-clone flags: bit numbers in the clone's flags word (used with
 * set_bit()/test_bit() and friends).
 * NOTE(review): bit values restored from the upstream dm-clone target.
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 *
 * NOTE(review): every function body in this section is an empty stub —
 * the implementations appear to have been stripped during extraction.
 * The non-void functions would invoke undefined behavior if called
 * as-is; restore the bodies from the upstream source.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{}

static const char *clone_device_name(struct clone *clone)
{}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{}

static void __abort_transaction(struct clone *clone)
{}

static void __reload_in_core_bitset(struct clone *clone)
{}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{}

/* NOTE(review): presumably queues the clone's worker item (see do_worker()) — body stripped; confirm. */
static inline void wake_worker(struct clone *clone)
{}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 *
 * NOTE(review): every function body in this section is an empty stub —
 * the implementations appear to have been stripped during extraction;
 * restore them from the upstream source before building.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{}

/* NOTE(review): presumably true when the bio's completion requires a metadata commit — confirm. */
static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{}

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{}

/* NOTE(review): presumably ends every bio on the list with @status — confirm. */
static void fail_bios(struct bio_list *bios, blk_status_t status)
{}

static void submit_bios(struct bio_list *bios)
{}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{}

/* NOTE(review): presumably narrows @bio to start at @sector with length @len sectors — confirm. */
static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 *
 * NOTE(review): the struct bodies, the bucket lock macros and all
 * function bodies in this section are empty stubs (stripped during
 * extraction); restore them from the upstream source.
 */
struct dm_clone_region_hydration {};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */
struct hash_table_bucket {};

/* NOTE(review): these macros expand to nothing here; per the comment above they should take/release the per-bucket spinlock. */
#define bucket_lock_irqsave(bucket, flags)

#define bucket_unlock_irqrestore(bucket, flags)

#define bucket_lock_irq(bucket)

#define bucket_unlock_irq(bucket)

static int hash_table_init(struct clone *clone)
{}

static void hash_table_exit(struct clone *clone)
{}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
						       unsigned long region_nr)
{}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the hydration hash table lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{}

/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{}

/* NOTE(review): presumably returns @hd to _hydration_cache — body stripped; confirm. */
static inline void free_hydration(struct dm_clone_region_hydration *hd)
{}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 *
 * NOTE(review): all bodies below are empty stubs (stripped during
 * extraction); the non-void functions would be undefined behavior to
 * call as-is.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{}

/* kcopyd completion callback — NOTE(review): presumably completes the hydration passed via @context; confirm. */
static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{}

static void overwrite_endio(struct bio *bio)
{}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. In case, by the time this
 * function is called, the region has finished hydrating it's submitted to the
 * destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 *
 * NOTE(review): the struct body and function bodies below are empty
 * stubs (stripped during extraction).
 */
struct batch_info {};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{}

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{}

/*---------------------------------------------------------------------------*/

/* NOTE(review): every body in this section is an empty stub (stripped during extraction). */
static bool need_commit_due_to_time(struct clone *clone)
{}

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{}

static void process_deferred_discards(struct clone *clone)
{}

static void process_deferred_bios(struct clone *clone)
{}

static void process_deferred_flush_bios(struct clone *clone)
{}

/* Worker entry point — NOTE(review): presumably drains the deferred lists above and runs do_hydration(); confirm. */
static void do_worker(struct work_struct *work)
{}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 *
 * NOTE(review): every body in this section is an empty stub (stripped
 * during extraction); restore from the upstream source.
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{}

static sector_t get_dev_size(struct dm_dev *dev)
{}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 *
 * NOTE(review): all constructor/parsing bodies below are empty stubs
 * (stripped during extraction); restore from the upstream source.
 */
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{}

static int validate_nr_regions(unsigned long n, char **error)
{}

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{}

static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

static void clone_dtr(struct dm_target *ti)
{}

/*---------------------------------------------------------------------------*/

/* NOTE(review): every body in this section is an empty stub (stripped during extraction). */
static void clone_postsuspend(struct dm_target *ti)
{}

static void clone_resume(struct dm_target *ti)
{}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{}

/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{}

static void enable_hydration(struct clone *clone)
{}

static void disable_hydration(struct clone *clone)
{}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{}

static struct target_type clone_target =;

/*---------------------------------------------------------------------------*/

/* Module functions */
/* NOTE(review): bodies stripped; presumably dm_clone_init() creates _hydration_cache and registers clone_target, and dm_clone_exit() reverses both — confirm against upstream. */
static int __init dm_clone_init(void)
{}

static void __exit dm_clone_exit(void)
{}

/* Module hooks */
module_init();
module_exit(dm_clone_exit);

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();