linux/drivers/md/dm-snap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned int hash_shift;
	struct hlist_bl_head *table;
};

struct dm_snapshot {};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1
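
/*
 * A hedged sketch of the handshake described above (illustrative helper
 * names, not the verbatim kernel bodies): the merge worker holds
 * RUNNING_MERGE while it is active; whoever wants to stop it raises
 * SHUTDOWN_MERGE and sleeps until RUNNING_MERGE is dropped.
 */
static inline void example_merge_worker_done(unsigned long *state_bits)
{
	clear_bit_unlock(RUNNING_MERGE, state_bits);
	smp_mb__after_atomic();
	wake_up_bit(state_bits, RUNNING_MERGE);
}

static inline void example_wait_for_merge_shutdown(unsigned long *state_bits)
{
	set_bit(SHUTDOWN_MERGE, state_bits);
	wait_on_bit(state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, state_bits);
}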

/*
 * Maximum number of chunks being copied on write at any one time.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.  (A sketch of the accounting behind this throttle follows the
 * module-parameter declarations below.)
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");
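
/*
 * Hedged sketch of the throttle that cow_threshold implements (the struct
 * and helper names are illustrative; the real code keeps the counter and
 * wait queue inside struct dm_snapshot): each COW copy bumps a counter,
 * completion drops it and wakes waiters, and new copies back off while the
 * counter sits at the threshold.
 */
struct example_cow_account {
	unsigned int in_progress;
	wait_queue_head_t wait;
	spinlock_t lock;
};

static inline void example_account_start_copy(struct example_cow_account *a)
{
	spin_lock(&a->lock);
	a->in_progress++;
	spin_unlock(&a->lock);
}

static inline void example_account_end_copy(struct example_cow_account *a)
{
	spin_lock(&a->lock);
	a->in_progress--;
	spin_unlock(&a->lock);
	wake_up(&a->wait);
}

static inline void example_wait_for_room(struct example_cow_account *a)
{
	wait_event(a->wait, READ_ONCE(a->in_progress) < cow_threshold);
}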

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {};

/*
 * Slab caches for the completed and pending exception objects.  (The hash
 * table mapping origin volumes to lists of snapshots, and the lock that
 * protects it, are declared further below.)
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{}

static bool is_bio_tracked(struct bio *bio)
{}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{}
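
/*
 * Hedged sketch of the tracking scheme behind the helpers above
 * (illustrative: the real code keeps the buckets and lock in struct
 * dm_snapshot and stores the node in per-bio data): a read in flight is
 * hashed by chunk into a small bucket array so that __chunk_is_tracked()
 * only has to scan one bucket.
 */
static inline void example_track_chunk(struct hlist_head *buckets,
				       spinlock_t *lock,
				       struct dm_snap_tracked_chunk *c,
				       chunk_t chunk)
{
	c->chunk = chunk;

	spin_lock_irq(lock);
	hlist_add_head(&c->node, &buckets[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(lock);
}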

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so fsleep(1000) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		fsleep(1000);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	struct block_device *bdev;
	struct list_head hash_list;
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned int split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE	256
#define ORIGIN_MASK		0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{}

static void exit_origin_hash(void)
{}

static unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{}

static void __insert_origin(struct origin *o)
{}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{}

static void __insert_dm_origin(struct dm_origin *o)
{}

static void __remove_dm_origin(struct dm_origin *o)
{}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{}
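
/*
 * Hedged usage sketch (illustrative name, simplified logic): with
 * _origins_lock held, a caller can tell that an exception handover is still
 * pending on its COW device by checking for the "2: snap_src, snap_dest"
 * state from the table above.
 */
static inline int example_handover_pending(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);

	return snap_src && snap_dest;
}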

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{}

static void unregister_snapshot(struct dm_snapshot *s)
{}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);

/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};

static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{}

static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{}
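
/*
 * Hedged sketch of the lock/unlock pair above (illustrative names; the real
 * helpers behave equivalently): both per-bucket bit spinlocks are taken,
 * always locking the completed slot first and unlocking in the reverse
 * order, so concurrent users cannot deadlock against each other.
 */
static inline void example_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static inline void example_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}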

static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned int hash_shift)
{}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	/*
	 * Drop the grouped low bits, then mask into the table: with
	 * hash_shift == 3, chunks 0..7 all land in bucket 0.
	 */
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{}

/*
 * Return the exception data for a chunk, or NULL if the chunk is not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{}

static void free_completed_exception(struct dm_exception *e)
{}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{}

/*
 * Return the minimum chunk size of all snapshots that have the specified
 * origin, or zero if the origin has no snapshots.  (A sketch of the scan
 * appears below.)
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{}
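
/*
 * Hedged sketch of the scan described above.  It assumes the 'list' and
 * 'store' members of struct dm_snapshot, which are elided in this outline;
 * min_not_zero() lets the first snapshot's chunk size win over the initial
 * zero.
 */
static inline uint32_t example_minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	uint32_t chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}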

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{}

static void merge_shutdown(struct dm_snapshot *s)
{}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{}

static void increment_pending_exceptions_done_count(void)
{}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{}

static void start_merge(struct dm_snapshot *s)
{}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{}

static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{}

/*
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 * where p = persistent store, po = persistent store with overflow support
 * and n = transient (non-persistent) store.  (An example table line follows
 * the constructor below.)
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}
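
/*
 * Illustrative example (device names and sizes hypothetical): a table line
 * for a 1 GiB snapshot backed by a persistent exception store with
 * 16-sector (8 KiB) chunks could be loaded with
 *
 *	echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/cow P 16" | \
 *		dmsetup create snap0
 *
 * Substituting N selects a transient store and PO a persistent store with
 * overflow support.
 */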

static void __free_exceptions(struct dm_snapshot *s)
{}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{}

static void snapshot_dtr(struct dm_target *ti)
{}

static void account_start_copy(struct dm_snapshot *s)
{}

static void account_end_copy(struct dm_snapshot *s)
{}

static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio_noacct(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);

/*
 * Retry a list of origin bios by feeding each one back through do_origin().
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{}

static void invalidate_snapshot(struct dm_snapshot *s, int err)
{}

static void pending_complete(void *context, int success)
{}

static void complete_exception(struct dm_snap_pending_exception *pe)
{}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{}

static void full_bio_end_io(struct bio *bio)
{}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{}

/*
 * Inserts a pending exception into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__insert_pending_exception(struct dm_snapshot *s,
			   struct dm_snap_pending_exception *pe, chunk_t chunk)
{}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{}

static void zero_callback(int read_err, unsigned long write_err, void *context)
{}

static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
			   struct bio *bio, chunk_t chunk)
{}

static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{}

static int snapshot_map(struct dm_target *ti, struct bio *bio)
{}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{}

static void snapshot_merge_presuspend(struct dm_target *ti)
{}

static int snapshot_preresume(struct dm_target *ti)
{}

static void snapshot_resume(struct dm_target *ti)
{}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned int status_flags, char *result, unsigned int maxlen)
{}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{}

static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

/*
 *---------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------
 */
/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{}
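
/*
 * Hedged sketch of the dispatch described above (simplified: the throttling
 * selected by 'limit' is omitted, so this is not the verbatim kernel body):
 */
static int example_do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}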

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int size)
{}
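
/*
 * Hedged sketch of the scan described above (illustrative; the real body
 * differs in detail): walk the extent one merging-snapshot chunk at a time,
 * passing a NULL bio so that exceptions are triggered without queueing
 * anything, and report whether any reallocation started.
 */
static int example_origin_write_extent(struct dm_snapshot *merging_snap,
				       sector_t sector, unsigned int size,
				       sector_t chunk_size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	down_read(&_origins_lock);
	o = __lookup_origin(dm_snap_origin(merging_snap)->bdev);
	if (o)
		for (n = 0; n < size; n += chunk_size)
			if (__origin_write(&o->snapshots, sector + n,
					   NULL) == DM_MAPIO_SUBMITTED)
				must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}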

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

static void origin_dtr(struct dm_target *ti)
{}

static int origin_map(struct dm_target *ti, struct bio *bio)
{}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{}

static void origin_postsuspend(struct dm_target *ti)
{}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{}

static struct target_type origin_target = {
	.name		 = "snapshot-origin",
	.version	 = {1, 9, 0},	/* version number is release-dependent */
	.module		 = THIS_MODULE,
	.ctr		 = origin_ctr,
	.dtr		 = origin_dtr,
	.map		 = origin_map,
	.resume		 = origin_resume,
	.postsuspend	 = origin_postsuspend,
	.status		 = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name		 = "snapshot",
	.version	 = {1, 16, 0},	/* version number is release-dependent */
	.module		 = THIS_MODULE,
	.ctr		 = snapshot_ctr,
	.dtr		 = snapshot_dtr,
	.map		 = snapshot_map,
	.end_io		 = snapshot_end_io,
	.preresume	 = snapshot_preresume,
	.resume		 = snapshot_resume,
	.status		 = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints	 = snapshot_io_hints,
};

static struct target_type merge_target = {
	.name		 = dm_snapshot_merge_target_name,
	.version	 = {1, 5, 0},	/* version number is release-dependent */
	.module		 = THIS_MODULE,
	.ctr		 = snapshot_ctr,
	.dtr		 = snapshot_dtr,
	.map		 = snapshot_merge_map,
	.end_io		 = snapshot_end_io,
	.presuspend	 = snapshot_merge_presuspend,
	.preresume	 = snapshot_preresume,
	.resume		 = snapshot_merge_resume,
	.status		 = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints	 = snapshot_io_hints,
};

static int __init dm_snapshot_init(void)
{}

static void __exit dm_snapshot_exit(void)
{}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");