linux/drivers/md/dm-thin.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison-v1.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned int no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM();

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug further io to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the whole block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {};

static bool build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {};

static void throttle_init(struct throttle *t)
{}

static void throttle_work_start(struct throttle *t)
{}

static void throttle_work_update(struct throttle *t)
{}

static void throttle_work_complete(struct throttle *t)
{}

static void throttle_lock(struct throttle *t)
{}

static void throttle_unlock(struct throttle *t)
{}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in various modes.  Ordered from least to most degraded so
 * that modes can be compared numerically.
 */
enum pool_mode {};

struct pool_features {};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {};

static void metadata_operation_failed(struct pool *pool, const char *op, int r);

static enum pool_mode get_pool_mode(struct pool *pool)
{}
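
/*
 * Illustrative sketch, not part of the upstream file: because the modes in
 * enum pool_mode are ordered from least to most degraded, "is the pool at
 * least this unhealthy?" checks can be plain integer comparisons.  The
 * enumerator PM_READ_ONLY is assumed to be a member of the (elided) enum
 * above, as it is in the upstream driver.
 */
static __maybe_unused bool pool_is_degraded_sketch(struct pool *pool)
{
	/* Modes at or beyond PM_READ_ONLY count as degraded here. */
	return get_pool_mode(pool) >= PM_READ_ONLY;
}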

static void notify_of_pool_mode_change(struct pool *pool)
{}

/*
 * Target context for a pool.
 */
struct pool_c {};

/*
 * Target context for a thin.
 */
struct thin_c {};

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct pool *pool)
{}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{}

/*----------------------------------------------------------------*/

struct discard_op {};

static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
{}

static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{}

static void end_discard(struct discard_op *op, int r)
{}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{}
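
/*
 * Minimal sketch of what waking the worker amounts to, assuming the upstream
 * struct pool members (a workqueue_struct *wq and a work_struct worker),
 * which are elided above.
 */
static __maybe_unused void wake_worker_sketch(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}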

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{}

static void cell_error_with_code(struct pool *pool,
		struct dm_bio_prison_cell *cell, blk_status_t error_code)
{}

static blk_status_t get_pool_io_error_code(struct pool *pool)
{}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {} dm_thin_pool_table;

static void pool_table_init(void)
{}

static void pool_table_exit(void)
{}

static void __pool_table_insert(struct pool *pool)
{}

static void __pool_table_remove(struct pool *pool)
{}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{}
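
/*
 * Illustrative sketch of the lookup, assuming the upstream layout of
 * dm_thin_pool_table (a mutex and a list head named 'pools') and of struct
 * pool (a 'list' member and the owning 'pool_md'), all elided above.  The
 * table is a plain list walked linearly under dm_thin_pool_table.mutex.
 */
static __maybe_unused struct pool *__pool_table_lookup_sketch(struct mapped_device *md)
{
	struct pool *pool;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(pool, &dm_thin_pool_table.pools, list)
		if (pool->pool_md == md)
			return pool;

	return NULL;
}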

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {};

static void error_bio_list(struct bio_list *bios, blk_status_t error)
{}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
		blk_status_t error)
{}

static void requeue_deferred_cells(struct thin_c *tc)
{}

static void requeue_io(struct thin_c *tc)
{}

static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
{}

static void error_retry_list(struct pool *pool)
{}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{}
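
/*
 * Worked sketch of the rounding for the power-of-two block size case,
 * assuming the upstream pool members sectors_per_block and
 * sectors_per_block_shift (elided above): the first block is rounded up and
 * the end block rounded down, so a bio that doesn't span at least one whole
 * block yields an empty range (*begin == *end).
 */
static __maybe_unused void get_bio_block_range_sketch(struct thin_c *tc, struct bio *bio,
						      dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull;	/* round the start up */
	b >>= pool->sectors_per_block_shift;
	e >>= pool->sectors_per_block_shift;	/* the end rounds down */

	if (e < b)	/* the bio fits entirely inside one block */
		e = b;

	*begin = b;
	*end = e;
}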

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{}

static void issue(struct thin_c *tc, struct bio *bio)
{}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{}

static void overwrite_endio(struct bio *bio)
{}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}
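
/*
 * Sketch of the deferral described above, assuming the upstream thin_c
 * members (a spinlock 'lock' and a bio_list 'deferred_bio_list'), which are
 * elided.  The non-holder bios are released from the cell, spliced onto the
 * thin device's deferred list, and the worker is woken to process them.
 */
static __maybe_unused void cell_defer_no_holder_sketch(struct thin_c *tc,
						       struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);
	cell_release_no_holder(pool, cell, &bios);

	if (!bio_list_empty(&bios)) {
		spin_lock_irqsave(&tc->lock, flags);
		bio_list_merge(&tc->deferred_bio_list, &bios);
		spin_unlock_irqrestore(&tc->lock, flags);

		wake_worker(pool);
	}
}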

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{}

static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
{}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{}

/*----------------------------------------------------------------*/

static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
						   struct bio *discard_parent)
{}

static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
{}

static void passdown_endio(struct bio *bio)
{}

static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{}

static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
{}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{}

static int ensure_next_mapping(struct pool *pool)
{}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void requeue_bios(struct pool *pool);

static bool is_read_only_pool_mode(enum pool_mode mode)
{}

static bool is_read_only(struct pool *pool)
{}

static void check_for_metadata_space(struct pool *pool)
{}

static void check_for_data_space(struct pool *pool)
{}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{}
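
/*
 * Sketch of the queueing described above, assuming the upstream per-bio data
 * layout (dm_thin_endio_hook carrying a back-pointer 'tc') and a
 * 'retry_on_resume_list' bio_list in struct thin_c, both elided.  The bio is
 * simply parked on the thin device until a resume retries it.
 */
static __maybe_unused void retry_on_resume_sketch(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;

	spin_lock_irq(&tc->lock);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irq(&tc->lock);
}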

static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{}

static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
				 struct bio *bio)
{}

static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{}
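
/*
 * Purely illustrative sketch of how the copy-on-write walkthrough at the top
 * of this file maps onto the helpers above.  It assumes the upstream thin_c
 * members 'pool' and 'td' and the 'block' field of dm_thin_lookup_result,
 * and reduces error handling to the two cases that matter: out of data space
 * (park the bios until a resume) and anything else (error the cell).
 */
static __maybe_unused void break_sharing_sketch(struct thin_c *tc, struct bio *bio,
						dm_block_t block,
						struct dm_thin_lookup_result *lookup_result)
{
	int r;
	dm_block_t data_block;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;

	/* step (i): plug further io to the shared physical block */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell))
		return;	/* another bio holds the cell; ours waits inside it */

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		/* steps (ii)-(iv): quiesce, copy old -> new, queue the mapping */
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, data_cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, data_cell);
		break;

	default:
		cell_error(pool, data_cell);
		break;
	}
}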

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}

static void process_bio(struct thin_c *tc, struct bio *bio)
{}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{}
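
/*
 * Sketch of the time-based check, assuming the upstream 'last_commit_jiffies'
 * member of struct pool (elided above): a commit is due once more than
 * COMMIT_PERIOD jiffies have passed since the last one.
 */
static __maybe_unused int need_commit_due_to_time_sketch(struct pool *pool)
{
	return !time_in_range(jiffies, pool->last_commit_jiffies,
			      pool->last_commit_jiffies + COMMIT_PERIOD);
}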

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{}

static void __extract_sorted_bios(struct thin_c *tc)
{}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{}

static void process_thin_deferred_bios(struct thin_c *tc)
{}

static int cmp_cells(const void *lhs, const void *rhs)
{}

static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
{}

static void process_thin_deferred_cells(struct thin_c *tc)
{}

static void thin_get(struct thin_c *tc);
static void thin_put(struct thin_c *tc);

/*
 * We can't hold rcu_read_lock() around code that can block.  So we
 * find a thin with the rcu lock held; bump a refcount; then drop
 * the lock.
 */
static struct thin_c *get_first_thin(struct pool *pool)
{}
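
/*
 * Sketch of the pattern described above, assuming the upstream 'active_thins'
 * list head in struct pool and 'list' member in struct thin_c (both elided):
 * hold the rcu read lock only long enough to find a thin and take a
 * reference, then drop it before doing anything that may block.
 */
static __maybe_unused struct thin_c *get_first_thin_sketch(struct pool *pool)
{
	struct thin_c *tc = NULL;

	rcu_read_lock();
	if (!list_empty(&pool->active_thins)) {
		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
		thin_get(tc);
	}
	rcu_read_unlock();

	return tc;
}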

static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
{}

static void process_deferred_bios(struct pool *pool)
{}

static void do_worker(struct work_struct *ws)
{}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{}
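
/*
 * Sketch of the periodic waker, assuming the upstream pool members 'wq'
 * (workqueue) and 'waker' (delayed_work), both elided: each firing wakes the
 * worker and re-arms itself COMMIT_PERIOD jiffies later.
 */
static __maybe_unused void do_waker_sketch(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);

	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}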

/*
 * We're holding onto IO to allow userland time to react.  After the
 * timeout either the pool will have been resized (and thus back in
 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
 */
static void do_no_space_timeout(struct work_struct *ws)
{}

/*----------------------------------------------------------------*/

struct pool_work {};

static struct pool_work *to_pool_work(struct work_struct *ws)
{}

static void pool_work_complete(struct pool_work *pw)
{}

static void pool_work_wait(struct pool_work *pw, struct pool *pool,
			   void (*fn)(struct work_struct *))
{}

/*----------------------------------------------------------------*/

struct noflush_work {};

static struct noflush_work *to_noflush(struct work_struct *ws)
{}

static void do_noflush_start(struct work_struct *ws)
{}

static void do_noflush_stop(struct work_struct *ws)
{}

static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{}

/*----------------------------------------------------------------*/

static void set_discard_callbacks(struct pool *pool)
{}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{}

static void abort_transaction(struct pool *pool)
{}

static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{}

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{}

static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
{}

static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{}

static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{}

static void requeue_bios(struct pool *pool)
{}

/*
 *--------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------
 */
static bool is_factor(sector_t block_size, uint32_t n)
{}

/*
 * If discard_passdown was enabled verify that the data device
 * supports discards.  Disable discard_passdown if not.
 */
static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{}

static int bind_control_target(struct pool *pool, struct dm_target *ti)
{}

static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{}

/*
 *--------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------
 */
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{}

static void __pool_destroy(struct pool *pool)
{}

static struct kmem_cache *_new_mapping_cache;

static struct pool *pool_create(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				struct block_device *data_dev,
				unsigned long block_size,
				int read_only, char **error)
{}

static void __pool_inc(struct pool *pool)
{}

static void __pool_dec(struct pool *pool)
{}

static struct pool *__pool_find(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				struct block_device *data_dev,
				unsigned long block_size, int read_only,
				char **error, int *created)
{}

/*
 *--------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------
 */
static void pool_dtr(struct dm_target *ti)
{}

static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
			       struct dm_target *ti)
{}

static void metadata_low_callback(void *context)
{}

/*
 * We need to flush the data device **before** committing the metadata.
 *
 * This ensures that the data blocks of any newly inserted mappings are
 * properly written to non-volatile storage and won't be lost in case of a
 * crash.
 *
 * Failure to do so can result in data corruption in the case of internal or
 * external snapshots and in the case of newly provisioned blocks, when block
 * zeroing is enabled.
 */
static int metadata_pre_commit_callback(void *context)
{}
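
/*
 * Sketch of the pre-commit flush, assuming the callback context is the pool,
 * that struct pool keeps a 'data_dev' block_device pointer (as in the
 * upstream driver), and the single-argument blkdev_issue_flush() of recent
 * kernels.
 */
static __maybe_unused int metadata_pre_commit_callback_sketch(void *context)
{
	struct pool *pool = context;

	/* flush the data device before the metadata commit proceeds */
	return blkdev_issue_flush(pool->data_dev);
}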

static sector_t get_dev_size(struct block_device *bdev)
{}

static void warn_if_metadata_device_too_big(struct block_device *bdev)
{}

static sector_t get_metadata_dev_size(struct block_device *bdev)
{}

static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{}

/*
 * When a metadata threshold is crossed a dm event is triggered, and
 * userland should respond by growing the metadata device.  We could let
 * userland set the threshold, like we do with the data threshold, but I'm
 * not sure they know enough to do this well.
 */
static dm_block_t calc_metadata_threshold(struct pool_c *pt)
{}

/*
 * thin-pool <metadata dev> <data dev>
 *	     <data block size (sectors)>
 *	     <low water mark (blocks)>
 *	     [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 *	     read_only: Don't allow any changes to be made to the pool metadata.
 *	     error_if_no_space: error IOs, instead of queueing, if no space.
 */
static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

static int pool_map(struct dm_target *ti, struct bio *bio)
{}

static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{}

static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{}

/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.
 *
 * This copes both with opening pre-allocated data devices in the ctr,
 * which is then followed by a resume, and with calling the resume method
 * individually after userspace has grown the data device in reaction to
 * a table event.
 */
static int pool_preresume(struct dm_target *ti)
{}

static void pool_suspend_active_thins(struct pool *pool)
{}

static void pool_resume_active_thins(struct pool *pool)
{}

static void pool_resume(struct dm_target *ti)
{}

static void pool_presuspend(struct dm_target *ti)
{}

static void pool_presuspend_undo(struct dm_target *ti)
{}

static void pool_postsuspend(struct dm_target *ti)
{}

static int check_arg_count(unsigned int argc, unsigned int args_required)
{}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{}

static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{}

/*
 * Messages supported:
 *   create_thin	<dev_id>
 *   create_snap	<dev_id> <origin_id>
 *   delete		<dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned int sz, unsigned int maxlen)
{}

/*
 * Status line is:
 *    <transaction id> <used metadata sectors>/<total metadata sectors>
 *    <used data sectors>/<total data sectors> <held metadata root>
 *    <pool mode> <discard config> <no space config> <needs_check>
 */
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

static struct target_type pool_target = {};

/*
 *--------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------
 */
static void thin_get(struct thin_c *tc)
{}

static void thin_put(struct thin_c *tc)
{}

static void thin_dtr(struct dm_target *ti)
{}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

static int thin_map(struct dm_target *ti, struct bio *bio)
{}

static int thin_endio(struct dm_target *ti, struct bio *bio,
		blk_status_t *err)
{}

static void thin_presuspend(struct dm_target *ti)
{}

static void thin_postsuspend(struct dm_target *ti)
{}

static int thin_preresume(struct dm_target *ti)
{}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{}

static struct target_type thin_target = {};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{}

static void dm_thin_exit(void)
{}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");