linux/drivers/md/dm-table.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{}
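
/*
 * A minimal sketch of the computation above, using plain ceiling
 * division: repeatedly divide n by base (rounding up) and count the
 * steps. The _sketch suffix marks this as illustrative, not the
 * shipped body.
 */
static unsigned int int_log_sketch(unsigned int n, unsigned int base)
{
	unsigned int result = 0;

	while (n > 1) {
		n = (n + base - 1) / base;	/* ceiling division */
		result++;
	}

	return result;
}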

/*
 * Calculate the index of the child node corresponding to
 * the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{}
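
/*
 * Sketch of the index arithmetic implied above: with CHILDREN_PER_NODE
 * children per node, the k'th child of node n lives at the flat index
 * n * CHILDREN_PER_NODE + k. Illustrative only.
 */
static inline unsigned int get_child_sketch(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}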

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{}
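
/*
 * Sketch of the lookup above: each btree level l is a flat array of
 * nodes of KEYS_PER_NODE keys, so node n starts KEYS_PER_NODE * n
 * entries into t->index[l]. Illustrative only.
 */
static inline sector_t *get_node_sketch(struct dm_table *t,
					unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}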

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{}
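
/*
 * Sketch of the descent described above: follow the rightmost child
 * down to the leaf level, then return that node's last key; nodes past
 * the end of a level report a maximal key. Illustrative only.
 */
static sector_t high_sketch(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}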

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{}
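
/*
 * Sketch of filling one level from the level below, as described
 * above: each key of node n on level l caches the highest sector
 * reachable through the corresponding child node. Illustrative only.
 */
static int setup_btree_index_sketch(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}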

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{}
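
/*
 * Sketch of one plausible growth scheme for those arrays: a single
 * kvcalloc() carrying num highs followed by num targets, with unused
 * highs initialized to -1. The shipped layout may differ; illustrative
 * only.
 */
static int alloc_targets_sketch(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/* One allocation holds num highs followed by num targets. */
	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
			   GFP_KERNEL);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *)(n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	kvfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}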

int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md)
{}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{}
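
/*
 * Sketch of the lookup named above: a linear walk of the table's
 * device list comparing dev_t values. Illustrative only.
 */
static struct dm_dev_internal *find_device_sketch(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list) {
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;
	}

	return NULL;
}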

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
			struct mapped_device *md)
{}
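
/*
 * Sketch of the careful ordering the comment describes: acquire the
 * device again with the widened mode before releasing the old
 * reference, so dd->dm_dev always points at a live open. Assumes the
 * dm_get_table_device()/dm_put_table_device() helpers from dm.c;
 * illustrative only.
 */
static int upgrade_mode_sketch(struct dm_dev_internal *dd, blk_mode_t new_mode,
			       struct mapped_device *md)
{
	struct dm_dev *old_dev = dd->dm_dev, *new_dev;
	int r;

	r = dm_get_table_device(md, old_dev->bdev->bd_dev,
				old_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}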

/*
 * Note: the __ref annotation is because this function can call the __init
 * marked early_lookup_bdev() when invoked during early boot from dm-init.c.
 */
int __ref dm_devt_from_path(const char *path, dev_t *dev_p)
{}
EXPORT_SYMBOL(dm_devt_from_path);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result)
{}
EXPORT_SYMBOL(dm_get_device);
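
/*
 * Sketch of the list/use-count pattern described above: look the dev_t
 * up in the table's device list, bump a refcount_t on a hit, otherwise
 * allocate a new entry. Opening the underlying block device and the
 * mode-upgrade path are elided; illustrative only.
 */
static int dm_get_device_core_sketch(struct dm_target *ti, dev_t dev,
				     struct dm_dev **result)
{
	struct dm_table *t = ti->table;
	struct dm_dev_internal *dd;

	dd = find_device(&t->devices, dev);
	if (dd) {
		refcount_inc(&dd->count);
	} else {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;
		/* Acquiring dd->dm_dev from the bdev is elided here. */
		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
	}

	*result = dd->dm_dev;
	return 0;
}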

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{}
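
/*
 * Sketch of the adjacency test named above: an empty table accepts a
 * target starting at sector 0; otherwise the new target must begin
 * exactly where the previous one ends. Illustrative only.
 */
static int adjoin_sketch(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}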

/*
 * Used to dynamically allocate the arg array.
 *
 * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages, and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{}
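
/*
 * Sketch of the allocation policy the comment spells out: the first,
 * small allocation uses GFP_NOIO so messages can be processed while
 * devices are suspended; later growth doubles the array with
 * GFP_KERNEL. Illustrative only.
 */
static char **realloc_argv_sketch(unsigned int *size, char **old_argv)
{
	unsigned int new_size;
	char **argv;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}

	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv) {
		if (old_argv)
			memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}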

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{}

static void dm_set_stacking_limits(struct queue_limits *limits)
{}

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{}
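
/*
 * Greatly simplified sketch of the constraint stated above: if every
 * internal target boundary sits on a logical-block boundary, no bio of
 * logical_block_size granularity can straddle two targets
 * incompatibly. The real check also folds in each target's own device
 * limits; the parameter below (block size in 512-byte sectors, assumed
 * to be a power of two) is our simplification.
 */
static int lbs_alignment_sketch(struct dm_table *t,
				unsigned int logical_block_size_sects)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/* The boundary at ti->begin must be logical-block aligned. */
		if (ti->begin & (logical_block_size_sects - 1))
			return -EINVAL;
	}

	return 0;
}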

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned int *value, char **error, unsigned int grouped)
{}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error)
{}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *value, char **error)
{}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{}
EXPORT_SYMBOL(dm_consume_args);

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
					      sector_t start, sector_t len, void *data)
{}

static bool dm_table_supports_dax(struct dm_table *t,
				  iterate_devices_callout_fn iterate_fn)
{}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{}

static int dm_table_determine_type(struct dm_table *t)
{}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{}

bool dm_table_bio_based(struct dm_table *t)
{}

bool dm_table_request_based(struct dm_table *t)
{}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{}

static int setup_indexes(struct dm_table *t)
{}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key, unsigned int slot)
{}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table.  However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{}
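
/*
 * Sketch of the sequencing the comment describes, wired through the
 * static helpers declared above; error handling is condensed.
 * Illustrative only.
 */
static int dm_table_complete_sketch(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_construct_crypto_profile(t);
	if (r) {
		DMERR("could not construct crypto profile");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}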

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{}

void dm_table_event(struct dm_table *t)
{}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{}
EXPORT_SYMBOL(dm_table_get_size);

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{}
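
/*
 * Sketch of the btree search described above: at each level, descend
 * into the child selected on the previous level and scan its keys for
 * the first one >= sector; the final (n, k) pair indexes the target
 * array. Illustrative only.
 */
static struct dm_target *dm_table_find_target_sketch(struct dm_table *t,
						     sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}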

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code from iterate_devices_callout_fn will stop the iteration
 * early.
 *
 * Cases requiring _any_ underlying device to support some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle the semantics of positive examples, e.g.
 * being capable of something.
 *
 * Cases requiring _all_ underlying devices to support some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles the semantics of counterexamples, e.g. not
 * being capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{}
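
/*
 * Sketch of the "any" iteration pattern the comment describes: walk
 * every target, let iterate_devices() apply @func to each underlying
 * device, and report true on the first non-zero callout. The "all"
 * variant then follows as !dm_table_any_dev_attr(t, anti_func, data).
 * Illustrative only.
 */
static bool dm_table_any_dev_attr_sketch(struct dm_table *t,
					 iterate_devices_callout_fn func,
					 void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}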

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *t)
{}
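
/*
 * Sketch of the check described above: every target must implement
 * iterate_devices (otherwise the answer is unknown and we report
 * false), and count_device must count zero devices for each one.
 * Illustrative only.
 */
static bool dm_table_has_no_data_devices_sketch(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		unsigned int num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}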

static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
				   unsigned int zone_sectors)
{}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *t,
			      struct queue_limits *limits)
{}

/*
 * Check if a target requires flush support even if none of the underlying
 * devices need it (e.g. to persist target-specific metadata).
 */
static bool dm_table_supports_flush(struct dm_table *t)
{}
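
/*
 * Sketch of the per-target check implied above: a table supports flush
 * when some target both issues flush bios and declares flush support,
 * regardless of what the underlying devices need. Illustrative only.
 */
static bool dm_table_supports_flush_sketch(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->num_flush_bios && ti->flush_supported)
			return true;
	}

	return false;
}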

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{}

static bool dm_table_supports_nowait(struct dm_table *t)
{}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{}

static bool dm_table_supports_discards(struct dm_table *t)
{}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{}

int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{}

struct list_head *dm_table_get_devices(struct dm_table *t)
{}

blk_mode_t dm_table_get_mode(struct dm_table *t)
{}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{}

void dm_table_presuspend_targets(struct dm_table *t)
{}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{}

void dm_table_postsuspend_targets(struct dm_table *t)
{}

int dm_table_resume_targets(struct dm_table *t)
{}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{}
EXPORT_SYMBOL_GPL(dm_table_device_name);

void dm_table_run_md_queue_async(struct dm_table *t)
{}
EXPORT_SYMBOL(dm_table_run_md_queue_async);