#include "dm-core.h"
#include "dm-rq.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
#define DM_MSG_PREFIX …
#define NODE_SIZE …
#define KEYS_PER_NODE …
#define CHILDREN_PER_NODE …
static unsigned int int_log(unsigned int n, unsigned int base)
{ … }
static inline unsigned int get_child(unsigned int n, unsigned int k)
{ … }
static inline sector_t *get_node(struct dm_table *t,
unsigned int l, unsigned int n)
{ … }
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{ … }
static int setup_btree_index(unsigned int l, struct dm_table *t)
{ … }
static int alloc_targets(struct dm_table *t, unsigned int num)
{ … }
int dm_table_create(struct dm_table **result, blk_mode_t mode,
unsigned int num_targets, struct mapped_device *md)
{ … }
static void free_devices(struct list_head *devices, struct mapped_device *md)
{ … }
static void dm_table_destroy_crypto_profile(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{ … }
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{ … }
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
struct mapped_device *md)
{ … }
int __ref dm_devt_from_path(const char *path, dev_t *dev_p)
{ … }
EXPORT_SYMBOL(…);
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result)
{ … }
EXPORT_SYMBOL(…);
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{ … }
EXPORT_SYMBOL(…);
static int adjoin(struct dm_table *t, struct dm_target *ti)
{ … }
static char **realloc_argv(unsigned int *size, char **old_argv)
{ … }
int dm_split_args(int *argc, char ***argvp, char *input)
{ … }
static void dm_set_stacking_limits(struct queue_limits *limits)
{ … }
static int validate_hardware_logical_block_alignment(struct dm_table *t,
struct queue_limits *limits)
{ … }
int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params)
{ … }
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned int *value, char **error, unsigned int grouped)
{ … }
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned int *value, char **error)
{ … }
EXPORT_SYMBOL(…);
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned int *value, char **error)
{ … }
EXPORT_SYMBOL(…);
const char *dm_shift_arg(struct dm_arg_set *as)
{ … }
EXPORT_SYMBOL(…);
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{ … }
EXPORT_SYMBOL(…);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{ … }
static bool __table_type_request_based(enum dm_queue_mode table_type)
{ … }
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{ … }
EXPORT_SYMBOL_GPL(…);
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static bool dm_table_supports_dax(struct dm_table *t,
iterate_devices_callout_fn iterate_fn)
{ … }
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_table_determine_type(struct dm_table *t)
{ … }
enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{ … }
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{ … }
struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{ … }
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{ … }
bool dm_table_bio_based(struct dm_table *t)
{ … }
bool dm_table_request_based(struct dm_table *t)
{ … }
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{ … }
static int setup_indexes(struct dm_table *t)
{ … }
static int dm_table_build_index(struct dm_table *t)
{ … }
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_crypto_profile { … };
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key, unsigned int slot)
{ … }
static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{ … }
static void dm_table_destroy_crypto_profile(struct dm_table *t)
{ … }
static int dm_table_construct_crypto_profile(struct dm_table *t)
{ … }
static void dm_update_crypto_profile(struct request_queue *q,
struct dm_table *t)
{ … }
#else
/*
 * No-op stub used when CONFIG_BLK_INLINE_ENCRYPTION is disabled: there is
 * no inline-encryption support to probe, so "constructing" the profile
 * trivially succeeds.
 *
 * Returns 0 (success) unconditionally.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}
/*
 * No-op stub used when CONFIG_BLK_INLINE_ENCRYPTION is disabled: no crypto
 * profile is ever allocated in this configuration, so there is nothing to
 * free. Kept non-static because it is part of the exported dm API and
 * callers exist regardless of the config option.
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}
/*
 * No-op stub used when CONFIG_BLK_INLINE_ENCRYPTION is disabled: the table
 * carries no crypto profile in this configuration, so teardown is empty.
 */
static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}
/*
 * No-op stub used when CONFIG_BLK_INLINE_ENCRYPTION is disabled: the
 * request queue has no inline-encryption profile to update from the table.
 */
static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}
#endif
int dm_table_complete(struct dm_table *t)
{ … }
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context)
{ … }
void dm_table_event(struct dm_table *t)
{ … }
EXPORT_SYMBOL(…);
inline sector_t dm_table_get_size(struct dm_table *t)
{ … }
EXPORT_SYMBOL(…);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{ … }
static bool dm_table_any_dev_attr(struct dm_table *t,
iterate_devices_callout_fn func, void *data)
{ … }
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
bool dm_table_has_no_data_devices(struct dm_table *t)
{ … }
static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{ … }
static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
unsigned int zone_sectors)
{ … }
int dm_calculate_queue_limits(struct dm_table *t,
struct queue_limits *limits)
{ … }
static bool dm_table_supports_flush(struct dm_table *t)
{ … }
static int device_dax_write_cache_enabled(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{ … }
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{ … }
static bool dm_table_supports_nowait(struct dm_table *t)
{ … }
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{ … }
static bool dm_table_supports_discards(struct dm_table *t)
{ … }
static int device_not_secure_erase_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{ … }
static bool dm_table_supports_secure_erase(struct dm_table *t)
{ … }
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{ … }
struct list_head *dm_table_get_devices(struct dm_table *t)
{ … }
blk_mode_t dm_table_get_mode(struct dm_table *t)
{ … }
EXPORT_SYMBOL(…);
enum suspend_mode { … };
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{ … }
void dm_table_presuspend_targets(struct dm_table *t)
{ … }
void dm_table_presuspend_undo_targets(struct dm_table *t)
{ … }
void dm_table_postsuspend_targets(struct dm_table *t)
{ … }
int dm_table_resume_targets(struct dm_table *t)
{ … }
struct mapped_device *dm_table_get_md(struct dm_table *t)
{ … }
EXPORT_SYMBOL(…);
const char *dm_table_device_name(struct dm_table *t)
{ … }
EXPORT_SYMBOL_GPL(…);
void dm_table_run_md_queue_async(struct dm_table *t)
{ … }
EXPORT_SYMBOL(…);