/* linux/drivers/md/dm-vdo/block-map.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "block-map.h"

#include <linux/bio.h>
#include <linux/ratelimit.h>

#include "errors.h"
#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"

#include "action-manager.h"
#include "admin-state.h"
#include "completion.h"
#include "constants.h"
#include "data-vio.h"
#include "encodings.h"
#include "io-submitter.h"
#include "physical-zone.h"
#include "recovery-journal.h"
#include "slab-depot.h"
#include "status-codes.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
#include "wait-queue.h"

/**
 * DOC: Block map eras
 *
 * The block map era, or maximum age, is used as follows:
 *
 * Each block map page, when dirty, records the earliest recovery journal block sequence number of
 * the changes reflected in that dirty block. Sequence numbers are classified into eras: every
 * @maximum_age sequence numbers, we switch to a new era. Block map pages are assigned to eras
 * according to the sequence number they record.
 *
 * In the current (newest) era, block map pages are not written unless there is cache pressure. In
 * the next oldest era, each time a new journal block is written, 1/@maximum_age of the pages in
 * this era are issued for write. In all older eras, pages are issued for write immediately.
 */
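
/*
 * A minimal sketch of the era arithmetic described above; it is illustrative
 * only and not part of the driver. The helper name is hypothetical. A dirty
 * page records the earliest journal sequence number of its unsaved changes;
 * dividing that number by @maximum_age identifies the era the page belongs to.
 */
static inline sequence_number_t example_era_of_page(sequence_number_t recorded,
						    block_count_t maximum_age)
{
	/* Every @maximum_age sequence numbers begins a new era. */
	return recorded / maximum_age;
}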

struct page_descriptor {} __packed;

union page_key {};

struct write_if_not_dirtied_context {};

struct block_map_tree_segment {};

struct block_map_tree {};

struct forest {};

struct cursor_level {};

struct cursors;

struct cursor {};

struct cursors {};

static const physical_block_number_t NO_PAGE =;

/* Used to indicate that the page holding the location of a tree root has been "loaded". */
static const physical_block_number_t VDO_INVALID_PBN =;

const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY =;

#define LOG_INTERVAL
#define DISPLAY_INTERVAL

/*
 * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
 * Prevents any compiler shenanigans from affecting other threads reading those stats.
 */
#define ADD_ONCE(value, delta)
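
/*
 * A minimal sketch of the single-writer pattern alluded to above (illustrative
 * only; the helper is hypothetical and not part of the driver): the logical
 * zone thread is the sole mutator, so a marked store is sufficient for other
 * threads that read the statistic with READ_ONCE().
 */
static __maybe_unused void example_bump_stat(u64 *stat, s32 delta)
{
	/* Single-writer update; concurrent readers should use READ_ONCE(). */
	WRITE_ONCE(*stat, *stat + delta);
}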

static inline bool is_dirty(const struct page_info *info)
{}

static inline bool is_present(const struct page_info *info)
{}

static inline bool is_in_flight(const struct page_info *info)
{}

static inline bool is_incoming(const struct page_info *info)
{}

static inline bool is_outgoing(const struct page_info *info)
{}

static inline bool is_valid(const struct page_info *info)
{}

static char *get_page_buffer(struct page_info *info)
{}

static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo_waiter *waiter)
{}

/**
 * initialize_info() - Initialize all page info structures and put them on the free list.
 *
 * Return: VDO_SUCCESS or an error.
 */
static int initialize_info(struct vdo_page_cache *cache)
{}

/**
 * allocate_cache_components() - Allocate components of the cache which require their own
 *                               allocation.
 * @cache: The cache being allocated.
 *
 * The caller is responsible for all cleanup on errors.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
{}

/**
 * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's
 *                            thread.
 */
static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
					  const char *function_name)
{}

/** assert_io_allowed() - Assert that a page cache may issue I/O. */
static inline void assert_io_allowed(struct vdo_page_cache *cache)
{}

/** report_cache_pressure() - Log and, if enabled, report cache pressure. */
static void report_cache_pressure(struct vdo_page_cache *cache)
{}

/**
 * get_page_state_name() - Return the name of a page state.
 *
 * If the page state is invalid, a static string is returned and the invalid state is logged.
 *
 * Return: A pointer to a static page state name.
 */
static const char * __must_check get_page_state_name(enum vdo_page_buffer_state state)
{}

/**
 * update_counter() - Update the counter associated with a given state.
 * @info: The page info to count.
 * @delta: The delta to apply to the counter.
 */
static void update_counter(struct page_info *info, s32 delta)
{}

/** update_lru() - Update the lru information for an active page. */
static void update_lru(struct page_info *info)
{}

/**
 * set_info_state() - Set the state of a page_info and put it on the right list, adjusting
 *                    counters.
 */
static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state)
{}

/** set_info_pbn() - Set the pbn for an info, updating the map as needed. */
static int __must_check set_info_pbn(struct page_info *info, physical_block_number_t pbn)
{}

/** reset_page_info() - Reset page info to represent an unallocated page. */
static int reset_page_info(struct page_info *info)
{}

/**
 * find_free_page() - Find a free page.
 *
 * Return: A pointer to the page info structure (if found), NULL otherwise.
 */
static struct page_info * __must_check find_free_page(struct vdo_page_cache *cache)
{}

/**
 * find_page() - Find the page info (if any) associated with a given pbn.
 * @pbn: The absolute physical block number of the page.
 *
 * Return: The page info for the page if available, or NULL if not.
 */
static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
						 physical_block_number_t pbn)
{}

/**
 * select_lru_page() - Determine which page is least recently used.
 *
 * Picks the least recently used page from among the non-busy entries at the front of the LRU
 * ring. Since a page is moved to the end of the ring whenever it is marked busy, the entries at
 * the front are unlikely to be busy unless the ring is very short, but it is not impossible.
 *
 * Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
 *         found. The page can be dirty or resident.
 */
static struct page_info * __must_check select_lru_page(struct vdo_page_cache *cache)
{}
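
/*
 * An illustrative sketch of the selection described above, not the in-tree
 * implementation. The field names cache->lru_list, info->lru_entry, and
 * info->busy are assumptions made for the sketch.
 */
static __maybe_unused struct page_info *example_select_lru_page(struct vdo_page_cache *cache)
{
	struct page_info *info;

	/* The front of the ring holds the least recently used pages. */
	list_for_each_entry(info, &cache->lru_list, lru_entry) {
		if ((info->busy == 0) && !is_in_flight(info))
			return info;
	}

	return NULL;
}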

/* ASYNCHRONOUS INTERFACE BEYOND THIS POINT */

/**
 * complete_with_page() - Helper to complete the VDO Page Completion request successfully.
 * @info: The page info representing the result page.
 * @vdo_page_comp: The VDO page completion to complete.
 */
static void complete_with_page(struct page_info *info,
			       struct vdo_page_completion *vdo_page_comp)
{}

/**
 * complete_waiter_with_error() - Complete a page completion with an error code.
 * @waiter: The page completion, as a waiter.
 * @result_ptr: A pointer to the error code.
 *
 * Implements waiter_callback_fn.
 */
static void complete_waiter_with_error(struct vdo_waiter *waiter, void *result_ptr)
{}

/**
 * complete_waiter_with_page() - Complete a page completion with a page.
 * @waiter: The page completion, as a waiter.
 * @page_info: The page info to complete with.
 *
 * Implements waiter_callback_fn.
 */
static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info)
{}

/**
 * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result.
 *
 * Upon completion the waitq will be empty.
 *
 * Return: The number of pages distributed.
 */
static unsigned int distribute_page_over_waitq(struct page_info *info,
					       struct vdo_wait_queue *waitq)
{}

/**
 * set_persistent_error() - Set a persistent error which all requests will receive in the future.
 * @context: A string describing what triggered the error.
 *
 * Once triggered, all enqueued completions will get this error. Any future requests will result in
 * this error as well.
 */
static void set_persistent_error(struct vdo_page_cache *cache, const char *context,
				 int result)
{}

/**
 * validate_completed_page() - Check that a page completion which is being freed to the cache
 *                             refers to a valid page and is in a valid state.
 * @writable: Whether a writable page is required.
 *
 * Return: VDO_SUCCESS if the page was valid, otherwise an error.
 */
static int __must_check validate_completed_page(struct vdo_page_completion *completion,
						bool writable)
{}

static void check_for_drain_complete(struct block_map_zone *zone)
{}

static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
{}

static bool __must_check
validate_completed_page_or_enter_read_only_mode(struct vdo_page_completion *completion,
						bool writable)
{}

/**
 * handle_load_error() - Handle page load errors.
 * @completion: The page read vio.
 */
static void handle_load_error(struct vdo_completion *completion)
{}

/**
 * page_is_loaded() - Callback used when a page has been loaded.
 * @completion: The vio which has loaded the page. Its parent is the page_info.
 */
static void page_is_loaded(struct vdo_completion *completion)
{}

/**
 * handle_rebuild_read_error() - Handle a read error during a read-only rebuild.
 * @completion: The page load completion.
 */
static void handle_rebuild_read_error(struct vdo_completion *completion)
{}

static void load_cache_page_endio(struct bio *bio)
{}

/**
 * launch_page_load() - Begin the process of loading a page.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int __must_check launch_page_load(struct page_info *info,
					 physical_block_number_t pbn)
{}

static void write_pages(struct vdo_completion *completion);

/** handle_flush_error() - Handle errors flushing the layer. */
static void handle_flush_error(struct vdo_completion *completion)
{}

static void flush_endio(struct bio *bio)
{}

/** save_pages() - Attempt to save the outgoing pages by first flushing the layer. */
static void save_pages(struct vdo_page_cache *cache)
{}

/**
 * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved.
 *
 * Once in the list, a page may not be used until it has been written out.
 */
static void schedule_page_save(struct page_info *info)
{}

/**
 * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving
 * pages if another save is not in progress.
 */
static void launch_page_save(struct page_info *info)
{}

/**
 * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is
 *                           requesting a given page number.
 * @context: A pointer to the pbn of the desired page.
 *
 * Implements waiter_match_fn.
 *
 * Return: true if the page completion is for the desired page number.
 */
static bool completion_needs_page(struct vdo_waiter *waiter, void *context)
{}

/**
 * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and
 *                        any other completions that match it in page number.
 */
static void allocate_free_page(struct page_info *info)
{}

/**
 * discard_a_page() - Begin the process of discarding a page.
 *
 * If no page is discardable, increments a count of deferred frees so that the next release of a
 * page which is no longer busy will kick off another discard cycle. This is an indication that the
 * cache is not big enough.
 *
 * If the selected page is not dirty, immediately allocates the page to the oldest completion
 * waiting for a free page.
 */
static void discard_a_page(struct vdo_page_cache *cache)
{}
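
/*
 * A minimal sketch of the discard flow described above; it is illustrative and
 * not the in-tree implementation. The info->write_status field and the
 * WRITE_STATUS_DISCARD value are assumptions made for the sketch.
 */
static __maybe_unused void example_discard_a_page(struct vdo_page_cache *cache)
{
	struct page_info *info = select_lru_page(cache);

	if (info == NULL) {
		/* Nothing is discardable right now; record the cache pressure. */
		report_cache_pressure(cache);
		return;
	}

	if (!is_dirty(info)) {
		/* A clean page can be handed straight to the oldest waiter. */
		allocate_free_page(info);
		return;
	}

	/* A dirty page must be written out before it can be reused. */
	info->write_status = WRITE_STATUS_DISCARD;
	launch_page_save(info);
}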

/**
 * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get
 *                                 a different page.
 */
static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp)
{}

/**
 * discard_page_if_needed() - Helper used to trigger a discard if the cache needs another free
 *                            page.
 * @cache: The page cache.
 */
static void discard_page_if_needed(struct vdo_page_cache *cache)
{}

/**
 * write_has_finished() - Inform the cache that a write has finished (possibly with an error).
 * @info: The info structure for the page whose write just completed.
 *
 * Return: true if the page write was a discard.
 */
static bool write_has_finished(struct page_info *info)
{}

/**
 * handle_page_write_error() - Handler for page write errors.
 * @completion: The page write vio.
 */
static void handle_page_write_error(struct vdo_completion *completion)
{}

static void page_is_written_out(struct vdo_completion *completion);

static void write_cache_page_endio(struct bio *bio)
{}

/**
 * page_is_written_out() - Callback used when a page has been written out.
 * @completion: The vio which wrote the page. Its parent is a page_info.
 */
static void page_is_written_out(struct vdo_completion *completion)
{}

/**
 * write_pages() - Write the batch of pages which were covered by the layer flush which just
 *                 completed.
 * @flush_completion: The flush vio.
 *
 * This callback is registered in save_pages().
 */
static void write_pages(struct vdo_completion *flush_completion)
{}

/**
 * vdo_release_page_completion() - Release a VDO Page Completion.
 *
 * The page referenced by this completion (if any) will no longer be held busy by this completion.
 * If a page becomes discardable and there are completions awaiting free pages then a new round of
 * page discarding is started.
 */
void vdo_release_page_completion(struct vdo_completion *completion)
{}

/**
 * load_page_for_completion() - Helper function to load a page as described by a VDO Page
 *                              Completion.
 */
static void load_page_for_completion(struct page_info *info,
				     struct vdo_page_completion *vdo_page_comp)
{}

/**
 * vdo_get_page() - Initialize a page completion and get a block map page.
 * @page_completion: The vdo_page_completion to initialize.
 * @zone: The block map zone of the desired page.
 * @pbn: The absolute physical block of the desired page.
 * @writable: Whether the page can be modified.
 * @parent: The object to notify when the fetch is complete.
 * @callback: The notification callback.
 * @error_handler: The handler for fetch errors.
 * @requeue: Whether we must requeue when notifying the parent.
 *
 * May cause another page to be discarded (potentially writing a dirty page) and the one nominated
 * by the completion to be loaded from disk. When the callback is invoked, the page will be
 * resident in the cache and marked busy. All callers must call vdo_release_page_completion()
 * when they are done with the page to clear the busy mark.
 */
void vdo_get_page(struct vdo_page_completion *page_completion,
		  struct block_map_zone *zone, physical_block_number_t pbn,
		  bool writable, void *parent, vdo_action_fn callback,
		  vdo_action_fn error_handler, bool requeue)
{}
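
/*
 * An illustrative usage sketch (a hypothetical caller, not part of the
 * driver): the callback passed to vdo_get_page() retrieves the cached page
 * and must release the completion to clear the busy mark.
 */
static __maybe_unused void example_page_fetched(struct vdo_completion *completion)
{
	struct block_map_page *page;
	int result = vdo_get_cached_page(completion, &page);

	if (result == VDO_SUCCESS) {
		/* Read or modify entries in @page here. */
	}

	/* Always release the completion so the page is no longer held busy. */
	vdo_release_page_completion(completion);
}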

/**
 * vdo_request_page_write() - Request that a VDO page be written out as soon as it is not busy.
 * @completion: The vdo_page_completion containing the page.
 */
void vdo_request_page_write(struct vdo_completion *completion)
{}

/**
 * vdo_get_cached_page() - Get the block map page from a page completion.
 * @completion: A vdo page completion whose callback has been called.
 * @page_ptr: A pointer to hold the page.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_get_cached_page(struct vdo_completion *completion,
			struct block_map_page **page_ptr)
{}

/**
 * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache.
 *
 * There must not be any dirty pages in the cache.
 *
 * Return: A success or error code.
 */
int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
{}

/**
 * get_tree_page_by_index() - Get the tree page for a given height and page index.
 *
 * Return: The requested page.
 */
static struct tree_page * __must_check get_tree_page_by_index(struct forest *forest,
							      root_count_t root_index,
							      height_t height,
							      page_number_t page_index)
{}

/* Get the page referred to by the lock's tree slot at its current height. */
static inline struct tree_page *get_tree_page(const struct block_map_zone *zone,
					      const struct tree_lock *lock)
{}

/** vdo_copy_valid_page() - Validate and copy a buffer to a page. */
bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
			 physical_block_number_t pbn,
			 struct block_map_page *page)
{}

/**
 * in_cyclic_range() - Check whether the given value is between the lower and upper bounds, within
 *                     a cyclic range of values from 0 to (modulus - 1).
 * @lower: The lowest value to accept.
 * @value: The value to check.
 * @upper: The highest value to accept.
 * @modulus: The size of the cyclic space, no more than 2^15.
 *
 * The value and both bounds must be smaller than the modulus.
 *
 * Return: true if the value is in range.
 */
static bool in_cyclic_range(u16 lower, u16 value, u16 upper, u16 modulus)
{}
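
/*
 * A minimal sketch of the cyclic comparison described above (illustrative; the
 * in-tree implementation may differ in detail). Because all inputs are less
 * than the modulus and the modulus is at most 2^15, the additions below cannot
 * overflow a u16.
 */
static __maybe_unused bool example_in_cyclic_range(u16 lower, u16 value, u16 upper, u16 modulus)
{
	if (value < lower)
		value += modulus;
	if (upper < lower)
		upper += modulus;
	return (value <= upper);
}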

/**
 * is_not_older() - Check whether a generation is not strictly older than some other generation
 *                  in the context of a zone's current generation range.
 * @zone: The zone in which to do the comparison.
 * @a: The generation in question.
 * @b: The generation to compare to.
 *
 * Return: true if generation @a is not strictly older than generation @b in the context of @zone.
 */
static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
{}

static void release_generation(struct block_map_zone *zone, u8 generation)
{}

static void set_generation(struct block_map_zone *zone, struct tree_page *page,
			   u8 new_generation)
{}

static void write_page(struct tree_page *tree_page, struct pooled_vio *vio);

/* Implements waiter_callback_fn */
static void write_page_callback(struct vdo_waiter *waiter, void *context)
{}

static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone)
{}

/* Return: true if all possible generations were not already active */
static bool attempt_increment(struct block_map_zone *zone)
{}

/* Launches a flush if one is not already in progress. */
static void enqueue_page(struct tree_page *page, struct block_map_zone *zone)
{}

static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
{}

static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
{}

/* This callback is registered in write_initialized_page(). */
static void finish_page_write(struct vdo_completion *completion)
{}

static void handle_write_error(struct vdo_completion *completion)
{}

static void write_page_endio(struct bio *bio);

static void write_initialized_page(struct vdo_completion *completion)
{}

static void write_page_endio(struct bio *bio)
{}

static void write_page(struct tree_page *tree_page, struct pooled_vio *vio)
{}

/* Release a lock on a page which was being loaded or allocated. */
static void release_page_lock(struct data_vio *data_vio, char *what)
{}

static void finish_lookup(struct data_vio *data_vio, int result)
{}

static void abort_lookup_for_waiter(struct vdo_waiter *waiter, void *context)
{}

static void abort_lookup(struct data_vio *data_vio, int result, char *what)
{}

static void abort_load(struct data_vio *data_vio, int result)
{}

static bool __must_check is_invalid_tree_entry(const struct vdo *vdo,
					       const struct data_location *mapping,
					       height_t height)
{}

static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
static void allocate_block_map_page(struct block_map_zone *zone,
				    struct data_vio *data_vio);

static void continue_with_loaded_page(struct data_vio *data_vio,
				      struct block_map_page *page)
{}

static void continue_load_for_waiter(struct vdo_waiter *waiter, void *context)
{}

static void finish_block_map_page_load(struct vdo_completion *completion)
{}

static void handle_io_error(struct vdo_completion *completion)
{}

static void load_page_endio(struct bio *bio)
{}

static void load_page(struct vdo_waiter *waiter, void *context)
{}

/*
 * If the page is already locked, queue up to wait for the lock to be released. If the lock is
 * acquired, @data_vio->tree_lock.locked will be true.
 */
static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio)
{}
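
/*
 * An illustrative sketch (hypothetical, not part of the driver) of how a
 * caller typically consumes the contract above: on VDO_SUCCESS, proceed only
 * if this data_vio actually acquired the lock; otherwise it has been queued
 * behind the current lock holder, which will continue it later.
 */
static __maybe_unused void example_after_page_lock(struct block_map_zone *zone,
						   struct data_vio *data_vio)
{
	int result = attempt_page_lock(zone, data_vio);

	if (result != VDO_SUCCESS) {
		abort_load(data_vio, result);
		return;
	}

	if (!data_vio->tree_lock.locked) {
		/* Queued behind the lock holder; nothing more to do here. */
		return;
	}

	/* The lock is held: the caller may now load or allocate the page. */
}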

/* Load a block map tree page from disk, for the next level in the data vio tree lock. */
static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio)
{}

static void allocation_failure(struct vdo_completion *completion)
{}

static void continue_allocation_for_waiter(struct vdo_waiter *waiter, void *context)
{}

/** expire_oldest_list() - Expire the oldest list. */
static void expire_oldest_list(struct dirty_lists *dirty_lists)
{}

/** update_period() - Update the dirty_lists period if necessary. */
static void update_period(struct dirty_lists *dirty, sequence_number_t period)
{}

/** write_expired_elements() - Write out the expired list. */
static void write_expired_elements(struct block_map_zone *zone)
{}

/**
 * add_to_dirty_lists() - Add an element to the dirty lists.
 * @zone: The zone in which we are operating.
 * @entry: The list entry of the element to add.
 * @type: The type of page.
 * @old_period: The period in which the element was previously dirtied, or 0 if it was not dirty.
 * @new_period: The period in which the element has now been dirtied, or 0 if it does not hold a
 *              lock.
 */
static void add_to_dirty_lists(struct block_map_zone *zone,
			       struct list_head *entry,
			       enum block_map_page_type type,
			       sequence_number_t old_period,
			       sequence_number_t new_period)
{}

/*
 * Record the allocation in the tree and wake any waiters now that the write lock has been
 * released.
 */
static void finish_block_map_allocation(struct vdo_completion *completion)
{}

static void release_block_map_write_lock(struct vdo_completion *completion)
{}

/*
 * Newly allocated block map pages are set to MAXIMUM_REFERENCES after they are journaled,
 * to prevent deduplication against the block after we release the write lock on it, but before we
 * write out the page.
 */
static void set_block_map_page_reference_count(struct vdo_completion *completion)
{}

static void journal_block_map_allocation(struct vdo_completion *completion)
{}

static void allocate_block(struct vdo_completion *completion)
{}

static void allocate_block_map_page(struct block_map_zone *zone,
				    struct data_vio *data_vio)
{}

/**
 * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio
 *                             resides and cache that result in the data_vio.
 *
 * All ancestors in the tree will be allocated or loaded, as needed.
 */
void vdo_find_block_map_slot(struct data_vio *data_vio)
{}

/*
 * Find the PBN of a leaf block map page. This method may only be used after all allocated tree
 * pages have been loaded; otherwise, it may give the wrong answer (0).
 */
physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
						    page_number_t page_number)
{}

/*
 * Write a tree page or indicate that it has been re-dirtied if it is already being written. This
 * method is used when correcting errors in the tree during read-only rebuild.
 */
void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone)
{}

static int make_segment(struct forest *old_forest, block_count_t new_pages,
			struct boundary *new_boundary, struct forest *forest)
{}

static void deforest(struct forest *forest, size_t first_page_segment)
{}

/**
 * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if
 *                 there is one.
 * @entries: The number of entries the block map will hold.
 *
 * Return: VDO_SUCCESS or an error.
 */
static int make_forest(struct block_map *map, block_count_t entries)
{}

/**
 * replace_forest() - Replace a block_map's forest with the already-prepared larger forest.
 */
static void replace_forest(struct block_map *map)
{}

/**
 * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the
 *                   traversal.
 */
static void finish_cursor(struct cursor *cursor)
{}

static void traverse(struct cursor *cursor);

/**
 * continue_traversal() - Continue traversing a block map tree.
 * @completion: The VIO doing a read or write.
 */
static void continue_traversal(struct vdo_completion *completion)
{}

/**
 * finish_traversal_load() - Continue traversing a block map tree now that a page has been loaded.
 * @completion: The VIO doing the read.
 */
static void finish_traversal_load(struct vdo_completion *completion)
{}

static void traversal_endio(struct bio *bio)
{}

/**
 * traverse() - Traverse a single block map tree.
 *
 * This is the recursive heart of the traversal process.
 */
static void traverse(struct cursor *cursor)
{}

/**
 * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with
 *                   which to load pages.
 * @context: The pooled_vio just acquired.
 *
 * Implements waiter_callback_fn.
 */
static void launch_cursor(struct vdo_waiter *waiter, void *context)
{}

/**
 * compute_boundary() - Compute the number of pages used at each level of the given root's tree.
 *
 * Return: The list of page counts as a boundary structure.
 */
static struct boundary compute_boundary(struct block_map *map, root_count_t root_index)
{}
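
/*
 * Illustrative arithmetic only, not the driver's implementation: with a fanout
 * of @fanout entries per interior tree page, the page count at each level
 * shrinks by that factor going up from the leaves. The parameter and array
 * names are assumptions made for the sketch.
 */
static __maybe_unused void example_pages_per_level(block_count_t leaf_pages,
						   block_count_t fanout,
						   height_t height,
						   block_count_t *level_pages)
{
	height_t level;

	level_pages[0] = leaf_pages;
	for (level = 1; level < height; level++)
		level_pages[level] = DIV_ROUND_UP(level_pages[level - 1], fanout);
}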

/**
 * vdo_traverse_forest() - Walk the entire forest of a block map.
 * @callback: A function to call with the pbn of each allocated node in the forest.
 * @completion: The completion to notify on each traversed PBN, and when traversal completes.
 */
void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
			 struct vdo_completion *completion)
{}

/**
 * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
 * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be
 *               written out.
 */
static int __must_check initialize_block_map_zone(struct block_map *map,
						  zone_count_t zone_number,
						  page_count_t cache_size,
						  block_count_t maximum_age)
{}

/* Implements vdo_zone_thread_getter_fn */
static thread_id_t get_block_map_zone_thread_id(void *context, zone_count_t zone_number)
{}

/* Implements vdo_action_preamble_fn */
static void prepare_for_era_advance(void *context, struct vdo_completion *parent)
{}

/* Implements vdo_zone_action_fn */
static void advance_block_map_zone_era(void *context, zone_count_t zone_number,
				       struct vdo_completion *parent)
{}

/*
 * Schedule an era advance if necessary. This method should not be called directly. Rather, call
 * vdo_schedule_default_action() on the block map's action manager.
 *
 * Implements vdo_action_scheduler_fn.
 */
static bool schedule_era_advance(void *context)
{}

static void uninitialize_block_map_zone(struct block_map_zone *zone)
{}

void vdo_free_block_map(struct block_map *map)
{}

/* @journal may be NULL. */
int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical_blocks,
			 struct vdo *vdo, struct recovery_journal *journal,
			 nonce_t nonce, page_count_t cache_size, block_count_t maximum_age,
			 struct block_map **map_ptr)
{}

struct block_map_state_2_0 vdo_record_block_map(const struct block_map *map)
{}

/* The block map needs to know the journal's sequence number to initialize the eras. */
void vdo_initialize_block_map_from_journal(struct block_map *map,
					   struct recovery_journal *journal)
{}

/* Compute the logical zone for the LBN of a data vio. */
zone_count_t vdo_compute_logical_zone(struct data_vio *data_vio)
{}
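
/*
 * A hypothetical sketch of one plausible LBN-to-zone mapping, shown only to
 * make the idea concrete. The round-robin distribution of block map pages over
 * tree roots, and of roots over logical zones, is an assumption here rather
 * than a statement of the driver's actual policy.
 */
static __maybe_unused zone_count_t example_logical_zone_of(u64 lbn,
							    block_count_t entries_per_page,
							    root_count_t root_count,
							    zone_count_t zone_count)
{
	page_number_t page_number = lbn / entries_per_page;
	root_count_t root_index = page_number % root_count;

	return root_index % zone_count;
}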

void vdo_advance_block_map_era(struct block_map *map,
			       sequence_number_t recovery_block_number)
{}

/* Implements vdo_admin_initiator_fn */
static void initiate_drain(struct admin_state *state)
{}

/* Implements vdo_zone_action_fn. */
static void drain_zone(void *context, zone_count_t zone_number,
		       struct vdo_completion *parent)
{}

void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
			 struct vdo_completion *parent)
{}

/* Implements vdo_zone_action_fn. */
static void resume_block_map_zone(void *context, zone_count_t zone_number,
				  struct vdo_completion *parent)
{}

void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent)
{}

/* Allocate an expanded collection of trees, for future growth. */
int vdo_prepare_to_grow_block_map(struct block_map *map,
				  block_count_t new_logical_blocks)
{}

/* Implements vdo_action_preamble_fn */
static void grow_forest(void *context, struct vdo_completion *completion)
{}

/* Requires vdo_prepare_to_grow_block_map() to have been previously called. */
void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent)
{}

void vdo_abandon_block_map_growth(struct block_map *map)
{}

/* Release the page completion and then continue the requester. */
static inline void finish_processing_page(struct vdo_completion *completion, int result)
{}

static void handle_page_error(struct vdo_completion *completion)
{}

/* Fetch the mapping page for a block map update, and call the provided handler when fetched. */
static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable,
			       vdo_action_fn action)
{}

/**
 * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped.
 *
 * This indicates the block map entry for the logical block is either unmapped or corrupted.
 */
static void clear_mapped_location(struct data_vio *data_vio)
{}

/**
 * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a
 *                         data_vio.
 *
 * Return: VDO_SUCCESS, VDO_BAD_MAPPING if the map entry is invalid, or an error code for any
 *         other failure.
 */
static int __must_check set_mapped_location(struct data_vio *data_vio,
					    const struct block_map_entry *entry)
{}

/* This callback is registered in vdo_get_mapped_block(). */
static void get_mapping_from_fetched_page(struct vdo_completion *completion)
{}

void vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio,
			       physical_block_number_t pbn,
			       enum block_mapping_state mapping_state,
			       sequence_number_t *recovery_lock)
{}

static void put_mapping_in_fetched_page(struct vdo_completion *completion)
{}

/* Read a stored block mapping into a data_vio. */
void vdo_get_mapped_block(struct data_vio *data_vio)
{}

/* Update a stored block mapping to reflect a data_vio's new mapping. */
void vdo_put_mapped_block(struct data_vio *data_vio)
{}

struct block_map_statistics vdo_get_block_map_statistics(struct block_map *map)
{}