/* linux/drivers/md/dm-vdo/indexer/volume-index.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */
#include "volume-index.h"

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/log2.h>

#include "errors.h"
#include "logger.h"
#include "memory-alloc.h"
#include "numeric.h"
#include "permassert.h"
#include "thread-utils.h"

#include "config.h"
#include "geometry.h"
#include "hash-utils.h"
#include "indexer.h"

/*
 * The volume index is a combination of two separate subindexes, one containing sparse hook entries
 * (retained for all chapters), and one containing the remaining entries (retained only for the
 * dense chapters). If there are no sparse chapters, only the non-hook sub index is used, and it
 * will contain all records for all chapters.
 *
 * The volume index is also divided into zones, with one thread operating on each zone. Each
 * incoming request is dispatched to the appropriate thread, and then to the appropriate subindex.
 * Each delta list is handled by a single zone. To ensure that the distribution of delta lists to
 * zones doesn't underflow (leaving some zone with no delta lists), the minimum number of delta
 * lists must be the square of the maximum zone count for both subindexes.
 *
 * Each subindex zone is a delta index where the payload is a chapter number. The volume index can
 * compute the delta list number, address, and zone number from the record name in order to
 * dispatch record handling to the correct structures.
 *
 * Most operations that use all the zones take place either before request processing is allowed,
 * or after all requests have been flushed in order to shut down. The only multi-threaded operation
 * supported during normal operation is the uds_lookup_volume_index_name() method, used to determine
 * whether a new chapter should be loaded into the sparse index cache. This operation only uses the
 * sparse hook subindex, and the zone mutexes are used to make this operation safe.
 *
 * There are three ways of expressing chapter numbers in the volume index: virtual, index, and
 * rolling. The interface to the volume index uses virtual chapter numbers, which are 64 bits long.
 * Internally the subindex stores only the minimal number of bits necessary by masking away the
 * high-order bits. When the index needs to deal with ordering of index chapter numbers, as when
 * flushing entries from older chapters, it rolls the index chapter number around so that the
 * smallest one in use is mapped to 0. See convert_index_to_virtual() or flush_invalid_entries()
 * for an example of this technique.
 *
 * For efficiency, when older chapter numbers become invalid, the index does not immediately remove
 * the invalidated entries. Instead it lazily removes them from a given delta list the next time it
 * walks that list during normal operation. Because of this, the index size must be increased
 * somewhat to accommodate all the invalid entries that have not yet been removed. For the standard
 * index sizes, this requires about 4 chapters of old entries per 1024 chapters of valid entries in
 * the index.
 */

/* Sizing parameters computed for one subindex; members elided in this copy — restore from upstream. */
struct sub_index_parameters {};

/* A configuration divided between the non-hook and sparse-hook subindexes; members elided. */
struct split_config {};

/* A range of (rolled) index chapter numbers, passed as the flush_range when walking delta lists; members elided. */
struct chapter_range {};

/* Length of the magic tag at the start of each saved (sub-)index header (value stripped; upstream uses 8). */
#define MAGIC_SIZE 8

/*
 * Magic string tagging a saved sub_index_data header. The initializer was
 * stripped in this copy (`=;` does not compile); restored from upstream.
 */
static const char MAGIC_START_5[] = "MI5-0005";

/* Saved header for one subindex; members elided. Presumably begins with MAGIC_START_5 — confirm upstream. */
struct sub_index_data {};

/*
 * Magic string tagging a saved volume_index_data header. The initializer was
 * stripped in this copy (`=;` does not compile); restored from upstream.
 */
static const char MAGIC_START_6[] = "MI6-0001";

/* Saved header for the combined volume index; members elided. Presumably begins with MAGIC_START_6 — confirm upstream. */
struct volume_index_data {};

/*
 * Extract the in-list address bits for a record name within this subindex.
 * (Body elided in this copy. Per the file header, the address is computed
 * directly from the record name.)
 */
static inline u32 extract_address(const struct volume_sub_index *sub_index,
				  const struct uds_record_name *name)
{}

/* Extract the delta list number for a record name within this subindex (body elided). */
static inline u32 extract_dlist_num(const struct volume_sub_index *sub_index,
				    const struct uds_record_name *name)
{}

/* Return the subindex zone responsible for this record's delta list (body elided). */
static inline const struct volume_sub_index_zone *
get_zone_for_record(const struct volume_index_record *record)
{}

/*
 * Map a rolled index chapter number back to a full 64-bit virtual chapter
 * number (body elided). See the file header's discussion of virtual, index,
 * and rolling chapter numbers.
 */
static inline u64 convert_index_to_virtual(const struct volume_index_record *record,
					   u32 index_chapter)
{}

/*
 * Reduce a 64-bit virtual chapter number to the subindex's internal index
 * chapter number by masking away the high-order bits (body elided; per the
 * file header, only the minimal number of bits is stored).
 */
static inline u32 convert_virtual_to_index(const struct volume_sub_index *sub_index,
					   u64 virtual_chapter)
{}

/*
 * Whether the given virtual chapter is still retained by this record's
 * subindex (body elided). Presumably compares against the oldest and newest
 * chapters in use — confirm upstream.
 */
static inline bool is_virtual_chapter_indexed(const struct volume_index_record *record,
					      u64 virtual_chapter)
{}

/*
 * Whether this volume index has a separate sparse-hook subindex (body elided).
 * Per the file header, the hook subindex is used only when sparse chapters
 * are configured.
 */
static inline bool has_sparse(const struct volume_index *volume_index)
{}

/* Report whether a record name is a sparse-hook sample for this index (body elided). */
bool uds_is_volume_index_sample(const struct volume_index *volume_index,
				const struct uds_record_name *name)
{}

/*
 * Select the subindex (sparse-hook vs. non-hook) that covers this record name
 * (body elided).
 */
static inline const struct volume_sub_index *
get_volume_sub_index(const struct volume_index *volume_index,
		     const struct uds_record_name *name)
{}

/* Compute the zone number a record name maps to within one subindex (body elided). */
static unsigned int get_volume_sub_index_zone(const struct volume_sub_index *sub_index,
					      const struct uds_record_name *name)
{}

/*
 * Public wrapper: the zone whose thread should handle requests for this record
 * name (body elided). Per the file header, each incoming request is dispatched
 * to the appropriate zone thread.
 */
unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index,
				       const struct uds_record_name *name)
{}

/* Nominal size of a delta list, in bits (value stripped in this copy; upstream uses 256). */
#define DELTA_LIST_SIZE 256

/*
 * Derive delta-index sizing parameters for one subindex from the
 * configuration, filling in *params (body elided). Returns an int status —
 * presumably UDS_SUCCESS or an error code; confirm upstream.
 */
static int compute_volume_sub_index_parameters(const struct uds_configuration *config,
					       struct sub_index_parameters *params)
{}

/* Release resources owned by one subindex (body elided). */
static void uninitialize_volume_sub_index(struct volume_sub_index *sub_index)
{}

/*
 * Free a volume index and its subindexes (body elided). Presumably a no-op
 * for NULL, per kernel convention — confirm upstream.
 */
void uds_free_volume_index(struct volume_index *volume_index)
{}


/* Compute the number of bytes needed to save one subindex, in *bytes (body elided). */
static int compute_volume_sub_index_save_bytes(const struct uds_configuration *config,
					       size_t *bytes)
{}

/* This function is only useful if the configuration includes sparse chapters. */
/*
 * Divide the configuration between the non-hook and sparse-hook subindexes,
 * filling in *split (body elided).
 */
static void split_configuration(const struct uds_configuration *config,
				struct split_config *split)
{}

/*
 * Compute the total bytes needed to save the whole volume index, in *bytes
 * (body elided). Presumably sums the subindex save sizes plus header overhead
 * — confirm upstream.
 */
static int compute_volume_index_save_bytes(const struct uds_configuration *config,
					   size_t *bytes)
{}

/*
 * Convert the volume index save size into a count of block_size-sized blocks,
 * in *block_count (body elided).
 */
int uds_compute_volume_index_save_blocks(const struct uds_configuration *config,
					 size_t block_size, u64 *block_count)
{}

/* Flush invalid entries while walking the delta list. */
/*
 * (Body elided in this copy. Per the file header, entries from invalidated
 * chapters are removed lazily the next time their delta list is walked, using
 * rolled index chapter numbers to decide validity.)
 */
static inline int flush_invalid_entries(struct volume_index_record *record,
					struct chapter_range *flush_range,
					u32 *next_chapter_to_invalidate)
{}

/* Find the matching record, or the list offset where the record would go. */
/* (Body elided; walks delta list list_number looking for key, flushing invalid entries via flush_range along the way — confirm upstream.) */
static int get_volume_index_entry(struct volume_index_record *record, u32 list_number,
				  u32 key, struct chapter_range *flush_range)
{}

/* Look up a record name in one subindex, initializing *record for the caller (body elided). */
static int get_volume_sub_index_record(struct volume_sub_index *sub_index,
				       const struct uds_record_name *name,
				       struct volume_index_record *record)
{}

/*
 * Public lookup: dispatch to the appropriate subindex for the name and fill
 * in *record (body elided).
 */
int uds_get_volume_index_record(struct volume_index *volume_index,
				const struct uds_record_name *name,
				struct volume_index_record *record)
{}

/*
 * Add an entry mapping the record's name to virtual_chapter (body elided).
 * Presumably requires a record previously returned by
 * uds_get_volume_index_record() — confirm upstream.
 */
int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_chapter)
{}

/* Remove the entry for a previously looked-up record (body elided). */
int uds_remove_volume_index_record(struct volume_index_record *record)
{}

/*
 * Advance one subindex zone to a new open chapter (body elided). Presumably
 * this is where chapters falling out of range are marked for the lazy
 * invalidation described in the file header — confirm upstream.
 */
static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_index,
						   unsigned int zone_number,
						   u64 virtual_chapter)
{}

/* Advance one zone of each relevant subindex to the new open chapter (body elided). */
void uds_set_volume_index_zone_open_chapter(struct volume_index *volume_index,
					    unsigned int zone_number,
					    u64 virtual_chapter)
{}

/*
 * Set the newest open chapter number for the index, while also advancing the oldest valid chapter
 * number.
 */
/* (Body elided in this copy; presumably iterates over all zones — confirm upstream.) */
void uds_set_volume_index_open_chapter(struct volume_index *volume_index,
				       u64 virtual_chapter)
{}

/* Re-map an existing record to a new virtual chapter (body elided). */
int uds_set_volume_index_record_chapter(struct volume_index_record *record,
					u64 virtual_chapter)
{}

/*
 * Read-only lookup of the virtual chapter recorded for a name in one subindex
 * (body elided). Presumably returns a sentinel value when the name is not
 * found — confirm upstream.
 */
static u64 lookup_volume_sub_index_name(const struct volume_sub_index *sub_index,
					const struct uds_record_name *name)
{}

/* Do a read-only lookup of the record name for sparse cache management. */
/*
 * (Body elided. Per the file header, this is the only multi-threaded
 * operation supported during normal request processing; it uses only the
 * sparse-hook subindex, made safe by the zone mutexes.)
 */
u64 uds_lookup_volume_index_name(const struct volume_index *volume_index,
				 const struct uds_record_name *name)
{}

/* Abandon an in-progress restore of one subindex (body elided). */
static void abort_restoring_volume_sub_index(struct volume_sub_index *sub_index)
{}

/* Abandon an in-progress restore of the whole volume index (body elided). */
static void abort_restoring_volume_index(struct volume_index *volume_index)
{}

/*
 * Begin restoring one subindex from saved state, reading headers from the
 * given buffered readers, one per zone save (body elided).
 */
static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
					    struct buffered_reader **readers,
					    unsigned int reader_count)
{}

static int start_restoring_volume_index(struct volume_index *volume_index,
					struct buffered_reader **buffered_readers,
					unsigned int reader_count)
{}

/* Finish restoring one subindex's delta index data from the readers (body elided). */
static int finish_restoring_volume_sub_index(struct volume_sub_index *sub_index,
					     struct buffered_reader **buffered_readers,
					     unsigned int reader_count)
{}

/* Finish restoring all subindexes of the volume index (body elided). */
static int finish_restoring_volume_index(struct volume_index *volume_index,
					 struct buffered_reader **buffered_readers,
					 unsigned int reader_count)
{}

/*
 * Load a saved volume index from the given readers (body elided). Presumably
 * combines the start/finish restore steps and aborts the restore on error —
 * confirm upstream.
 */
int uds_load_volume_index(struct volume_index *volume_index,
			  struct buffered_reader **readers, unsigned int reader_count)
{}

/* Begin saving one zone of a subindex, writing its header via the buffered writer (body elided). */
static int start_saving_volume_sub_index(const struct volume_sub_index *sub_index,
					 unsigned int zone_number,
					 struct buffered_writer *buffered_writer)
{}

/* Begin saving one zone of the volume index (body elided). */
static int start_saving_volume_index(const struct volume_index *volume_index,
				     unsigned int zone_number,
				     struct buffered_writer *writer)
{}

/* Complete the save of one subindex zone (body elided). */
static int finish_saving_volume_sub_index(const struct volume_sub_index *sub_index,
					  unsigned int zone_number)
{}

/* Complete the save of one volume index zone (body elided). */
static int finish_saving_volume_index(const struct volume_index *volume_index,
				      unsigned int zone_number)
{}

/*
 * Save the volume index, one zone per writer (body elided). Per the file
 * header, whole-index operations like this run only after all requests have
 * been flushed.
 */
int uds_save_volume_index(struct volume_index *volume_index,
			  struct buffered_writer **writers, unsigned int writer_count)
{}

/* Collect usage statistics for one subindex into *stats (body elided). */
static void get_volume_sub_index_stats(const struct volume_sub_index *sub_index,
				       struct volume_index_stats *stats)
{}

/* Collect combined statistics for the volume index into *stats (body elided). */
void uds_get_volume_index_stats(const struct volume_index *volume_index,
				struct volume_index_stats *stats)
{}

/*
 * Initialize one subindex from the configuration (body elided). The tag byte
 * presumably distinguishes the hook and non-hook subindexes in saved state,
 * and volume_nonce ties the index to its volume — confirm upstream.
 */
static int initialize_volume_sub_index(const struct uds_configuration *config,
				       u64 volume_nonce, u8 tag,
				       struct volume_sub_index *sub_index)
{}

/*
 * Allocate and initialize a volume index, returning it in *volume_index_ptr
 * (body elided). Per the file header, when sparse chapters are configured the
 * configuration is split between a sparse-hook and a non-hook subindex;
 * otherwise only the non-hook subindex is created.
 */
int uds_make_volume_index(const struct uds_configuration *config, u64 volume_nonce,
			  struct volume_index **volume_index_ptr)
{}