linux/drivers/md/dm-vdo/indexer/sparse-cache.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "sparse-cache.h"

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/dm-bufio.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"

#include "chapter-index.h"
#include "config.h"
#include "index.h"

/*
 * Since the cache is small, it is implemented as a simple array of cache entries. Searching for a
 * specific virtual chapter is implemented as a linear search. The cache replacement policy is
 * least-recently-used (LRU). Again, the small size of the cache allows the LRU order to be
 * maintained by shifting entries in an array list.
 *
 * Changing the contents of the cache requires the coordinated participation of all zone threads
 * via the careful use of barrier messages sent to all the index zones by the triage queue worker
 * thread. The critical invariant for coordination is that the cache membership must not change
 * between updates, so that all calls to uds_sparse_cache_contains() from the zone threads must all
 * receive the same results for every virtual chapter number. To ensure that critical invariant,
 * state changes such as "that virtual chapter is no longer in the volume" and "skip searching that
 * chapter because it has had too many cache misses" are represented separately from the cache
 * membership information (the virtual chapter number).
 *
 * As a result of this invariant, we have the guarantee that every zone thread will call
 * uds_update_sparse_cache() once and exactly once to request a chapter that is not in the cache,
 * and the serialization of the barrier requests from the triage queue ensures they will all
 * request the same chapter number. This means the only synchronization we need can be provided by
 * a pair of thread barriers used only in the uds_update_sparse_cache() call, providing a critical
 * section where a single zone thread can drive the cache update while all the other zone threads
 * are known to be blocked, waiting in the second barrier. Outside that critical section, all the
 * zone threads implicitly hold a shared lock. Inside it, the thread for zone zero holds an
 * exclusive lock. No other threads may access or modify the cache entries.
 *
 * Chapter statistics must only be modified by a single thread, which is also the zone zero thread.
 * All fields that might be frequently updated by that thread are kept in separate cache-aligned
 * structures so they will not cause cache contention via "false sharing" with the fields that are
 * frequently accessed by all of the zone threads.
 *
 * The LRU order is managed independently by each zone thread, and each zone uses its own list for
 * searching and cache membership queries. The zone zero list is used to decide which chapter to
 * evict when the cache is updated, and its search list is copied to the other threads at that
 * time.
 *
 * The virtual chapter number field of the cache entry is the single field indicating whether a
 * chapter is a member of the cache or not. The value NO_CHAPTER is used to represent a null or
 * undefined chapter number. When present in the virtual chapter number field of a
 * cached_chapter_index, it indicates that the cache entry is dead, and all the other fields of
 * that entry (other than immutable pointers to cache memory) are undefined and irrelevant. Any
 * cache entry that is not marked as dead is fully defined and a member of the cache, and
 * uds_sparse_cache_contains() will always return true for any virtual chapter number that appears
 * in any of the cache entries.
 *
 * A chapter index that is a member of the cache may be excluded from searches between calls to
 * uds_update_sparse_cache() in two different ways. First, when a chapter falls off the end of the
 * volume, its virtual chapter number will be less that the oldest virtual chapter number. Since
 * that chapter is no longer part of the volume, there's no point in continuing to search that
 * chapter index. Once invalidated, that virtual chapter will still be considered a member of the
 * cache, but it will no longer be searched for matching names.
 *
 * The second mechanism is a heuristic based on keeping track of the number of consecutive search
 * misses in a given chapter index. Once that count exceeds a threshold, the skip_search flag will
 * be set to true, causing the chapter to be skipped when searching the entire cache, but still
 * allowing it to be found when searching for a hook in that specific chapter. Finding a hook will
 * clear the skip_search flag, once again allowing the non-hook searches to use that cache entry.
 * Again, regardless of the state of the skip_search flag, the virtual chapter must still
 * considered to be a member of the cache for uds_sparse_cache_contains().
 */

#define SKIP_SEARCH_THRESHOLD
#define ZONE_ZERO

/*
 * These counters are essentially fields of the struct cached_chapter_index, but are segregated
 * into this structure because they are frequently modified. They are grouped and aligned to keep
 * them on different cache lines from the chapter fields that are accessed far more often than they
 * are updated.
 */
struct __aligned(L1_CACHE_BYTES) cached_index_counters {};

struct __aligned(L1_CACHE_BYTES) cached_chapter_index {};

/*
 * A search_list represents an ordering of the sparse chapter index cache entry array, from most
 * recently accessed to least recently accessed, which is the order in which the indexes should be
 * searched and the reverse order in which they should be evicted from the cache.
 *
 * Cache entries that are dead or empty are kept at the end of the list, avoiding the need to even
 * iterate over them to search, and ensuring that dead entries are replaced before any live entries
 * are evicted.
 *
 * The search list is instantiated for each zone thread, avoiding any need for synchronization. The
 * structure is allocated on a cache boundary to avoid false sharing of memory cache lines between
 * zone threads.
 */
struct search_list {};

struct threads_barrier {};

struct sparse_cache {};

static void initialize_threads_barrier(struct threads_barrier *barrier,
				       unsigned int thread_count)
{}

static inline void __down(struct semaphore *semaphore)
{}

static void enter_threads_barrier(struct threads_barrier *barrier)
{}

static int __must_check initialize_cached_chapter_index(struct cached_chapter_index *chapter,
							const struct index_geometry *geometry)
{}

static int __must_check make_search_list(struct sparse_cache *cache,
					 struct search_list **list_ptr)
{}

int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int capacity,
			  unsigned int zone_count, struct sparse_cache **cache_ptr)
{}

static inline void set_skip_search(struct cached_chapter_index *chapter,
				   bool skip_search)
{}

static void score_search_hit(struct cached_chapter_index *chapter)
{}

static void score_search_miss(struct sparse_cache *cache,
			      struct cached_chapter_index *chapter)
{}

static void release_cached_chapter_index(struct cached_chapter_index *chapter)
{}

void uds_free_sparse_cache(struct sparse_cache *cache)
{}

/*
 * Take the indicated element of the search list and move it to the start, pushing the pointers
 * previously before it back down the list.
 */
static inline void set_newest_entry(struct search_list *search_list, u8 index)
{}

bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
			       unsigned int zone_number)
{}

/*
 * Re-sort cache entries into three sets (active, skippable, and dead) while maintaining the LRU
 * ordering that already existed. This operation must only be called during the critical section in
 * uds_update_sparse_cache().
 */
static void purge_search_list(struct search_list *search_list,
			      struct sparse_cache *cache, u64 oldest_virtual_chapter)
{}

static int __must_check cache_chapter_index(struct cached_chapter_index *chapter,
					    u64 virtual_chapter,
					    const struct volume *volume)
{}

static inline void copy_search_list(const struct search_list *source,
				    struct search_list *target)
{}

/*
 * Update the sparse cache to contain a chapter index. This function must be called by all the zone
 * threads with the same chapter number to correctly enter the thread barriers used to synchronize
 * the cache updates.
 */
int uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter)
{}

void uds_invalidate_sparse_cache(struct sparse_cache *cache)
{}

static inline bool should_skip_chapter(struct cached_chapter_index *chapter,
				       u64 oldest_chapter, u64 requested_chapter)
{}

static int __must_check search_cached_chapter_index(struct cached_chapter_index *chapter,
						    const struct index_geometry *geometry,
						    const struct index_page_map *index_page_map,
						    const struct uds_record_name *name,
						    u16 *record_page_ptr)
{}

int uds_search_sparse_cache(struct index_zone *zone, const struct uds_record_name *name,
			    u64 *virtual_chapter_ptr, u16 *record_page_ptr)
{}