
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"

/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned int safe_div(unsigned int n, unsigned int d)
{
	return d ? n / d : 0u;
}

static unsigned int safe_mod(unsigned int n, unsigned int d)
{
	return d ? n % d : 0u;
}

/*----------------------------------------------------------------*/

struct entry {};

/*----------------------------------------------------------------*/

#define INDEXER_NULL

/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {};

static int space_init(struct entry_space *es, unsigned int nr_entries)
{}

static void space_exit(struct entry_space *es)
{}

static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{}

static struct entry *to_entry(struct entry_space *es, unsigned int block)
{}
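
/*
 * Illustrative sketch only (not the stubs above): one plausible shape for an
 * entry_space is a single vmalloc'd array of entries, with the queues and
 * hash chains referring to entries by array index rather than by pointer.
 * The example_* names are hypothetical, and the sketch assumes struct entry
 * is filled in with its link and metadata fields.
 */
struct example_entry_space {
	struct entry *begin;
	struct entry *end;
};

static int example_space_init(struct example_entry_space *es, unsigned int nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(sizeof(struct entry) * nr_entries);
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}

static void example_space_exit(struct example_entry_space *es)
{
	vfree(es->begin);
}

static unsigned int example_to_index(struct example_entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *example_to_entry(struct example_entry_space *es, unsigned int index)
{
	return es->begin + index;
}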

/*----------------------------------------------------------------*/

struct ilist {};

static void l_init(struct ilist *l)
{}

static struct entry *l_head(struct entry_space *es, struct ilist *l)
{}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{}

static bool l_empty(struct ilist *l)
{}

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{}

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{}

static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{}
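
/*
 * Illustrative sketch of the ilist idea: a doubly linked list whose links
 * are entry indexes rather than pointers, with a reserved index standing in
 * for NULL (INDEXER_NULL plays that role in the real code).  The example_*
 * names and EXAMPLE_NULL_IDX are hypothetical.
 */
#define EXAMPLE_NULL_IDX ((unsigned int) -1)

struct example_link {
	unsigned int prev;
	unsigned int next;
};

struct example_list {
	unsigned int nr_elts;
	unsigned int head, tail;
};

static void example_list_init(struct example_list *l)
{
	l->nr_elts = 0;
	l->head = l->tail = EXAMPLE_NULL_IDX;
}

static void example_list_add_head(struct example_link *links, struct example_list *l,
				  unsigned int i)
{
	struct example_link *n = links + i;

	n->prev = EXAMPLE_NULL_IDX;
	n->next = l->head;

	if (l->head != EXAMPLE_NULL_IDX)
		links[l->head].prev = i;
	else
		l->tail = i;

	l->head = i;
	l->nr_elts++;
}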

/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom.  This
 * structure is *much* better than a single lru list.
 */
#define MAX_LEVELS

struct queue {};

static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{}

static unsigned int q_size(struct queue *q)
{}

/*
 * Insert an entry at the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{}

static void q_push_front(struct queue *q, struct entry *e)
{}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{}

static void q_del(struct queue *q, struct entry *e)
{}

/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{}

static struct entry *q_pop(struct queue *q)
{}

/*
 * This function assumes there is a non-sentinel entry to pop.  It's only
 * used by redistribute, so we know this is true.  It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{}

static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
				    unsigned int lbegin, unsigned int lend)
{}

/*
 * Typically we have fewer elements in the top few levels, which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{}

static void q_redistribute(struct queue *q)
{}

static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
		      struct entry *s1, struct entry *s2)
{}
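
/*
 * Illustrative sketch of two calculations behind the queue above: how many
 * entries each level in a range should hold after redistribution, and which
 * level an entry moves to when it is requeued after a hit.  The example_*
 * names are hypothetical; the real q_redistribute()/q_requeue() also handle
 * sentinels and the per-level remainder.
 */
static void example_level_targets(unsigned int *targets, unsigned int nr_elts,
				  unsigned int lbegin, unsigned int lend)
{
	unsigned int level, per_level = safe_div(nr_elts, lend - lbegin);

	for (level = lbegin; level < lend; level++)
		targets[level] = per_level;
}

static unsigned int example_requeue_level(unsigned int level, unsigned int extra_levels,
					  unsigned int nr_levels)
{
	/* move up one level per hit, plus a boost, capped at the top level */
	return min(level + 1u + extra_levels, nr_levels - 1u);
}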

/*----------------------------------------------------------------*/

#define FP_SHIFT
#define SIXTEENTH
#define EIGHTH

struct stats {};

enum performance {};

static void stats_init(struct stats *s, unsigned int nr_levels)
{}

static void stats_reset(struct stats *s)
{}

static void stats_level_accessed(struct stats *s, unsigned int level)
{}

static void stats_miss(struct stats *s)
{}

/*
 * There are times when we don't have any confidence in the hotspot queue,
 * such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or when the I/O load changes.  We detect this by
 * seeing how often a lookup lands in the top levels of the hotspot queue.
 */
static enum performance stats_assess(struct stats *s)
{}
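
/*
 * Illustrative sketch of the confidence check described above: the fraction
 * of lookups that landed in the upper hotspot levels is expressed as 8-bit
 * fixed point and bucketed into poor/fair/well.  The thresholds (1/16th and
 * 1/8th) mirror SIXTEENTH and EIGHTH; the example_* names are hypothetical.
 */
enum example_performance {
	EXAMPLE_Q_POOR,
	EXAMPLE_Q_FAIR,
	EXAMPLE_Q_WELL,
};

static enum example_performance example_assess(unsigned int top_hits, unsigned int lookups)
{
	unsigned int confidence = safe_div(top_hits << 8u, lookups);

	if (confidence < 16u)		/* fewer than 1 in 16 lookups hit the top */
		return EXAMPLE_Q_POOR;
	else if (confidence < 32u)	/* fewer than 1 in 8 */
		return EXAMPLE_Q_FAIR;
	else
		return EXAMPLE_Q_WELL;
}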

/*----------------------------------------------------------------*/

struct smq_hash_table {};

/*
 * All cache entries are stored in a chained hash table.  To save space we
 * use indexing again, storing only the index of the next entry in each
 * chain.
 */
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{}

static void h_exit(struct smq_hash_table *ht)
{}

static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{}

static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{}

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
				struct entry **prev)
{}

static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
		       struct entry *e, struct entry *prev)
{}

/*
 * Also moves the entry it finds to the front of its bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{}

static void h_remove(struct smq_hash_table *ht, struct entry *e)
{}
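
/*
 * Illustrative sketch of the index-chained hash described above: each
 * bucket holds the index of its first entry and every entry holds the index
 * of the next one, so a chain costs a small integer per entry instead of a
 * pointer.  The example_* names, the reuse of EXAMPLE_NULL_IDX, and the
 * field layout are hypothetical.
 */
struct example_hentry {
	unsigned int hash_next;
	dm_oblock_t oblock;
};

struct example_htable {
	struct example_hentry *entries;	/* backing array, addressed by index */
	unsigned int *buckets;		/* head index for each bucket */
	unsigned int hash_bits;
};

static struct example_hentry *example_h_lookup(struct example_htable *ht, dm_oblock_t oblock)
{
	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
	unsigned int i;

	for (i = ht->buckets[h]; i != EXAMPLE_NULL_IDX; i = ht->entries[i].hash_next)
		if (ht->entries[i].oblock == oblock)
			return ht->entries + i;

	return NULL;
}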

/*----------------------------------------------------------------*/

struct entry_alloc {};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned int begin, unsigned int end)
{}

static void init_entry(struct entry *e)
{}

static struct entry *alloc_entry(struct entry_alloc *ea)
{}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{}

static bool allocator_empty(struct entry_alloc *ea)
{}

static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{}

static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{}
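
/*
 * Illustrative sketch of the allocator behaviour: alloc_entry() hands out
 * any free slot, while alloc_particular_entry() claims one specific slot
 * (used when mappings are reloaded and the cblock is already decided).
 * This sketch tracks free slots with a bitmap rather than the free list the
 * real allocator keeps; the example_* names are hypothetical.
 */
struct example_alloc {
	unsigned long *allocated;	/* bit set => slot in use */
	unsigned int nr_entries;
};

static int example_alloc_any(struct example_alloc *ea)
{
	unsigned long i = find_first_zero_bit(ea->allocated, ea->nr_entries);

	if (i >= ea->nr_entries)
		return -ENOSPC;

	__set_bit(i, ea->allocated);
	return i;
}

static void example_alloc_particular(struct example_alloc *ea, unsigned int i)
{
	/* the caller guarantees slot i hasn't already been allocated */
	BUG_ON(test_bit(i, ea->allocated));
	__set_bit(i, ea->allocated);
}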

/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS
#define NR_CACHE_LEVELS

#define WRITEBACK_PERIOD
#define DEMOTE_PERIOD

#define HOTSPOT_UPDATE_PERIOD
#define CACHE_UPDATE_PERIOD

struct smq_policy {};

/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{}

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{}

static void __update_writeback_sentinels(struct smq_policy *mq)
{}

static void __update_demote_sentinels(struct smq_policy *mq)
{}

static void update_sentinels(struct smq_policy *mq)
{}

static void __sentinels_init(struct smq_policy *mq)
{}

static void sentinels_init(struct smq_policy *mq)
{}
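
/*
 * Illustrative sketch of the period handling used when rotating sentinels:
 * the writeback and demote sentinels are only moved once their period
 * (WRITEBACK_PERIOD / DEMOTE_PERIOD above) has elapsed, measured in
 * jiffies.  The example_* name is hypothetical.
 */
static bool example_period_elapsed(unsigned long *next_update, unsigned long period)
{
	if (time_after(jiffies, *next_update)) {
		*next_update = jiffies + period;
		return true;
	}

	return false;
}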

/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{}

static void push_queue(struct smq_policy *mq, struct entry *e)
{}

// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{}

static void push_queue_front(struct smq_policy *mq, struct entry *e)
{}

static void push_front(struct smq_policy *mq, struct entry *e)
{}

static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{}

static void requeue(struct smq_policy *mq, struct entry *e)
{}

static unsigned int default_promote_level(struct smq_policy *mq)
{}

static void update_promote_levels(struct smq_policy *mq)
{}

/*
 * If the hotspot queue is performing badly, then we try to move entries
 * around more quickly.
 */
static void update_level_jump(struct smq_policy *mq)
{}

static void end_hotspot_period(struct smq_policy *mq)
{}

static void end_cache_period(struct smq_policy *mq)
{}

/*----------------------------------------------------------------*/

/*
 * Targets are given as a percentage.
 */
#define CLEAN_TARGET
#define FREE_TARGET

static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{}

static bool clean_target_met(struct smq_policy *mq, bool idle)
{}

static bool free_target_met(struct smq_policy *mq)
{}
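
/*
 * Illustrative sketch of how a percentage target translates into a block
 * count, e.g. keeping at least CLEAN_TARGET percent of the cache clean.
 * from_cblock() unwraps the opaque block count; the example_* name is
 * hypothetical.
 */
static unsigned int example_percent_to_target(dm_cblock_t cache_size, unsigned int p)
{
	return from_cblock(cache_size) * p / 100u;
}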

/*----------------------------------------------------------------*/

static void mark_pending(struct smq_policy *mq, struct entry *e)
{}

static void clear_pending(struct smq_policy *mq, struct entry *e)
{}

static void queue_writeback(struct smq_policy *mq, bool idle)
{}

static void queue_demotion(struct smq_policy *mq)
{}

static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_work **workp)
{}

/*----------------------------------------------------------------*/

enum promote_result {};

/*
 * Converts a boolean into a promote result.
 */
static enum promote_result maybe_promote(bool promote)
{}

static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
					  int data_dir, bool fast_promote)
{}

static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{}

static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{}
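
/*
 * Illustrative sketch of the origin-block to hotspot-block mapping used by
 * update_hotspot_queue(): hotspot blocks are larger than cache blocks, so
 * the origin block is divided by the number of cache blocks per hotspot
 * block to find which hotspot block it falls in.  The example_* name and
 * parameter are hypothetical stand-ins for fields of struct smq_policy.
 */
static dm_oblock_t example_to_hblock(dm_oblock_t b, unsigned int cache_blocks_per_hotspot_block)
{
	sector_t r = from_oblock(b);

	(void) sector_div(r, cache_blocks_per_hotspot_block);
	return to_oblock(r);
}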

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
{}

static void smq_destroy(struct dm_cache_policy *p)
{}

/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
		    int data_dir, bool fast_copy,
		    struct policy_work **work, bool *background_work)
{}

static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy,
		      bool *background_work)
{}

static int smq_lookup_with_work(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work)
{}

static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result)
{}

/*
 * We need to clear any pending work flags that have been set and, in the
 * case of promotion, free the entry for the destination cblock.
 */
static void __complete_background_work(struct smq_policy *mq,
				       struct policy_work *work,
				       bool success)
{}

static void smq_complete_background_work(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success)
{}

// in_hash(oblock) -> in_hash(oblock)
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
{}

static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{}

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{}

static unsigned int random_level(dm_cblock_t cblock)
{}

static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    bool dirty, uint32_t hint, bool hint_valid)
{}

static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
{}

static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{}

static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{}

static void smq_tick(struct dm_cache_policy *p, bool can_block)
{}

static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{}

/*
 * smq has no config values, but the old mq policy did.  To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{}

static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned int maxlen, ssize_t *sz_ptr)
{}
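
/*
 * Illustrative sketch of the compatibility shim described above: the old mq
 * tunables are still parsed so existing tooling doesn't fail, but the
 * values are thrown away.  The key names are the historical mq ones; the
 * example_* name is hypothetical.
 */
static int example_mq_set_config_value(const char *key, const char *value)
{
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold") ||
	    !strcasecmp(key, "sequential_threshold") ||
	    !strcasecmp(key, "discard_promote_adjustment") ||
	    !strcasecmp(key, "read_promote_adjustment") ||
	    !strcasecmp(key, "write_promote_adjustment")) {
		DMWARN("tunable '%s' no longer has any effect", key);
		return 0;
	}

	return -EINVAL;
}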

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{}
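
/*
 * Illustrative sketch of what wiring the interface usually looks like: each
 * callback of the embedded struct dm_cache_policy is pointed at the
 * matching smq_* function above, with the mq_* config shims only installed
 * when mimicking the old mq policy.  This assumes smq_policy embeds its
 * dm_cache_policy as a member named "policy"; the example_* name is
 * hypothetical and the field list is abbreviated.
 */
static void example_init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
	mq->policy.destroy = smq_destroy;
	mq->policy.lookup = smq_lookup;
	mq->policy.lookup_with_work = smq_lookup_with_work;
	mq->policy.get_background_work = smq_get_background_work;
	mq->policy.complete_background_work = smq_complete_background_work;
	mq->policy.set_dirty = smq_set_dirty;
	mq->policy.clear_dirty = smq_clear_dirty;
	mq->policy.load_mapping = smq_load_mapping;
	mq->policy.invalidate_mapping = smq_invalidate_mapping;
	mq->policy.get_hint = smq_get_hint;
	mq->policy.residency = smq_residency;
	mq->policy.tick = smq_tick;
	mq->policy.allow_migrations = smq_allow_migrations;

	if (mimic_mq) {
		mq->policy.set_config_value = mq_set_config_value;
		mq->policy.emit_config_values = mq_emit_config_values;
	}
}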

static bool too_many_hotspot_blocks(sector_t origin_size,
				    sector_t hotspot_block_size,
				    unsigned int nr_hotspot_blocks)
{}

static void calc_hotspot_params(sector_t origin_size,
				sector_t cache_block_size,
				unsigned int nr_cache_blocks,
				sector_t *hotspot_block_size,
				unsigned int *nr_hotspot_blocks)
{}

static struct dm_cache_policy *
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
	     bool mimic_mq, bool migrations_allowed, bool cleaner)
{}

static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {};

static struct dm_cache_policy_type mq_policy_type = {};

static struct dm_cache_policy_type cleaner_policy_type = {};

static struct dm_cache_policy_type default_policy_type = {};

static int __init smq_init(void)
{}

static void __exit smq_exit(void)
{}

module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");