#define pr_fmt(fmt) …
#include "util.h"
#include "bset.h"
#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>
#ifdef CONFIG_BCACHE_DEBUG
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{ … }
void bch_dump_bucket(struct btree_keys *b)
{ … }
int __bch_count_data(struct btree_keys *b)
{ … }
void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{ … }
static void bch_btree_iter_next_check(struct btree_iter *iter)
{ … }
#else
static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
#endif
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{ … }
struct bkey *bch_keylist_pop(struct keylist *l)
{ … }
void bch_keylist_pop_front(struct keylist *l)
{ … }
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
unsigned int i)
{ … }
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{ … }
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{ … }
#define BKEY_MID_BITS …
#define BKEY_EXPONENT_BITS …
#define BKEY_MANTISSA_BITS …
#define BKEY_MANTISSA_MASK …
struct bkey_float { … } __packed;
#define BSET_CACHELINE …
static inline size_t btree_keys_bytes(struct btree_keys *b)
{ … }
static inline size_t btree_keys_cachelines(struct btree_keys *b)
{ … }
static inline size_t bset_tree_bytes(struct btree_keys *b)
{ … }
static inline size_t bset_prev_bytes(struct btree_keys *b)
{ … }
void bch_btree_keys_free(struct btree_keys *b)
{ … }
int bch_btree_keys_alloc(struct btree_keys *b,
unsigned int page_order,
gfp_t gfp)
{ … }
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
{ … }
static unsigned int inorder_next(unsigned int j, unsigned int size)
{ … }
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{ … }
static unsigned int __to_inorder(unsigned int j,
unsigned int size,
unsigned int extra)
{ … }
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{ … }
static unsigned int __inorder_to_tree(unsigned int j,
unsigned int size,
unsigned int extra)
{ … }
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{ … }
#if 0
/*
 * Self-test for the auxiliary search tree index arithmetic: for every
 * tree size it walks all positions and verifies that __to_inorder() and
 * __inorder_to_tree() are exact inverses of each other, and that
 * inorder_prev() undoes inorder_next().
 *
 * Compiled out (#if 0); kept in-tree as executable documentation of the
 * invariants the inorder helpers above must satisfy.
 */
void inorder_test(void)
{
	/* Total (i, j) pairs verified so far — used only for the rate printout. */
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned int size = 2;
	     size < 65536000;
	     size++) {
		/*
		 * Number of "extra" nodes past the largest power of two
		 * below size; mirrors the extra argument the helpers take.
		 */
		unsigned int extra =
			(size - rounddown_pow_of_two(size - 1)) << 1;
		/* i: in-order rank (1-based); j: leftmost tree index for this size. */
		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

		/*
		 * Progress report every 4096 sizes.
		 * NOTE(review): divides by ktime_us_delta(); looks like it
		 * assumes at least 1us has elapsed by the first print —
		 * confirm no division by zero on very fast hardware.
		 */
		if (!(size % 4096))
			pr_notice("loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			/* Both directions of the mapping must agree at (i, j). */
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			/* Last in-order position for this size: stop the walk. */
			if (j == rounddown_pow_of_two(size) - 1)
				break;

			/* next/prev must be inverses before advancing. */
			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif
static struct bkey *cacheline_to_bkey(struct bset_tree *t,
unsigned int cacheline,
unsigned int offset)
{ … }
static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{ … }
static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
unsigned int cacheline,
struct bkey *k)
{ … }
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{ … }
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{ … }
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{ … }
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{ … }
static inline unsigned int bfloat_mantissa(const struct bkey *k,
struct bkey_float *f)
{ … }
static void make_bfloat(struct bset_tree *t, unsigned int j)
{ … }
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{ … }
static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{ … }
void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{ … }
void bch_bset_build_written_tree(struct btree_keys *b)
{ … }
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{ … }
static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
struct bkey *k)
{ … }
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{ … }
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *insert)
{ … }
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
{ … }
struct bset_search_iter { … };
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{ … }
static struct bset_search_iter bset_search_tree(struct bset_tree *t,
const struct bkey *search)
{ … }
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search)
{ … }
/*
 * Comparison-callback type for the btree iterator heap: returns true when
 * the key referenced by the first element sorts before (or equal to) the
 * second.  Matches the signature of new_btree_iter_cmp() below and is used
 * as the cmp parameter of __bch_btree_iter_next(); the trailing void *args
 * is the min_heap callback context (unused here).
 */
typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
{ … }
static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{ … }
static inline bool btree_iter_end(struct btree_iter *iter)
{ … }
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end)
{ … }
static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search,
struct bset_tree *start)
{ … }
struct bkey *bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search)
{ … }
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
new_btree_iter_cmp_fn *cmp)
{ … }
struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{ … }
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *b, ptr_filter_fn fn)
{ … }
void bch_bset_sort_state_free(struct bset_sort_state *state)
{ … }
int bch_bset_sort_state_init(struct bset_sort_state *state,
unsigned int page_order)
{ … }
static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{ … }
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
unsigned int start, unsigned int order, bool fixup,
struct bset_sort_state *state)
{ … }
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{ … }
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
struct btree_iter *iter,
struct bset_sort_state *state)
{ … }
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{ … }
#define SORT_CRIT …
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{ … }
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{ … }