// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bbpos.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "trace.h"

#include <linux/prefetch.h>
#include <linux/sched/mm.h>

#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do {						 \
	if (shrinker_counter)			 \
		bc->not_freed_##counter++;	 \
} while (0)

const char * const bch2_btree_node_flags[] = {
#define x(f)	#f,
	BTREE_FLAGS()
#undef x
	NULL
};

void bch2_recalc_btree_reserve(struct bch_fs *c)
{}

static inline unsigned btree_cache_can_free(struct btree_cache *bc)
{}

static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
{}

static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{}

static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				   const void *obj)
{}

/*
 * The btree node cache is keyed by ->hash_val, a 64-bit value derived from
 * the node's pointer key:
 */
static const struct rhashtable_params bch_btree_cache_params = {
	.head_offset	= offsetof(struct btree, hash),
	.key_offset	= offsetof(struct btree, hash_val),
	.key_len	= sizeof(u64),
	.obj_cmpfn	= bch2_btree_cache_cmp_fn,
};

static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{}

static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{}

struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{}

/* Btree in memory cache - hash table */

void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{}

int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
{}

int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
				unsigned level, enum btree_id id)
{}

void bch2_btree_node_update_key_early(struct btree_trans *trans,
				      enum btree_id btree, unsigned level,
				      struct bkey_s_c old, struct bkey_i *new)
{}

__flatten
static inline struct btree *btree_cache_find(struct btree_cache *bc,
				     const struct bkey_i *k)
{}

/*
 * this version is for btree nodes that have already been freed (we're not
 * reaping a real btree node)
 */
static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter)
{}

static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter)
{
	/* Reclaim without writing out dirty data: */
	return __btree_node_reclaim(c, b, false, shrinker_counter);
}

static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
{
	/* Write out the node first, then reclaim it: */
	return __btree_node_reclaim(c, b, true, false);
}

static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{}

static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc)
{}
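
/*
 * Registration sketch (hypothetical, for illustration only; the real setup
 * lives in bch2_fs_btree_cache_init() below). With the shrinker_alloc() API,
 * wiring up the two callbacks above looks roughly like:
 *
 *	struct shrinker *shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
 *
 *	if (!shrink)
 *		return -ENOMEM;
 *	shrink->count_objects	= bch2_btree_cache_count;
 *	shrink->scan_objects	= bch2_btree_cache_scan;
 *	shrink->private_data	= c;
 *	shrinker_register(shrink);
 */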

void bch2_fs_btree_cache_exit(struct bch_fs *c)
{}

int bch2_fs_btree_cache_init(struct bch_fs *c)
{}

void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
{}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open-coded mutex to ensure that, taken by
 * bch2_btree_cache_cannibalize_lock(). This means every time we unlock the
 * root of the btree, we need to release this lock if we have it held.
 */
void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
{}

int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
{}
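
/*
 * Usage sketch (hypothetical caller, for illustration only): an allocation
 * path that may need to cannibalize waits on @cl and retries if another
 * thread already holds the cannibalize lock:
 *
 *	struct closure cl;
 *	struct btree *b;
 *
 *	closure_init_stack(&cl);
 *
 *	while (bch2_btree_cache_cannibalize_lock(trans, &cl))
 *		closure_sync(&cl);
 *
 *	b = bch2_btree_node_mem_alloc(trans, false);
 *	...
 *	bch2_btree_cache_cannibalize_unlock(trans);
 */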

static struct btree *btree_node_cannibalize(struct bch_fs *c)
{}

struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{}

/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
				struct btree_path *path,
				const struct bkey_i *k,
				enum btree_id btree_id,
				unsigned level,
				enum six_lock_type lock_type,
				bool sync)
{}

static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
{}

static inline void btree_check_header(struct bch_fs *c, struct btree *b)
{}

static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
					   const struct bkey_i *k, unsigned level,
					   enum six_lock_type lock_type,
					   unsigned long trace_ip)
{}

/**
 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * @trans:	btree transaction object
 * @path:	btree_path being traversed
 * @k:		pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
 * @level:	level of btree node being looked up (0 == leaf node)
 * @lock_type:	SIX_LOCK_read or SIX_LOCK_intent
 * @trace_ip:	ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
 *
 * The btree node will have either a read or an intent lock held, depending on
 * @lock_type.
 *
 * Returns: btree node or ERR_PTR()
 */
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
				  const struct bkey_i *k, unsigned level,
				  enum six_lock_type lock_type,
				  unsigned long trace_ip)
{}
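
/*
 * Usage sketch (hypothetical, for illustration only): during traversal,
 * @level and the lock type come from the btree_path, and @k is the pointer
 * to the child node found in the parent:
 *
 *	struct btree *b = bch2_btree_node_get(trans, path, k, level,
 *					      SIX_LOCK_intent, _THIS_IP_);
 *
 *	if (IS_ERR(b))
 *		return PTR_ERR(b);
 */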

struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
					 const struct bkey_i *k,
					 enum btree_id btree_id,
					 unsigned level,
					 bool nofill)
{}

int bch2_btree_node_prefetch(struct btree_trans *trans,
			     struct btree_path *path,
			     const struct bkey_i *k,
			     enum btree_id btree_id, unsigned level)
{}

void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
{}

const char *bch2_btree_id_str(enum btree_id btree)
{}

void bch2_btree_id_to_text(struct printbuf *out, enum btree_id btree)
{}

void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{}

void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{}

static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
				 const char *label, unsigned nr)
{}

void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
{}
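
/*
 * Usage sketch (hypothetical): cache statistics can be dumped to the log via
 * a printbuf:
 *
 *	struct printbuf buf = PRINTBUF;
 *
 *	bch2_btree_cache_to_text(&buf, &c->btree_cache);
 *	pr_info("%s", buf.buf);
 *	printbuf_exit(&buf);
 */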