linux/fs/bcachefs/btree_io.c

// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{}

void bch2_btree_node_io_unlock(struct btree *b)
{}

void bch2_btree_node_io_lock(struct btree *b)
{}

void __bch2_btree_node_wait_on_read(struct btree *b)
{}

void __bch2_btree_node_wait_on_write(struct btree *b)
{}

void bch2_btree_node_wait_on_read(struct btree *b)
{}

void bch2_btree_node_wait_on_write(struct btree *b)
{}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{}

static void set_needs_whiteout(struct bset *i, int v)
{}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{}

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{}

void bch2_btree_sort_into(struct bch_fs *c,
			 struct btree *dst,
			 struct btree *src)
{}

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets, sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{}
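
/*
 * Illustrative sketch of the policy described above (hypothetical helper, not
 * necessarily the in-tree logic): once @b is at MAX_BSETS, merge the newest
 * two bsets into one with btree_node_sort() so that the next insert has a
 * fresh bset to go into:
 *
 *	static bool example_compact(struct bch_fs *c, struct btree *b)
 *	{
 *		if (b->nsets < MAX_BSETS)
 *			return false;
 *
 *		btree_node_sort(c, b, b->nsets - 2, b->nsets);
 *		return true;
 *	}
 */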

void bch2_btree_build_aux_trees(struct btree *b)
{}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{}
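
/*
 * Worked example of the check above (names and helper are hypothetical): with
 * bset sizes s0 >= s1 >= s2, "the middle bset exceeds the geometric mean of
 * the first and last" can be tested in integer arithmetic by squaring both
 * sides of s1 > sqrt(s0 * s2):
 *
 *	static inline bool middle_bset_too_big(u64 s0, u64 s1, u64 s2)
 *	{
 *		return s1 * s1 > s0 * s2;
 *	}
 */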

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * May sort the existing bsets first, which invalidates any outstanding
 * iterators.
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{}
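
/*
 * Example call site (illustrative; assumes the caller holds the appropriate
 * lock on @b for its update path - see btree_locking.h for the actual rules):
 *
 *	bch2_btree_init_next(trans, b);
 *	// @b now has an unwritten bset to insert into; calling this again
 *	// while that bset is still unwritten won't add another one
 */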

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int write)
{}

__printf(10, 11)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{}

#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)

#define btree_err_on(cond, ...)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{}
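
/*
 * Sketch of the idea (pseudocode; the iteration helpers here are hypothetical,
 * not the in-tree ones): after topology repair, any key whose position lies
 * outside [b->data->min_key, b->data->max_key] no longer belongs to @b and is
 * cut out of its bset:
 *
 *	for each bset t in b:
 *		for each key k in t:
 *			if (pos(k) < b->data->min_key ||
 *			    pos(k) > b->data->max_key)
 *				remove k from t (and fix up t->u64s)
 */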

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k,
			    bool updated_range, int rw,
			    struct printbuf *err)
{}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			 struct bset *i, struct bkey_packed *k)
{}

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			 struct bset *i, int write,
			 bool have_retry, bool *saw_error)
{}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{}

static void btree_node_read_work(struct work_struct *work)
{}

static void btree_node_read_endio(struct bio *bio)
{}

struct btree_node_read_all {};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{}
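
/*
 * Background on the XXX above (general mempool behaviour, not specific to this
 * file): mempool_alloc() only guarantees forward progress when each user holds
 * at most one element at a time, so a single thread doing
 *
 *	a = mempool_alloc(pool, GFP_NOFS);
 *	b = mempool_alloc(pool, GFP_NOFS);
 *
 * can block forever on the second call once the pool's reserve is exhausted
 * and nothing else is freeing - hence the deadlock risk noted for this debug
 * path, which allocates multiple times from the same mempools.
 */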

void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{}

static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			const struct bkey_i *k, unsigned level)
{}

static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{}

static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{}

static void btree_node_write_work(struct work_struct *work)
{}

static void btree_node_write_endio(struct bio *bio)
{}

static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{}

static void btree_write_submit(struct work_struct *work)
{}

void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{}
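
/*
 * Example (illustrative; assumes the caller holds an intent lock on @b, which
 * is what allows taking the write lock here):
 *
 *	if (six_trylock_write(&b->c.lock)) {
 *		bch2_btree_post_write_cleanup(c, b);
 *		six_unlock_write(&b->c.lock);
 *	}
 */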

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{}
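
/*
 * Example usage (illustrative; assumes the caller already holds an intent lock
 * on @b, e.g. through a btree path, and passes no special write flags):
 *
 *	bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
 *
 * Callers that manage the write lock themselves can instead pair
 * __bch2_btree_node_write() with bch2_btree_post_write_cleanup().
 */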

static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{}

static const char * const bch2_btree_write_types[] =;

void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{}