/* linux/fs/bcachefs/btree_update_interior.c */

// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "sb-members.h"
#include "super-io.h"
#include "trace.h"

#include <linux/random.h>

static const char * const bch2_btree_update_modes[] =;

static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
				  btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);

/*
 * Verify that child nodes correctly span parent node's range:
 */
/* NOTE(review): body is empty in this snapshot; this file appears to be a
 * skeleton of the real implementation. */
int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
{}

/* Calculate ideal packed bkey format for new btree nodes: */

/* Stub (empty body): presumably folds @b's key bounds into the
 * accumulator @s — TODO confirm against full source. */
static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{}

/* Stub (empty body): presumably computes a packed key format for @b,
 * likely via __bch2_btree_calc_format() above. */
static struct bkey_format bch2_btree_calc_format(struct btree *b)
{}

/* Stub (empty body): presumably estimates the u64s needed to store @nr
 * keys after repacking from @old_f to @new_f. */
static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
					  struct bkey_format *old_f,
					  struct bkey_format *new_f)
{}

/**
 * bch2_btree_node_format_fits - check if we could rewrite node with a new format
 *
 * @c:		filesystem handle
 * @b:		btree node to rewrite
 * @nr:		number of keys for new node (i.e. b->nr)
 * @new_f:	bkey format to translate keys to
 *
 * Returns: true if all re-packed keys will be able to fit in a new node.
 *
 * Assumes all keys will successfully pack with the new format.
 */
/* Stub: body empty in this snapshot. */
static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
				 struct btree_nr_keys nr,
				 struct bkey_format *new_f)
{}

/* Btree node freeing/allocation: */

/* Stub (empty body): low-level free of an in-memory btree node. */
static void __btree_node_free(struct btree_trans *trans, struct btree *b)
{}

/* Stub (empty body): presumably frees @b and fixes up @path's in-memory
 * state — TODO confirm. */
static void bch2_btree_node_free_inmem(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree *b)
{}

/* Stub (empty body): presumably frees a node allocated by @as that was
 * never made reachable on disk. */
static void bch2_btree_node_free_never_used(struct btree_update *as,
					    struct btree_trans *trans,
					    struct btree *b)
{}

/* Stub (empty body): presumably allocates a new btree node against @res;
 * @cl would allow blocking/waiting, @interior_node and @flags tune the
 * reserve used — TODO confirm against full source. */
static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
					     struct disk_reservation *res,
					     struct closure *cl,
					     bool interior_node,
					     unsigned flags)
{}

/* Stub (empty body): presumably takes a preallocated node from @as's
 * reserve for use at @level. */
static struct btree *bch2_btree_node_alloc(struct btree_update *as,
					   struct btree_trans *trans,
					   unsigned level)
{}

/* Stub (empty body): presumably sets @b's minimum key to @pos. */
static void btree_set_min(struct btree *b, struct bpos pos)
{}

/* Stub (empty body): presumably sets @b's maximum key to @pos. */
static void btree_set_max(struct btree *b, struct bpos pos)
{}

/* Stub (empty body): presumably allocates a node intended to replace @b
 * (same level/key range). */
static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
						       struct btree_trans *trans,
						       struct btree *b)
{}

/* Stub (empty body): presumably allocates a new (empty) root at @level. */
static struct btree *__btree_root_alloc(struct btree_update *as,
				struct btree_trans *trans, unsigned level)
{}

/* Stub (empty body): presumably returns unused reserved nodes from @as. */
static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans)
{}

/* Stub (empty body): presumably prereserves nodes for @as — nr_nodes[2]
 * suggests separate leaf/interior counts; @cl permits waiting on
 * allocation — TODO confirm. */
static int bch2_btree_reserve_get(struct btree_trans *trans,
				  struct btree_update *as,
				  unsigned nr_nodes[2],
				  unsigned flags,
				  struct closure *cl)
{}

/* Asynchronous interior node update machinery */

/* Stub (empty body): presumably tears down and frees @as. */
static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans)
{}

/* Stub (empty body): presumably appends a key referencing @b to @keys on
 * behalf of @as. */
static void btree_update_add_key(struct btree_update *as,
				 struct keylist *keys, struct btree *b)
{}

/* Stub (empty body): presumably checks whether @as's new nodes are already
 * marked in the superblock. */
static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
{}

/* Stub (empty body): presumably marks @as's new nodes in the superblock. */
static void btree_update_new_nodes_mark_sb(struct btree_update *as)
{}

/*
 * The transactional part of an interior btree node update, where we journal the
 * update we did to the interior node and update alloc info:
 */
/* Stub: body empty in this snapshot. */
static int btree_update_nodes_written_trans(struct btree_trans *trans,
					    struct btree_update *as)
{}

/* Stub (empty body): presumably the completion path run once @as's new
 * nodes have been written. */
static void btree_update_nodes_written(struct btree_update *as)
{}

/* Stub (empty body): workqueue entry point for interior update work;
 * presumably recovers the btree_update via container_of(work, ...). */
static void btree_interior_update_work(struct work_struct *work)
{}

/* Stub (empty body): closure callback — presumably fires when all writes
 * tracked by an update's closure complete. */
static CLOSURE_CALLBACK(btree_update_set_nodes_written)
{}

/*
 * We're updating @b with pointers to nodes that haven't finished writing yet:
 * block @b from being written until @as completes
 */
/* Stub: body empty in this snapshot. */
static void btree_update_updated_node(struct btree_update *as, struct btree *b)
{}

/* Stub (empty body): journal-pin flush callback used when reparenting
 * an update (signature matches journal_pin_flush_fn convention —
 * TODO confirm). */
static int bch2_update_reparent_journal_pin_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{}

/* Stub (empty body): presumably transfers @child's outstanding
 * dependencies onto @as. */
static void btree_update_reparent(struct btree_update *as,
				  struct btree_update *child)
{}

/* Stub (empty body): presumably records that @as will make @b the new
 * root. */
static void btree_update_updated_root(struct btree_update *as, struct btree *b)
{}

/*
 * bch2_btree_update_add_new_node:
 *
 * This causes @as to wait on @b to be written, before it gets to
 * bch2_btree_update_nodes_written
 *
 * Additionally, it sets b->will_make_reachable to prevent any additional writes
 * to @b from happening besides the first until @b is reachable on disk
 *
 * And it adds @b to the list of @as's new nodes, so that we can update sector
 * counts in bch2_btree_update_nodes_written:
 */
/* Stub: body empty in this snapshot. */
static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
{}

/*
 * Drops @b's "new node" tracking state, if any.
 *
 * NOTE(review): the original comment here claimed "returns true if @b was
 * a new node", but the function returns void.
 */
static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
{}

/* Stub (empty body): presumably transfers @b's open buckets to @as for
 * later release. */
static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
{}

/* Stub (empty body): journal-pin flush callback for nodes an update will
 * free. */
static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{}

/*
 * @b is being split/rewritten: it may have pointers to not-yet-written btree
 * nodes and thus outstanding btree_updates - redirect @b's
 * btree_updates to point to this btree_update:
 */
/* Stub: body empty in this snapshot. */
static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
						      struct btree *b)
{}

/* Stub (empty body): presumably finalizes @as once the in-memory part of
 * the update is complete. */
static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans)
{}

/* Stub (empty body): presumably allocates and initializes a btree_update
 * for an interior update rooted at @level_start; a pointer return in
 * bcachefs typically uses ERR_PTR() on failure — TODO confirm. */
static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
			unsigned level_start, bool split, unsigned flags)
{}

/* Btree root updates: */

/* Stub (empty body): presumably installs @b as the in-memory root of its
 * btree. */
static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
{}

/* Stub (empty body): presumably sets @b as root on behalf of @as;
 * semantics of @nofail not visible here — TODO confirm. */
static int bch2_btree_set_root(struct btree_update *as,
			       struct btree_trans *trans,
			       struct btree_path *path,
			       struct btree *b,
			       bool nofail)
{}

/* Interior node updates: */

/* Stub (empty body): presumably inserts the btree-ptr key @insert into
 * interior node @b at @node_iter, with any needed fixups. */
static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
					struct btree_trans *trans,
					struct btree_path *path,
					struct btree *b,
					struct btree_node_iter *node_iter,
					struct bkey_i *insert)
{}

/* Stub (empty body): presumably drains @keys into interior node @b
 * starting at @node_iter (note: node_iter is passed by value). */
static void
bch2_btree_insert_keys_interior(struct btree_update *as,
				struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter node_iter,
				struct keylist *keys)
{}

/*
 * Move keys from n1 (original replacement node, now lower node) to n2 (higher
 * node)
 */
/* Stub: body empty in this snapshot; n[2] are the two halves of the
 * split of @b. */
static void __btree_split_node(struct btree_update *as,
			       struct btree_trans *trans,
			       struct btree *b,
			       struct btree *n[2])
{}

/*
 * For updates to interior nodes, we've got to do the insert before we split
 * because the stuff we're inserting has to be inserted atomically. Post split,
 * the keys might have to go in different nodes and the split would no longer be
 * atomic.
 *
 * Worse, if the insert is from btree node coalescing, if we do the insert after
 * we do the split (and pick the pivot) - the pivot we pick might be between
 * nodes that were coalesced, and thus in the middle of a child node post
 * coalescing:
 */
/* Stub: body empty in this snapshot. */
static void btree_split_insert_keys(struct btree_update *as,
				    struct btree_trans *trans,
				    btree_path_idx_t path_idx,
				    struct btree *b,
				    struct keylist *keys)
{}

/* Stub (empty body): presumably splits @b, inserting @keys first (see
 * comment above btree_split_insert_keys()); returns 0 or an error. */
static int btree_split(struct btree_update *as, struct btree_trans *trans,
		       btree_path_idx_t path, struct btree *b,
		       struct keylist *keys)
{}

/**
 * bch2_btree_insert_node - insert bkeys into a given btree node
 *
 * @as:			btree_update object
 * @trans:		btree_trans object
 * @path_idx:		path that points to current node
 * @b:			node to insert keys into
 * @keys:		list of keys to insert
 *
 * Returns: 0 on success, typically transaction restart error on failure
 *
 * Inserts as many keys as it can into a given btree node, splitting it if full.
 * If a split occurred, this function will return early. This can only happen
 * for leaf nodes -- inserts into interior nodes have to be atomic.
 */
/* Stub: body empty in this snapshot. */
static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
				  btree_path_idx_t path_idx, struct btree *b,
				  struct keylist *keys)
{}

/* Stub (empty body): public entry point — presumably splits the leaf node
 * that @path points to. */
int bch2_btree_split_leaf(struct btree_trans *trans,
			  btree_path_idx_t path,
			  unsigned flags)
{}

/* Stub (empty body): presumably allocates a new root above the current
 * one on behalf of @as. */
static void __btree_increase_depth(struct btree_update *as, struct btree_trans *trans,
				   btree_path_idx_t path_idx)
{}

/* Stub (empty body): public entry point wrapping __btree_increase_depth()
 * in a btree_update — TODO confirm. */
int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, unsigned flags)
{}

/* Stub (empty body): presumably attempts to merge the node at
 * @path/@level with its @sib sibling. */
int __bch2_foreground_maybe_merge(struct btree_trans *trans,
				  btree_path_idx_t path,
				  unsigned level,
				  unsigned flags,
				  enum btree_node_sibling sib)
{}

/* Stub (empty body): presumably rewrites @b into a freshly allocated
 * node. */
int bch2_btree_node_rewrite(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct btree *b,
			    unsigned flags)
{}

/* Context for an asynchronously-queued node rewrite.
 * NOTE(review): members elided in this snapshot; also note an empty
 * struct is a GNU extension, not strict ISO C. */
struct async_btree_rewrite {};

/* Stub (empty body): transactional part of an async node rewrite. */
static int async_btree_node_rewrite_trans(struct btree_trans *trans,
					  struct async_btree_rewrite *a)
{}

/* Stub (empty body): workqueue entry point; presumably recovers the
 * async_btree_rewrite via container_of(work, ...). */
static void async_btree_node_rewrite_work(struct work_struct *work)
{}

/* Stub (empty body): presumably queues @b for rewrite on a workqueue. */
void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
{}

/* Stub (empty body): presumably runs rewrites queued before the fs was
 * ready. */
void bch2_do_pending_node_rewrites(struct bch_fs *c)
{}

/* Stub (empty body): presumably discards queued-but-unrun rewrites. */
void bch2_free_pending_node_rewrites(struct bch_fs *c)
{}

/* Stub (empty body): presumably replaces @b's key with @new_key,
 * optionally via the hash-table placeholder node @new_hash;
 * @skip_triggers presumably bypasses transactional triggers —
 * TODO confirm. */
static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
					unsigned commit_flags,
					bool skip_triggers)
{}

/* Stub (empty body): public wrapper around __bch2_btree_node_update_key(). */
int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
			       unsigned commit_flags, bool skip_triggers)
{}

/* Stub (empty body): like bch2_btree_node_update_key() but presumably
 * obtains its own iterator to @b first. */
int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
					unsigned commit_flags, bool skip_triggers)
{}

/* Init code: */

/*
 * Only for filesystem bringup, when first reading the btree roots or allocating
 * btree roots when initializing a new filesystem:
 */
/* Stub: body empty in this snapshot. */
void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
{}

/* Stub (empty body): presumably allocates an empty "fake" root for btree
 * @id at @level, transactionally. */
int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level)
{}

/* Stub (empty body): non-transactional wrapper around
 * bch2_btree_root_alloc_fake_trans() — TODO confirm. */
void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
{}

/* Stub (empty body): presumably prints @as's state to @out for debugfs/
 * sysfs. */
static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
{}

/* Stub (empty body): presumably prints all in-flight interior updates. */
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
{}

/* Stub (empty body): presumably returns true while interior updates are
 * still in flight. */
static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
{}

/* Stub (empty body): presumably waits for in-flight interior updates;
 * bool return semantics not visible here — TODO confirm. */
bool bch2_btree_interior_updates_flush(struct bch_fs *c)
{}

/* Stub (empty body): presumably applies a journaled btree-root entry to
 * in-memory state. */
void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
{}

/* Stub (empty body): presumably emits btree roots as journal entries
 * after @end, skipping btrees in the @skip bitmask — TODO confirm. */
struct jset_entry *
bch2_btree_roots_to_journal_entries(struct bch_fs *c,
				    struct jset_entry *end,
				    unsigned long skip)
{}

/* Stub (empty body): presumably prints one reserve-cache entry @a. */
static void bch2_btree_alloc_to_text(struct printbuf *out,
				     struct bch_fs *c,
				     struct btree_alloc *a)
{}

/* Stub (empty body): presumably prints the btree node reserve cache. */
void bch2_btree_reserve_cache_to_text(struct printbuf *out, struct bch_fs *c)
{}

/* Stub (empty body): teardown counterpart of
 * bch2_fs_btree_interior_update_init(). */
void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
{}

/* Stub (empty body): early (allocation-free) init — TODO confirm. */
void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
{}

/* Stub (empty body): presumably allocates workqueues/mempools for
 * interior updates; returns 0 or -ENOMEM-style error — TODO confirm. */
int bch2_fs_btree_interior_update_init(struct bch_fs *c)
{}