// SPDX-License-Identifier: GPL-2.0
/* linux/fs/bcachefs/btree_write_buffer.c */

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "disk_accounting.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/prefetch.h>
#include <linux/sort.h>
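
/*
 * The btree write buffer: some btrees (backpointers, LRUs, accounting) see
 * updates far too frequent to apply synchronously. Keys destined for them
 * are journalled as normal, copied into this in-memory buffer when the
 * journal buffer is written, and applied to the destination btrees later in
 * large sorted batches. A journal pin is held for buffered keys so the
 * journal can't reclaim entries that haven't been flushed yet.
 */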

static int bch2_btree_write_buffer_journal_flush(struct journal *,
				struct journal_entry_pin *, u64);

static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);

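/*
 * wb_key_ref packs btree id and position, plus an index into the key array
 * in the low bits, so that references can be ordered with plain word
 * compares; the helpers below implement that ordering for the sort.
 */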
static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{}

static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{}

static int wb_key_seq_cmp(const void *_l, const void *_r)
{}

/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{}
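
/*
 * Illustrative sketch of the comparison above, not necessarily the verbatim
 * implementation: assuming wb_key_ref exposes its packed representation as
 * three u64 words lo/mi/hi, with idx occupying the low 24 bits of lo,
 * equality ignoring idx can be computed branchlessly:
 */
static inline bool wb_key_eq_sketch(const struct wb_key_ref *l,
				    const struct wb_key_ref *r)
{
	return !((l->hi ^ r->hi)|
		 (l->mi ^ r->mi)|
		 ((l->lo >> 24) ^ (r->lo >> 24)));
}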

static noinline void wb_sort(struct wb_key_ref *base, size_t num)
{}

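/*
 * Flushing a single buffered key has a fast path and a slow path:
 * wb_flush_one() tries to insert directly into the destination leaf node,
 * keeping it write locked so that runs of keys landing in the same node skip
 * a full transaction commit; wb_flush_one_slowpath() is used when that isn't
 * possible (e.g. the node is full and would need a split) and does a normal
 * btree_trans commit.
 */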
static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
					  struct btree_iter *iter,
					  struct btree_write_buffered_key *wb)
{}

static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree_write_buffered_key *wb,
			       bool *write_locked,
			       bool *accounting_accumulated,
			       size_t *fast)
{}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{}
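
/*
 * Minimal sketch of the above, assuming the btree_trans iterator helpers and
 * the flag spellings of current bcachefs (not the verbatim implementation):
 * the update is made with the journal seq the key was originally journalled
 * at, instead of taking a new journal reservation.
 */
static inline int btree_write_buffered_insert_sketch(struct btree_trans *trans,
					struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_intent|BTREE_ITER_cached);

	/* Reuse the seq from the original write buffer insert: */
	trans->journal_res.seq = wb->journal_seq;

	ret =   bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}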

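/*
 * The write buffer is double buffered: incoming keys are appended to wb->inc
 * while wb->flushing is being flushed; this moves accumulated keys (and the
 * journal pin protecting them) from the incoming buffer to the flushing
 * buffer.
 */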
static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
{}

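/*
 * Main flush path: key references are sorted by btree and position so that
 * when the same position was updated more than once only the newest version
 * is written (accounting keys are accumulated instead of overwritten). A
 * first pass walks the keys in btree order using the fast/slow paths above;
 * whatever couldn't be flushed there is re-sorted by journal seq and
 * committed via slowpath transactions so journal reclaim can make forward
 * progress.
 */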
static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{}

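/*
 * Journal buffers that haven't been written yet may still hold keys destined
 * for the write buffer: pull any such keys up to @seq into the buffer so a
 * subsequent flush sees them.
 */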
static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
{}

static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
{}

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{}

int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{}

/*
 * In check and repair code, when checking references to write buffer btrees we
 * need to issue a flush before we have a definitive error: this issues a flush
 * if this is a key we haven't yet checked.
 */
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
					struct bkey_s_c referring_k,
					struct bkey_buf *last_flushed)
{}
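
/*
 * Hedged usage sketch for the above (variable names are illustrative): fsck
 * code checking e.g. backpointer or LRU references does
 *
 *	ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, &last_flushed);
 *	if (ret)
 *		return ret;
 *
 * where an actual flush typically surfaces as a transaction restart, after
 * which the check runs again with last_flushed recording that this key has
 * already been flushed.
 */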

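/*
 * Background flush: queued when the write buffer has accumulated enough keys
 * that it should be flushed without waiting for journal reclaim to ask for
 * it.
 */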
static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{}

static void wb_accounting_sort(struct btree_write_buffer *wb)
{}

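/*
 * Accounting keys are deltas, so rather than buffering every update they are
 * accumulated into a sorted per-position array; this slowpath handles the
 * first update to a position not yet present in that array.
 */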
int bch2_accounting_key_to_wb_slowpath(struct bch_fs *c, enum btree_id btree,
				       struct bkey_i_accounting *k)
{}

int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
			     struct journal_keys_to_wb *dst,
			     enum btree_id btree, struct bkey_i *k)
{}

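/*
 * Interface used by the journal write path: start/end bracket copying
 * write-buffer keys out of a journal entry into wb->inc, holding a journal
 * pin for that seq; the _slowpath helpers above are called when the buffer
 * needs to be grown (or, for accounting, when a position isn't yet in the
 * accounting array).
 */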
void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
{}

int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
{}

static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
{}

static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
{}

int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
{}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{}