/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
#include "xfs_ag.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"

/* Set the root of an in-memory btree. */
void
xfbtree_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	…
}

/* Initialize a pointer from the in-memory btree header. */
void
xfbtree_init_ptr_from_cur(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*ptr)
{
	…
}

/* Duplicate an in-memory btree cursor. */
struct xfs_btree_cur *
xfbtree_dup_cursor(
	struct xfs_btree_cur		*cur)
{
	…
}

/* Close the btree xfile and release all resources. */
void
xfbtree_destroy(
	struct xfbtree			*xfbt)
{
	…
}

/* Compute the number of bytes available for records. */
static inline unsigned int
xfbtree_rec_bytes(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{
	…
}

/* Initialize an empty leaf block as the btree root. */
STATIC int
xfbtree_init_leaf_block(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	const struct xfs_btree_ops	*ops)
{
	…
}

/*
 * Create an in-memory btree root that can be used with the given xmbuf.
 * Callers must set xfbt->owner.
 */
int
xfbtree_init(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	struct xfs_buftarg		*btp,
	const struct xfs_btree_ops	*ops)
{
	…
}

/* Allocate a block to our in-memory btree. */
int
xfbtree_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	…
}

/* Free a block from our in-memory btree. */
int
xfbtree_free_block(
	struct xfs_btree_cur		*cur,
	struct xfs_buf			*bp)
{
	…
}

/* Return the minimum number of records for a btree block. */
int
xfbtree_get_minrecs(
	struct xfs_btree_cur		*cur,
	int				level)
{
	…
}

/* Return the maximum number of records for a btree block. */
int
xfbtree_get_maxrecs(
	struct xfs_btree_cur		*cur,
	int				level)
{
	…
}

/* If this log item is a buffer item that came from the xfbtree, return it. */
static inline struct xfs_buf *
xfbtree_buf_match(
	struct xfbtree			*xfbt,
	const struct xfs_log_item	*lip)
{
	…
}

/*
 * Commit changes to the incore btree immediately by writing all dirty xfbtree
 * buffers to the backing xfile. This detaches all xfbtree buffers from the
 * transaction, even on failure. The buffer locks are dropped between the
 * delwri queue and submit, so the caller must synchronize btree access.
 *
 * Normally we'd let the buffers commit with the transaction and get written to
 * the xfile via the log, but online repair stages ephemeral btrees in memory
 * and uses the btree_staging functions to write new btrees to disk atomically.
 * The in-memory btree (and its backing store) are discarded at the end of the
 * repair phase, which means that xfbtree buffers cannot commit with the rest
 * of a transaction.
 *
 * In other words, online repair only needs the transaction to collect buffer
 * pointers and to avoid buffer deadlocks, not to guarantee consistency of
 * updates.
 */
int
xfbtree_trans_commit(
	struct xfbtree			*xfbt,
	struct xfs_trans		*tp)
{
	…
}

/*
 * Cancel changes to the incore btree by detaching all the xfbtree buffers.
 * Changes are not undone, so callers must not access the btree ever again.
 */
void
xfbtree_trans_cancel(
	struct xfbtree			*xfbt,
	struct xfs_trans		*tp)
{
	…
}
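
/*
 * Illustrative sketch only, not the elided xfbtree_trans_commit() body above:
 * it shows the commit pattern that the comment describes -- walk the
 * transaction's log items, pull out every buffer that belongs to this
 * xfbtree, detach it, queue it for delayed write, and submit the queue so
 * the dirty blocks reach the backing xfile.  xfs_trans_bdetach() is assumed
 * to detach a buffer from the transaction while leaving it locked and held
 * by the caller; treat the exact helpers and error handling as assumptions.
 */
static int __maybe_unused
xfbtree_trans_commit_sketch(
	struct xfbtree			*xfbt,
	struct xfs_trans		*tp)
{
	struct xfs_log_item		*lip, *n;
	LIST_HEAD(buffer_list);

	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		struct xfs_buf		*bp = xfbtree_buf_match(xfbt, lip);

		if (!bp)
			continue;

		/* Detach from the transaction; we still hold the lock. */
		xfs_trans_bdetach(tp, bp);

		/* Queue for writeback, then drop our lock and reference. */
		xfs_buf_delwri_queue(bp, &buffer_list);
		xfs_buf_relse(bp);
	}

	/* Locks were dropped above, so the caller must fence off the btree. */
	return xfs_buf_delwri_submit(&buffer_list);
}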
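
/*
 * Illustrative usage sketch, not part of this file: roughly how an online
 * repair caller might stage an ephemeral btree against an in-memory buffer
 * target using the functions declared above.  The btree ops pointer stands
 * in for a btree-type-specific ops structure, the owner value is a
 * placeholder, and record updates via a per-type cursor helper are elided;
 * treat those pieces as assumptions rather than the actual repair code.
 */
static int __maybe_unused
xfbtree_usage_sketch(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{
	struct xfbtree			xfbt = { };
	struct xfs_buftarg		*btp;
	struct xfs_trans		*tp;
	int				error;

	/* Back the ephemeral btree with an in-memory buffer target. */
	error = xmbuf_alloc(mp, "xfbtree sketch", &btp);
	if (error)
		return error;

	/* Callers must set the owner before initializing the btree. */
	xfbt.owner = 0;
	error = xfbtree_init(mp, &xfbt, btp, ops);
	if (error)
		goto out_target;

	/* Collect buffer pointers with an empty transaction. */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		goto out_xfbtree;

	/* ...stage records here through a per-type in-memory cursor... */

	/* Write dirty xfbtree buffers to the xfile and detach them. */
	error = xfbtree_trans_commit(&xfbt, tp);
	xfs_trans_cancel(tp);

out_xfbtree:
	xfbtree_destroy(&xfbt);
out_target:
	xmbuf_free(btp);
	return error;
}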