linux/fs/xfs/libxfs/xfs_btree.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_btree.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_alloc.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"
#include "xfs_ag.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_health.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"

/*
 * Btree magic numbers.
 */
uint32_t
xfs_btree_magic(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{}

/*
 * These sibling pointer checks are optimised for null sibling pointers. This
 * happens a lot, and we don't need to byte swap at runtime if the sibling
 * pointer is NULL.
 *
 * These are explicitly marked as inline because the cost of calling them as
 * functions instead of inlining them is about 36 bytes of extra code per call
 * site on x86-64. Yes, gcc-11 fails to inline them, and explicitly inlining
 * these sibling check functions reduces the compiled code size by over 300
 * bytes.  A standalone sketch of the null-first pattern follows the agblock
 * variant below.
 */
static inline xfs_failaddr_t
xfs_btree_check_fsblock_siblings(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsb,
	__be64			dsibling)
{}

static inline xfs_failaddr_t
xfs_btree_check_memblock_siblings(
	struct xfs_buftarg	*btp,
	xfbno_t			bno,
	__be64			dsibling)
{}

static inline xfs_failaddr_t
xfs_btree_check_agblock_siblings(
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	__be32			dsibling)
{}
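
/*
 * A standalone sketch of the null-first check pattern described above,
 * illustration only (hence the #if 0): the on-disk sibling is compared
 * against the pre-swapped null constant so the common NULL case needs no
 * byte swap at runtime.  example_check_fsblock_sibling() is hypothetical.
 */
#if 0
static inline xfs_failaddr_t
example_check_fsblock_sibling(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsb,
	__be64			dsibling)
{
	xfs_fsblock_t		sibling;

	if (dsibling == cpu_to_be64(NULLFSBLOCK))
		return NULL;			/* common case: no sibling */

	sibling = be64_to_cpu(dsibling);
	if (sibling == fsb)
		return __this_address;		/* block points at itself */
	if (!xfs_verify_fsbno(mp, sibling))
		return __this_address;		/* sibling outside the fs */
	return NULL;
}
#endif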

static xfs_failaddr_t
__xfs_btree_check_lblock_hdr(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{}

/*
 * Check a long btree block header.  Return the address of the failing check,
 * or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_fsblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{}

/*
 * Check an in-memory btree block header.  Return the address of the failing
 * check, or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_memblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{}

/*
 * Check a short btree block header.  Return the address of the failing check,
 * or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_agblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{}

/*
 * Internal btree block check.
 *
 * Return NULL if the block is ok or the address of the failed check otherwise.
 */
xfs_failaddr_t
__xfs_btree_check_block(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{}

static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur)
{}

/*
 * Debug routine: check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block, if any */
{}

int
__xfs_btree_check_ptr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				index,
	int				level)
{}

/*
 * Check that a given (indexed) btree pointer at a certain level of a
 * btree is valid and doesn't point past where it should.
 */
static int
xfs_btree_check_ptr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				index,
	int				level)
{}

#ifdef DEBUG
# define xfs_btree_debug_check_ptr(cur, ptr, index, level)	\
	__xfs_btree_check_ptr(cur, ptr, index, level)
#else
# define xfs_btree_debug_check_ptr(cur, ptr, index, level)	(0)
#endif

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * long-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_fsblock_calc_crc(
	struct xfs_buf		*bp)
{}
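
/*
 * A hedged sketch of the pattern described above, assuming the usual v5
 * helpers (xfs_has_crc(), xfs_buf_update_cksum()): stash the log item's
 * LSN in the long-form header, then checksum the whole block.
 * Illustration only, not part of the build.
 */
#if 0
static void example_fsblock_calc_crc(struct xfs_buf *bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!xfs_has_crc(bp->b_mount))
		return;				/* CRCs are a v5 feature */
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}
#endif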

bool
xfs_btree_fsblock_verify_crc(
	struct xfs_buf		*bp)
{}

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * short-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_agblock_calc_crc(
	struct xfs_buf		*bp)
{}

bool
xfs_btree_agblock_verify_crc(
	struct xfs_buf		*bp)
{}

static int
xfs_btree_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{}

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			error)		/* del because of error */
{}

/* Return the buffer target for this btree's buffer. */
static inline struct xfs_buftarg *
xfs_btree_buftarg(
	struct xfs_btree_cur	*cur)
{}

/* Return the block size (in units of 512b sectors) for this btree. */
static inline unsigned int
xfs_btree_bbsize(
	struct xfs_btree_cur	*cur)
{}

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int						/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur	*cur,		/* input cursor */
	struct xfs_btree_cur	**ncur)		/* output cursor */
{}

/*
 * XFS btree block layout and addressing:
 *
 * There are two types of blocks in the btree: leaf and non-leaf blocks.
 *
 * A leaf block starts with a header followed by records containing the
 * values.  A non-leaf block also starts with the same header, and then
 * contains lookup keys followed by an equal number of pointers to the
 * btree blocks at the previous level.
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Leaf:	| header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Non-Leaf:	| header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 * The header is called struct xfs_btree_block for reasons better left unknown
 * and comes in different versions for short (32bit) and long (64bit) block
 * pointers.  The record and key structures are defined by the btree instances
 * and opaque to the btree core.  The block pointers are simple disk endian
 * integers, available in a short (32bit) and long (64bit) variant.
 *
 * The helpers below calculate the offset of a given record, key or pointer
 * into a btree block (xfs_btree_*_offset) or return a pointer to the given
 * record, key or pointer (xfs_btree_*_addr).  Note that all addressing
 * inside the btree block is done using indices starting at one, not zero!
 *
 * If XFS_BTGEO_OVERLAPPING is set, then this btree supports keys containing
 * overlapping intervals.  In such a tree, records are still sorted lowest to
 * highest and indexed by the smallest key value that refers to the record.
 * However, nodes are different: each pointer has two associated keys -- one
 * indexing the lowest key available in the block(s) below (the same behavior
 * as the key in a regular btree) and another indexing the highest key
 * available in the block(s) below.  Because records are /not/ sorted by the
 * highest key, all leaf block updates require us to compute the highest key
 * that matches any record in the leaf and to recursively update the high keys
 * in the nodes going further up in the tree, if necessary.  Nodes look like
 * this:
 *
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 * Non-Leaf:	| header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 *
 * To perform an interval query on an overlapped tree, perform the usual
 * depth-first search and use the low and high keys to decide if we can skip
 * that particular node.  If a leaf node is reached, return the records that
 * intersect the interval.  Note that an interval query may return numerous
 * entries.  For a non-overlapped tree, simply search for the record associated
 * with the lowest key and iterate forward until a non-matching record is
 * found.  Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by
 * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discusses this
 * in more detail.
 *
 * Why do we care about overlapping intervals?  Let's say you have a bunch of
 * reverse mapping records on a reflink filesystem:
 *
 * 1: +- file A startblock B offset C length D -----------+
 * 2:      +- file E startblock F offset G length H --------------+
 * 3:      +- file I startblock F offset J length K --+
 * 4:                                                        +- file L... --+
 *
 * Now say we want to map block (B+D) into file A at offset (C+D).  Ideally,
 * we'd simply increment the length of record 1.  But how do we find the record
 * that ends at (B+D-1) (i.e. record 1)?  A LE lookup of (B+D-1) would return
 * record 3 because the keys are ordered first by startblock.  An interval
 * query would return records 1 and 2 because they both overlap (B+D-1), and
 * from that we can pick out record 1 as the appropriate left neighbor.
 *
 * In the non-overlapped case you can do a LE lookup and decrement the cursor
 * because a record's interval must end before the next record.
 */
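
/*
 * Worked example of the one-based addressing described above, with
 * illustrative sizes only: given a 64-byte header and 16-byte records,
 *
 *	xfs_btree_rec_offset(cur, n) = block_len + (n - 1) * rec_len
 *
 * puts record 1 at byte 64 and record 3 at byte 96.  Keys and pointers in
 * a non-leaf block follow the same pattern, with the pointers starting
 * after the maximum number of keys the block can hold.
 */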

/*
 * Return size of the btree block header for this btree instance.
 */
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{}

/*
 * Calculate offset of the n-th record in a btree block.
 */
STATIC size_t
xfs_btree_rec_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{}

/*
 * Calculate offset of the n-th key in a btree block.
 */
STATIC size_t
xfs_btree_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{}

/*
 * Calculate offset of the n-th high key in a btree block.
 */
STATIC size_t
xfs_btree_high_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{}

/*
 * Calculate offset of the n-th block pointer in a btree block.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{}

/*
 * Return a pointer to the n-th record in the btree block.
 */
union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{}

/*
 * Return a pointer to the n-th key in the btree block.
 */
union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{}

/*
 * Return a pointer to the n-th high key in the btree block.
 */
union xfs_btree_key *
xfs_btree_high_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{}

/*
 * Return a pointer to the n-th block pointer in the btree block.
 */
union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{}

struct xfs_ifork *
xfs_btree_ifork_ptr(
	struct xfs_btree_cur	*cur)
{}

/*
 * Get the root block which is stored in the inode.
 *
 * For now this btree implementation assumes the btree root is always
 * stored in the if_broot field of an inode fork.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{}

/*
 * Retrieve the block pointer from the cursor at the given level.
 * This may be an inode btree root or from a buffer.
 */
struct xfs_btree_block *		/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{}

/*
 * Change the cursor to point to the first record at the given level.
 * Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level)	/* level to change */
{}

/*
 * Change the cursor to point to the last record in the current block
 * at the given level.  Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level)	/* level to change */
{}

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{}

STATIC int
xfs_btree_readahead_fsblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{}

STATIC int
xfs_btree_readahead_memblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{}

STATIC int
xfs_btree_readahead_agblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{}

/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{}

STATIC int
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	xfs_daddr_t			*daddr)
{}

/*
 * Readahead @count btree blocks at the given @ptr location.
 *
 * We don't need to care about long or short form btrees here as we have a
 * method of converting the ptr directly to a daddr available to us.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{}

/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */
STATIC void
xfs_btree_setbuf(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	struct xfs_buf		*bp)	/* new buffer to set */
{}

bool
xfs_btree_ptr_is_null(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr)
{}

void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{}

static inline bool
xfs_btree_ptrs_equal(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*ptr1,
	union xfs_btree_ptr		*ptr2)
{}

/*
 * Get/set/init sibling pointers
 */
void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{}

void
xfs_btree_set_sibling(
	struct xfs_btree_cur		*cur,
	struct xfs_btree_block		*block,
	const union xfs_btree_ptr	*ptr,
	int				lr)
{}

static void
__xfs_btree_init_block(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	const struct xfs_btree_ops *ops,
	xfs_daddr_t		blkno,
	__u16			level,
	__u16			numrecs,
	__u64			owner)
{}

void
xfs_btree_init_block(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	const struct xfs_btree_ops *ops,
	__u16			level,
	__u16			numrecs,
	__u64			owner)
{}

void
xfs_btree_init_buf(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	const struct xfs_btree_ops	*ops,
	__u16				level,
	__u16				numrecs,
	__u64				owner)
{}

static inline __u64
xfs_btree_owner(
	struct xfs_btree_cur    *cur)
{}

void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{}

STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{}

static inline void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{}

int
xfs_btree_get_buf_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	struct xfs_btree_block		**block,
	struct xfs_buf			**bpp)
{}

/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 */
int
xfs_btree_read_buf_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				flags,
	struct xfs_btree_block		**block,
	struct xfs_buf			**bpp)
{}

/*
 * Copy keys from one btree block to another.
 */
void
xfs_btree_copy_keys(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*dst_key,
	const union xfs_btree_key	*src_key,
	int				numkeys)
{}

/*
 * Copy records from one btree block to another.
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{}

/*
 * Copy block pointers from one btree block to another.
 */
void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	const union xfs_btree_ptr *src_ptr,
	int			numptrs)
{}

/*
 * Shift keys one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{}

/*
 * Shift records one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,
	int			numrecs)
{}

/*
 * Shift block pointers one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,
	int			numptrs)
{}

/*
 * Log key values from the btree block.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{}

/*
 * Log record values from the btree block.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{}

/*
 * Log fields from a btree block header.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	uint32_t		fields)	/* mask of fields: XFS_BB_... */
{}

/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{}

/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{}

/*
 * Check the btree block owner now that we have the context to know who the
 * real owner is.
 */
static inline xfs_failaddr_t
xfs_btree_check_block_owner(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block)
{}

int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	int				level,	/* level in the btree */
	const union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block		**blkp) /* return btree block */
{}

/*
 * Get current search key.  For level 0 we don't actually have a key
 * structure so we make one up from the record.  For all other levels
 * we just return the right key.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)
{}

/*
 * Initialize a pointer to the root block.
 */
void
xfs_btree_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{}

/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if we can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{}

/* Find the high key storage area from a regular key. */
union xfs_btree_key *
xfs_btree_high_key_from_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{}

/* Determine the low (and high if overlapped) keys of a leaf block */
STATIC void
xfs_btree_get_leaf_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{}

/* Determine the low (and high if overlapped) keys of a node block */
STATIC void
xfs_btree_get_node_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{}

/* Derive the keys for any btree block. */
void
xfs_btree_get_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{}

/*
 * Decide if we need to update the parent keys of a btree block.  For
 * a standard btree this is only necessary if we're updating the first
 * record/key.  For an overlapping btree, we must always update the
 * keys because the highest key can be in any of the records or keys
 * in the block.
 */
static inline bool
xfs_btree_needs_key_update(
	struct xfs_btree_cur	*cur,
	int			ptr)
{}
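
/*
 * The decision above reduces to one line; a sketch, assuming the
 * overlapping behaviour is flagged in the btree ops geometry flags:
 *
 *	return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1;
 */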

/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root.  If force_all is false, stop if the keys for a given
 * level do not need updating.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{}

/* Update all the keys from some level in cursor back to the root. */
STATIC int
xfs_btree_updkeys_force(
	struct xfs_btree_cur	*cur,
	int			level)
{}

/*
 * Update the parent keys of the given level, progressing towards the root.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{}

/*
 * Update the record referred to by cur to the value in the
 * given record. This either works (return 0) or gets an
 * EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{}

/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{}

/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new path.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{}

static inline int
xfs_btree_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*hint_block,
	union xfs_btree_ptr		*new_block,
	int				*stat)
{}

/*
 * Split cur/level block in half.
 * Return new block number and the key to its first
 * record (to be inserted into parent).
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{}

#ifdef __KERNEL__
struct xfs_btree_split_args {};

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{}

/*
 * BMBT split requests often come in with little stack to work on, so we push
 * them off to a worker thread where there is plenty of stack to use. For the
 * other btree types, just call directly to avoid the context switch overhead
 * here.
 *
 * Care must be taken here - the work queue rescuer thread introduces potential
 * AGF <> worker queue deadlocks if the BMBT block allocation has to lock new
 * AGFs to allocate blocks. A task being run by the rescuer could attempt to
 * lock an AGF that is already locked by a task queued to run by the rescuer,
 * resulting in an ABBA deadlock: the rescuer cannot run the lock holder to
 * release the AGF until the task it is currently running acquires that same
 * lock.
 *
 * To avoid this issue, we only ever queue BMBT splits that don't have an AGF
 * already locked to allocate from. The only place that doesn't hold an AGF
 * locked is unwritten extent conversion at IO completion, but that has already
 * been offloaded to a worker thread and hence has no stack consumption issues
 * we have to worry about.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{}
#else
#define xfs_btree_split		__xfs_btree_split
#endif /* __KERNEL__ */
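
/*
 * A hedged sketch of the offload decision described above, assuming the
 * transaction records the highest AGF locked so far in t_highest_agno
 * (NULLAGNUMBER meaning no AGF is locked yet):
 *
 *	if (xfs_btree_is_bmap(cur->bc_ops) &&
 *	    cur->bc_tp->t_highest_agno == NULLAGNUMBER)
 *		offload the split to the worker thread;
 *	else
 *		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
 */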

/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{}

static void
xfs_btree_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{}

/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{}

STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)
{}

/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{}

/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)
{}

/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot name comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{}

/*
 * Kill the current root node, and replace it with its only child node.
 */
STATIC int
xfs_btree_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	union xfs_btree_ptr	*newroot)
{}

STATIC int
xfs_btree_dec_cursor(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)
{}

/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{}

/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_btree_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_rec	**recp,	/* output: btree record */
	int			*stat)	/* output: success/failure */
{}

/* Visit a block in a btree. */
STATIC int
xfs_btree_visit_block(
	struct xfs_btree_cur		*cur,
	int				level,
	xfs_btree_visit_blocks_fn	fn,
	void				*data)
{}

/* Visit every block in a btree. */
int
xfs_btree_visit_blocks(
	struct xfs_btree_cur		*cur,
	xfs_btree_visit_blocks_fn	fn,
	unsigned int			flags,
	void				*data)
{}

/*
 * Change the owner of a btree.
 *
 * The mechanism we use here is ordered buffer logging. Because we don't know
 * how many buffers we are going to need to modify, we don't really want to
 * have to make transaction reservations for the worst case of every buffer in
 * a full size btree as that may be more space than we can fit in the log....
 *
 * We do the btree walk in the most efficient manner possible - we have sibling
 * pointers so we can just walk all the blocks on each level from left to right
 * in a single pass, and then move to the next level and do the same. We can
 * also do readahead on the sibling pointers to get IO moving more quickly,
 * though for slow disks this is unlikely to make much difference to performance
 * as the amount of CPU work we have to do before moving to the next block is
 * relatively small.
 *
 * For each btree block that we load, modify the owner appropriately, set the
 * buffer as an ordered buffer and log it appropriately. We need to ensure that
 * we mark the region we change dirty so that if the buffer is relogged in
 * a subsequent transaction the changes we make here as an ordered buffer are
 * correctly relogged in that transaction.  If we are in recovery context, then
 * just queue the modified buffer as a delayed write buffer so the transaction
 * recovery completion writes the changes to disk.
 */
struct xfs_btree_block_change_owner_info {};

static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{}
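
/*
 * Sketch of the per-block step described above; bp and buffer_list stand
 * in for locals of the helper, illustration only: in transaction context
 * order the buffer, falling back to logging just the owner field if the
 * buffer is already dirty; in recovery context queue it for delayed write
 * instead.
 */
#if 0
	if (cur->bc_tp) {
		if (!xfs_trans_ordered_buf(cur->bc_tp, bp))
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
	} else {
		xfs_buf_delwri_queue(bp, buffer_list);
	}
#endif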

int
xfs_btree_change_owner(
	struct xfs_btree_cur	*cur,
	uint64_t		new_owner,
	struct list_head	*buffer_list)
{}

/* Verify the v5 fields of a long-format btree block. */
xfs_failaddr_t
xfs_btree_fsblock_v5hdr_verify(
	struct xfs_buf		*bp,
	uint64_t		owner)
{}

/* Verify a long-format btree block. */
xfs_failaddr_t
xfs_btree_fsblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{}

/* Verify an in-memory btree block. */
xfs_failaddr_t
xfs_btree_memblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{}

/**
 * xfs_btree_agblock_v5hdr_verify() -- verify the v5 fields of a short-format
 *				      btree block
 *
 * @bp: buffer containing the btree block
 */
xfs_failaddr_t
xfs_btree_agblock_v5hdr_verify(
	struct xfs_buf		*bp)
{}

/**
 * xfs_btree_agblock_verify() -- verify a short-format btree block
 *
 * @bp: buffer containing the btree block
 * @max_recs: maximum records allowed in this btree node
 */
xfs_failaddr_t
xfs_btree_agblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{}

/*
 * For the given limits on leaf and keyptr records per block, calculate the
 * height of the tree needed to index the number of leaf records.
 */
unsigned int
xfs_btree_compute_maxlevels(
	const unsigned int	*limits,
	unsigned long long	records)
{}
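
/*
 * A minimal sketch of the computation described above, assuming limits[0]
 * is the per-leaf record limit, limits[1] the per-node keyptr limit, and
 * howmany_64() the 64-bit round-up division helper.  Illustration only.
 */
#if 0
static unsigned int
example_compute_maxlevels(
	const unsigned int	*limits,
	unsigned long long	records)
{
	unsigned long long	level_blocks = howmany_64(records, limits[0]);
	unsigned int		height = 1;

	/* each level up divides the block count by the keyptr fanout */
	while (level_blocks > 1) {
		level_blocks = howmany_64(level_blocks, limits[1]);
		height++;
	}
	return height;
}
#endif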

/*
 * For the given limits on leaf and keyptr records per block, calculate the
 * number of blocks needed to index the given number of leaf records.
 */
unsigned long long
xfs_btree_calc_size(
	const unsigned int	*limits,
	unsigned long long	records)
{}

/*
 * Given a number of available blocks for the btree to consume with records and
 * pointers, calculate the height of the tree needed to index all the records
 * that space can hold based on the number of pointers each interior node
 * holds.
 *
 * We start by assuming a single level tree consumes a single block, then track
 * the number of blocks each node level consumes until we no longer have space
 * to store the next node level. At this point, we are indexing all the leaf
 * blocks in the space, and there's no more free space to split the tree any
 * further. That's our maximum btree height.
 */
unsigned int
xfs_btree_space_to_height(
	const unsigned int	*limits,
	unsigned long long	leaf_blocks)
{}
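
/*
 * Sketch of the accounting described above, assuming limits[1] is the node
 * fanout: reserve one block for the root, then add node levels until the
 * remaining space cannot hold another level.  Illustration only.
 */
#if 0
static unsigned int
example_space_to_height(
	const unsigned int	*limits,
	unsigned long long	leaf_blocks)
{
	unsigned long long	node_blocks = limits[1];
	unsigned long long	blocks_left = leaf_blocks - 1;
	unsigned int		height = 1;

	if (leaf_blocks < 1)
		return 0;
	while (node_blocks < blocks_left) {
		blocks_left -= node_blocks;
		node_blocks *= limits[1];
		height++;
	}
	return height;
}
#endif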

/*
 * Query a regular btree for all records overlapping a given interval.
 * Start with a LE lookup of the key of low_rec and return all records
 * until we find a record with a key greater than the key of high_rec.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*low_key,
	const union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{}
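
/*
 * Pseudocode sketch of the walk described above, using the cursor helpers
 * defined in this file:
 *
 *	xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
 *	for (;;) {
 *		xfs_btree_get_rec(cur, &recp, &stat);
 *		if (key(recp) > high_key)
 *			break;
 *		if (key(recp) >= low_key)
 *			fn(cur, recp, priv);
 *		xfs_btree_increment(cur, 0, &stat);
 *	}
 *
 * key() stands in for the cursor's record-to-key conversion; error and
 * end-of-tree handling are omitted.
 */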

/*
 * Query an overlapped interval btree for all records overlapping a given
 * interval.  This function roughly follows the algorithm given in
 * "Interval Trees" of _Introduction to Algorithms_, which is section
 * 14.3 in the 2nd and 3rd editions.
 *
 * First, generate keys for the low and high records passed in.
 *
 * For any leaf node, generate the high and low keys for the record.
 * If the record keys overlap with the query low/high keys, pass the
 * record to the function iterator.
 *
 * For any internal node, compare the low and high keys of each
 * pointer against the query low/high keys.  If there's an overlap,
 * follow the pointer.
 *
 * As an optimization, we stop scanning a block when we find a low key
 * that is greater than the query's high key.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*low_key,
	const union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{}
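
/*
 * The per-keyptr overlap test described above reduces to two comparisons;
 * a sketch, assuming the btree's diff_two_keys method orders keys like
 * memcmp:
 *
 *	if (diff(low key of ptr, query high key) > 0)
 *		break;		(rest of the block is entirely higher)
 *	if (diff(high key of ptr, query low key) >= 0)
 *		descend;	(the ptr's [low, high] intersects the query)
 */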

static inline void
xfs_btree_key_from_irec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*key,
	const union xfs_btree_irec	*irec)
{}

/*
 * Query a btree for all records overlapping a given interval of keys.  The
 * supplied function will be called with each record found; return one of the
 * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
 * code.  This function returns -ECANCELED, zero, or a negative error code.
 */
int
xfs_btree_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low_rec,
	const union xfs_btree_irec	*high_rec,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{}

/* Query a btree for all records. */
int
xfs_btree_query_all(
	struct xfs_btree_cur		*cur,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{}

static int
xfs_btree_count_blocks_helper(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{}

/* Count the blocks in a btree and return the result in *blocks. */
int
xfs_btree_count_blocks(
	struct xfs_btree_cur	*cur,
	xfs_extlen_t		*blocks)
{}

/* Compare two btree pointers. */
int64_t
xfs_btree_diff_two_ptrs(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*a,
	const union xfs_btree_ptr	*b)
{}

struct xfs_btree_has_records {};

STATIC int
xfs_btree_has_records_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{}

/*
 * Scan part of the keyspace of a btree and tell us if that keyspace does not
 * map to any records; is fully mapped to records; or is partially mapped to
 * records.  This is the btree record equivalent to determining if a file is
 * sparse.
 *
 * For most btree types, the record scan should use all available btree key
 * fields to compare the keys encountered.  These callers should pass NULL for
 * @mask.  However, some callers (e.g.  scanning physical space in the rmapbt)
 * want to ignore some part of the btree record keyspace when performing the
 * comparison.  These callers should pass in a union xfs_btree_key object with
 * the fields that *should* be a part of the comparison set to any nonzero
 * value, and the rest zeroed.
 */
int
xfs_btree_has_records(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	const union xfs_btree_key	*mask,
	enum xbtree_recpacking		*outcome)
{}
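
/*
 * Hypothetical usage sketch for the @mask parameter described above: an
 * rmapbt caller that only cares about physical space would compare the
 * startblock field alone and leave the other key fields zeroed.
 */
#if 0
	union xfs_btree_key	mask = { };

	mask.rmap.rm_startblock = cpu_to_be32(-1U);	/* compare only this */
	error = xfs_btree_has_records(cur, &low, &high, &mask, &outcome);
#endif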

/* Are there more records in this btree? */
bool
xfs_btree_has_more_records(
	struct xfs_btree_cur	*cur)
{}

/* Set up all the btree cursor caches. */
int __init
xfs_btree_init_cur_caches(void)
{}

/* Destroy all the btree cursor caches, if they've been allocated. */
void
xfs_btree_destroy_cur_caches(void)
{}

/* Move the btree cursor before the first record. */
int
xfs_btree_goto_left_edge(
	struct xfs_btree_cur	*cur)
{}