linux/fs/xfs/libxfs/xfs_alloc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
#include "xfs_health.h"
#include "xfs_extfree_item.h"

struct kmem_cache	*xfs_extfree_item_cache;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)

#define XFSA_FIXUP_BNO_OK
#define XFSA_FIXUP_CNT_OK
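
/*
 * Illustrative sketch only: an absolute-difference macro of this shape is
 * conventionally defined as below.  EXAMPLE_ABSDIFF is a hypothetical
 * stand-in, not the in-tree definition.
 */
#define EXAMPLE_ABSDIFF(a, b)	((a) <= (b) ? (b) - (a) : (a) - (b))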

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots at
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{}
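
/*
 * Illustrative sketch of the sizing rule described above, assuming the v5
 * header occupies the front of the AGFL block; example_agfl_slots() and its
 * parameters are hypothetical stand-ins.
 */
static inline unsigned int
example_agfl_slots(
	unsigned int	sectsize,	/* AGFL block size in bytes */
	unsigned int	hdrsize,	/* bytes reserved for the v5 header */
	bool		has_crc)	/* CRC-enabled (v5) filesystem? */
{
	/* CRC filesystems steal header space from the front of the block. */
	if (has_crc)
		sectsize -= hdrsize;

	/* Every remaining slot holds one 32-bit AG block number. */
	return sectsize / sizeof(uint32_t);
}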

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{}

/*
 * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
 * guarantee that we can refill the AGFL prior to allocating space in a nearly
 * full AG.  Although the space described by the free space btrees, the
 * blocks used by the freesp btrees themselves, and the blocks owned by the
 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
 * free space in the AG drop so low that the free space btrees cannot refill an
 * empty AGFL up to the minimum level.  Rather than grind through empty AGs
 * until the fs goes down, we subtract this many AG blocks from the incore
 * fdblocks to ensure user allocation does not overcommit the space the
 * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
 * withhold space from xfs_dec_fdblocks, so we do not account for that here.
 */
#define XFS_ALLOCBT_AGFL_RESERVE

/*
 * Compute the number of blocks that we set aside to guarantee the ability to
 * refill the AGFL and handle a full bmap btree split.
 *
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * being allocated for a delayed extent: for example, a data block in a certain
 * AG is allocated, but no additional block is available for the bmap btree
 * block needed when the file's bmap btree splits. This may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * For each AG, we need to reserve enough blocks to replenish a totally empty
 * AGFL and 4 more to handle a potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{}
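
/*
 * Illustrative sketch of the reservation rule described above: per AG, the
 * AGFL refill reserve plus four blocks for a potential bmap btree split,
 * summed over all AGs.  example_set_aside() is a hypothetical stand-in.
 */
static inline unsigned int
example_set_aside(
	unsigned int	agcount,	/* number of AGs in the filesystem */
	unsigned int	agfl_reserve)	/* per-AG AGFL refill reserve */
{
	/* Four extra blocks per AG cover a full bmap btree split. */
	return agcount * (agfl_reserve + 4);
}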

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all the
 * blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode btree root block.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{}
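
/*
 * Illustrative sketch of the computation described above, with each class of
 * permanently-used metadata expressed as a block count.  The helper and its
 * parameters are hypothetical stand-ins.
 */
static inline unsigned int
example_ag_max_usable(
	unsigned int	agblocks,	/* size of the AG in blocks */
	unsigned int	hdr_blocks,	/* blocks holding sb/AGF/AGI/AGFL */
	unsigned int	agfl_reserve,	/* per-AG AGFL refill reserve */
	unsigned int	root_blocks)	/* bno/cnt/ino (+finobt/rmapbt) roots */
{
	return agblocks - hdr_blocks - agfl_reserve - root_blocks;
}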


static int
xfs_alloc_lookup(
	struct xfs_btree_cur	*cur,
	xfs_lookup_t		dir,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{}

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
static inline int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{}
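
/*
 * Illustrative sketch: the three lookup wrappers above plausibly delegate to
 * xfs_alloc_lookup() with the matching search direction, e.g.:
 *
 *	return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
 *	return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
 *	return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
 *
 * Sketch only; the bodies are not reproduced here.
 */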

static inline bool
xfs_alloc_cur_active(
	struct xfs_btree_cur	*cur)
{}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{}

/* Convert the ondisk btree record to its incore representation. */
void
xfs_alloc_btrec_to_irec(
	const union xfs_btree_rec	*rec,
	struct xfs_alloc_rec_incore	*irec)
{}

/* Simple checks for free space records. */
xfs_failaddr_t
xfs_alloc_check_irec(
	struct xfs_perag			*pag,
	const struct xfs_alloc_rec_incore	*irec)
{}

static inline int
xfs_alloc_complain_bad_rec(
	struct xfs_btree_cur		*cur,
	xfs_failaddr_t			fa,
	const struct xfs_alloc_rec_incore *irec)
{}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{}
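
/*
 * Illustrative sketch of the alignment step described above: round the found
 * extent's start up to the requested alignment and shorten the usable length
 * accordingly.  Hypothetical helper; busy extent trimming and the minlen
 * check are omitted.
 */
static inline void
example_align_extent(
	xfs_agblock_t	foundbno,	/* start of the found free extent */
	xfs_extlen_t	foundlen,	/* length of the found free extent */
	xfs_extlen_t	alignment,	/* required alignment, >= 1 */
	xfs_agblock_t	*resbno,	/* result: aligned start */
	xfs_extlen_t	*reslen)	/* result: usable aligned length */
{
	xfs_agblock_t	aligned = roundup(foundbno, alignment);
	xfs_extlen_t	diff = aligned - foundbno;

	*resbno = aligned;
	*reslen = (foundlen > diff) ? foundlen - diff : 0;
}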

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{}

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{}
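
/*
 * Illustrative sketch of the modulus adjustment described above: trim rlen
 * down (never up) so that rlen % prod == mod.  Hypothetical helper; the
 * minlen/maxlen boundary handling is omitted.
 */
static inline xfs_extlen_t
example_fix_len(
	xfs_extlen_t	rlen,		/* candidate length */
	xfs_extlen_t	prod,		/* required length multiple */
	xfs_extlen_t	mod)		/* required length remainder */
{
	xfs_extlen_t	k = rlen % prod;

	if (k == mod)
		return rlen;			/* already k * prod + mod */
	if (k > mod)
		return rlen - (k - mod);	/* trim down to remainder */
	return rlen - prod + (mod - k);		/* drop to the next multiple */
}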

/*
 * Determine if the cursor points to the block that contains the right-most
 * block of records in the by-count btree. This block contains the largest
 * contiguous free extent in the AG, so if we modify a record in this block we
 * need to call xfs_alloc_fixup_longest() once the modifications are done to
 * ensure the agf->agf_longest field is kept up to date with the longest free
 * extent tracked by the by-count btree.
 */
static bool
xfs_alloc_cursor_at_lastrec(
	struct xfs_btree_cur	*cnt_cur)
{}

/*
 * Find the rightmost record of the cntbt, and return the longest free space
 * recorded in it. Simply set both the block number and the length to their
 * maximum values before searching.
 */
static int
xfs_cntbt_longest(
	struct xfs_btree_cur	*cnt_cur,
	xfs_extlen_t		*longest)
{}
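
/*
 * Illustrative sketch of the lookup described above: saturate the search key
 * so that an LE lookup lands on the rightmost record in the by-count btree,
 * e.g.:
 *
 *	memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec));
 *	error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat);
 *
 * Sketch only; the actual body and its error handling are not shown here.
 */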

/*
 * Update the longest contiguous free extent in the AG from the by-count cursor
 * that is passed to us. This should be done at the end of any allocation or
 * freeing operation that touches the longest extent in the btree.
 *
 * Needing to update the longest extent can be determined by calling
 * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record
 * modification but before the modification begins.
 */
static int
xfs_alloc_fixup_longest(
	struct xfs_btree_cur	*cnt_cur)
{}

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{}
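
/*
 * Illustrative sketch of the geometry involved above: carving [rbno, rlen)
 * out of the free extent [fbno, flen) leaves up to two residual free
 * extents, one on each side.  Hypothetical helper; the actual btree record
 * insertions, deletions and updates are not shown.
 */
static inline void
example_residual_free_space(
	xfs_agblock_t	fbno,		/* start of the free extent */
	xfs_extlen_t	flen,		/* length of the free extent */
	xfs_agblock_t	rbno,		/* start of the allocated range */
	xfs_extlen_t	rlen,		/* length of the allocated range */
	xfs_extlen_t	*head_len,	/* out: free space left before rbno */
	xfs_extlen_t	*tail_len)	/* out: free space left after range */
{
	/* Assumes fbno <= rbno and rbno + rlen <= fbno + flen. */
	*head_len = rbno - fbno;
	*tail_len = (fbno + flen) - (rbno + rlen);
}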

/*
 * We do not verify the AGFL contents against AGF-based index counters here,
 * even though we may have access to the perag that contains shadow copies. We
 * don't know if the AGF-based counters have been checked, and if they have,
 * they may still be inconsistent because they haven't yet been reset on the first
 * allocation after the AGF has been read in.
 *
 * This means we can only check that all agfl entries contain valid or null
 * values because we can't reliably determine the active range to exclude
 * NULLAGBNO as a valid value.
 *
 * However, we can't even do that for v4 format filesystems because there are
 * old versions of mkfs out there that do not initialise the AGFL to known,
 * verifiable values. Hence we can't tell the difference between an AGFL block
 * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
 *
 * As a result, we can only fully validate AGFL block numbers when we pull them
 * from the freelist in xfs_alloc_get_freelist().
 */
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{}

const struct xfs_buf_ops xfs_agfl_buf_ops =;

/*
 * Read in the allocation group free block array.
 */
int
xfs_alloc_read_agfl(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		**bpp)
{}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{}

/*
 * Block allocation algorithm and data structures.
 */
struct xfs_alloc_cur {};

/*
 * Set up cursors, etc. in the extent allocation cursor. This function can be
 * called multiple times to reset an initialized structure without having to
 * reallocate cursors.
 */
static int
xfs_alloc_cur_setup(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{}

static void
xfs_alloc_cur_close(
	struct xfs_alloc_cur	*acur,
	bool			error)
{}

/*
 * Check an extent for allocation and track the best available candidate in the
 * allocation structure. The cursor is deactivated if it has entered an out of
 * range state based on allocation arguments. Optionally return the extent
 * geometry and allocation status if requested by the caller.
 */
static int
xfs_alloc_cur_check(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	int			*new)
{}

/*
 * Complete an allocation of a candidate extent. Remove the extent from both
 * trees and update the args structure.
 */
STATIC int
xfs_alloc_cur_finish(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{}

/*
 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
 * bno optimized lookup to search for extents with ideal size and locality.
 */
STATIC int
xfs_alloc_cntbt_iter(
	struct xfs_alloc_arg		*args,
	struct xfs_alloc_cur		*acur)
{}

/*
 * Deal with the case where only small freespaces remain. Either return the
 * contents of the last freespace record, or allocate space from the freelist if
 * there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
	xfs_agblock_t		*fbnop,	/* result block number */
	xfs_extlen_t		*flenp,	/* result length */
	int			*stat)	/* status: 0-freelist, 1-normal/none */
{}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{}

/*
 * Search a given number of btree records in a given direction. Check each
 * record against the good extent we've already found.
 */
STATIC int
xfs_alloc_walk_iter(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	bool			increment,
	bool			find_one, /* quit on first candidate */
	int			count,    /* rec count (-1 for infinite) */
	int			*stat)
{}

/*
 * Search the by-bno and by-size btrees in parallel in search of an extent with
 * ideal locality based on the NEAR mode ->agbno locality hint.
 */
STATIC int
xfs_alloc_ag_vextent_locality(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	int			*stat)
{}

/* Check the last block of the cnt btree for allocations. */
static int
xfs_alloc_ag_vextent_lastblock(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	bool			*allocated)
{}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int
xfs_alloc_ag_vextent_near(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
static int
xfs_alloc_ag_vextent_size(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{}

/*
 * Free the extent starting at agno/bno for length.
 */
int
xfs_free_ag_extent(
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{}

/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_alloc_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{}

/*
 * Find the length of the longest extent in an AG.  The 'need' parameter
 * specifies how much space we're going to need for the AGFL and the
 * 'reserved' parameter tells us how many blocks in this AG are reserved for
 * other callers.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_perag	*pag,
	xfs_extlen_t		need,
	xfs_extlen_t		reserved)
{}
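
/*
 * Illustrative sketch of the adjustment described above: shrink the longest
 * extent by whatever the AGFL still needs, and never report more than the
 * unreserved free space.  Hypothetical helper over plain counters.
 */
static inline xfs_extlen_t
example_longest_free(
	xfs_extlen_t	longest,	/* longest free extent on record */
	xfs_extlen_t	freeblks,	/* free blocks in the AG */
	xfs_extlen_t	flcount,	/* blocks already on the AGFL */
	xfs_extlen_t	need,		/* blocks the AGFL will need */
	xfs_extlen_t	reserved)	/* blocks reserved to other users */
{
	/* The AGFL refill may consume part of the longest free extent. */
	xfs_extlen_t	delta = (need > flcount) ? need - flcount : 0;

	if (longest > delta)
		return min(longest - delta, freeblks - reserved);

	/* Degenerate case: a single block may still be allocatable. */
	return freeblks > 0 ? 1 : 0;
}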

/*
 * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
 * return the largest possible minimum length.
 */
unsigned int
xfs_alloc_min_freelist(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{}

/*
 * Check if the operation we are fixing up the freelist for should go ahead or
 * not. If we are freeing blocks, we always allow it, otherwise the allocation
 * is dependent on whether the size and shape of free space available will
 * permit the requested allocation to take place.
 */
static bool
xfs_alloc_space_available(
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		min_free,
	int			flags)
{}

/*
 * Check the agfl fields of the agf for inconsistency or corruption.
 *
 * The original purpose was to detect an agfl header padding mismatch between
 * current and early v5 kernels. This problem manifests as a 1-slot size
 * difference between the on-disk flcount and the active [first, last] range of
 * a wrapped agfl.
 *
 * However, we need to use these same checks to catch agfl count corruptions
 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
 * way, we need to reset the agfl and warn the user.
 *
 * Return true if a reset is required before the agfl can be used, false
 * otherwise.
 */
static bool
xfs_agfl_needs_reset(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf)
{}
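
/*
 * Illustrative sketch of the consistency check described above: compute the
 * active slot count of the (possibly wrapped) [first, last] range and
 * compare it against flcount.  Hypothetical helper over plain counters.
 */
static inline bool
example_agfl_count_mismatch(
	unsigned int	flfirst,	/* index of the first active slot */
	unsigned int	fllast,		/* index of the last active slot */
	unsigned int	flcount,	/* on-disk active slot count */
	unsigned int	agfl_size)	/* total slots in the AGFL */
{
	unsigned int	active;

	if (flcount == 0)
		return false;		/* empty AGFL, nothing to compare */

	if (fllast >= flfirst)
		active = fllast - flfirst + 1;
	else
		active = agfl_size - flfirst + fllast + 1;	/* wrapped */

	return active != flcount;
}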

/*
 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
 * agfl content cannot be trusted. Warn the user that a repair is required to
 * recover leaked blocks.
 *
 * The purpose of this mechanism is to handle filesystems affected by the agfl
 * header padding mismatch problem. A reset keeps the filesystem online with a
 * relatively minor free space accounting inconsistency rather than suffer the
 * inevitable crash from use of an invalid agfl block.
 */
static void
xfs_agfl_reset(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{}

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
static int
xfs_defer_extent_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type,
	unsigned int			free_flags,
	struct xfs_defer_pending	**dfpp)
{}

int
xfs_free_extent_later(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type,
	unsigned int			free_flags)
{}

/*
 * Set up automatic freeing of unwritten space in the filesystem.
 *
 * This function attaches a paused deferred extent free item to the
 * transaction.  Pausing means that the EFI will be logged in the next
 * transaction commit, but the pending EFI will not be finished until the
 * pending item is unpaused.
 *
 * If the system goes down after the EFI has been persisted to the log but
 * before the pending item is unpaused, log recovery will find the EFI, fail to
 * find the EFD, and free the space.
 *
 * If the pending item is unpaused, the next transaction commit will log an EFD
 * without freeing the space.
 *
 * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the
 * @args structure are set to the relevant values.
 */
int
xfs_alloc_schedule_autoreap(
	const struct xfs_alloc_arg	*args,
	unsigned int			free_flags,
	struct xfs_alloc_autoreap	*aarp)
{}

/*
 * Cancel automatic freeing of unwritten space in the filesystem.
 *
 * Earlier, we created a paused deferred extent free item and attached it to
 * this transaction so that we could automatically roll back a new space
 * allocation if the system went down.  Now we want to cancel the paused work
 * item by marking the EFI stale so we don't actually free the space, unpausing
 * the pending item and logging an EFD.
 *
 * The caller generally should have already mapped the space into the ondisk
 * filesystem.  If the reserved space was partially used, the caller must call
 * xfs_free_extent_later to create a new EFI to free the unused space.
 */
void
xfs_alloc_cancel_autoreap(
	struct xfs_trans		*tp,
	struct xfs_alloc_autoreap	*aarp)
{}

/*
 * Commit automatic freeing of unwritten space in the filesystem.
 *
 * This unpauses an earlier _schedule_autoreap and commits to freeing the
 * allocated space.  Call this if none of the reserved space was used.
 */
void
xfs_alloc_commit_autoreap(
	struct xfs_trans		*tp,
	struct xfs_alloc_autoreap	*aarp)
{}
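
/*
 * Illustrative usage pattern for the three autoreap hooks above; the caller
 * context and the "used the space" condition are hypothetical:
 *
 *	error = xfs_alloc_schedule_autoreap(&args, free_flags, &aarp);
 *	if (error)
 *		return error;
 *	... write new metadata into the allocated space ...
 *	if (space_was_mapped_into_the_filesystem)
 *		xfs_alloc_cancel_autoreap(tp, &aarp);	keep the space
 *	else
 *		xfs_alloc_commit_autoreap(tp, &aarp);	free it again
 */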

#ifdef DEBUG
/*
 * Check if an AGF has a free extent record whose length is equal to
 * args->minlen.
 */
STATIC int
xfs_exact_minlen_extent_available(
	struct xfs_alloc_arg	*args,
	struct xfs_buf		*agbp,
	int			*stat)
{}
#endif

/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
int			/* error */
xfs_alloc_fix_freelist(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	uint32_t		alloc_flags)
{}

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block obtained.
 */
int
xfs_alloc_get_freelist(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agblock_t		*bnop,
	int			btreeblk)
{}

/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint32_t		fields)
{}

/*
 * Put the block on the freelist for the allocation group.
 */
int
xfs_alloc_put_freelist(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_buf		*agflbp,
	xfs_agblock_t		bno,
	int			btreeblk)
{}

/*
 * Check that this AGF/AGI header's sequence number and length match the AG
 * number and size in fsblocks.
 */
xfs_failaddr_t
xfs_validate_ag_length(
	struct xfs_buf		*bp,
	uint32_t		seqno,
	uint32_t		length)
{}

/*
 * Verify the AGF is consistent.
 *
 * We do not verify the AGFL indexes in the AGF are fully consistent here
 * because of issues with variable on-disk structure sizes. Instead, we check
 * the agfl indexes for consistency when we initialise the perag from the AGF
 * information after a read completes.
 *
 * If the index is inconsistent, then we mark the perag as needing an AGFL
 * reset. The first AGFL update performed then resets the AGFL indexes and
 * refills the AGFL with known good free blocks, allowing the filesystem to
 * continue operating normally at the cost of a few leaked free space blocks.
 */
static xfs_failaddr_t
xfs_agf_verify(
	struct xfs_buf		*bp)
{}

static void
xfs_agf_read_verify(
	struct xfs_buf	*bp)
{}

static void
xfs_agf_write_verify(
	struct xfs_buf	*bp)
{}

const struct xfs_buf_ops xfs_agf_buf_ops =;

/*
 * Read in the allocation group header (free/alloc section).
 */
int
xfs_read_agf(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	int			flags,
	struct xfs_buf		**agfbpp)
{}

/*
 * Read in the allocation group header (free/alloc section) and initialise the
 * perag structure if necessary. If the caller provides @agfbpp, then return the
 * locked buffer to the caller, otherwise free it.
 */
int
xfs_alloc_read_agf(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	int			flags,
	struct xfs_buf		**agfbpp)
{}

/*
 * Pre-process allocation arguments to set initial state that we don't require
 * callers to set up correctly, as well as bounds check the allocation args
 * that are set up.
 */
static int
xfs_alloc_vextent_check_args(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target,
	xfs_agnumber_t		*minimum_agno)
{}

/*
 * Prepare an AG for allocation. If the AG is not prepared to accept the
 * allocation, return failure.
 *
 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
 * modified to hold their own perag references.
 */
static int
xfs_alloc_vextent_prepare_ag(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{}

/*
 * Post-process allocation results to account for the allocation if it succeeded,
 * and set the allocated block number correctly for the caller.
 *
 * XXX: we should really be returning ENOSPC for ENOSPC, not
 * hiding it behind a "successful" NULLFSBLOCK allocation.
 */
static int
xfs_alloc_vextent_finish(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		minimum_agno,
	int			alloc_error,
	bool			drop_perag)
{}

/*
 * Allocate within a single AG only. This uses a best-fit length algorithm, so if
 * you need an exact-sized allocation without locality constraints, this is the
 * fastest way to do it.
 *
 * Caller is expected to hold a perag reference in args->pag.
 */
int
xfs_alloc_vextent_this_ag(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		agno)
{}

/*
 * Iterate all AGs trying to allocate an extent starting from @start_agno.
 *
 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
 * allocation attempts in @start_agno have locality information. If we fail to
 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
 * we attempt to allocate in, as there is no locality optimisation possible for
 * those allocations.
 *
 * On return, args->pag may be left referenced if we finish before the "all
 * failed" return point. The allocation finish still needs the perag, and
 * so the caller will release it once they've finished the allocation.
 *
 * When we wrap the AG iteration at the end of the filesystem, we have to be
 * careful not to wrap into AGs below ones we already have locked in the
 * transaction if we are doing a blocking iteration. This will result in an
 * out-of-order locking of AGFs and hence can cause deadlocks.
 */
static int
xfs_alloc_vextent_iterate_ags(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		minimum_agno,
	xfs_agnumber_t		start_agno,
	xfs_agblock_t		target_agbno,
	uint32_t		alloc_flags)
{}

/*
 * Iterate the AGs from the start AG to the end of the filesystem, trying
 * to allocate blocks. It starts with a near allocation attempt in the initial
 * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
 * back to zero if allowed by previous allocations in this transaction,
 * otherwise will wrap back to the start AG and run a second blocking pass to
 * the end of the filesystem.
 */
int
xfs_alloc_vextent_start_ag(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{}

/*
 * Iterate from the agno indicated via @target through to the end of the
 * filesystem attempting blocking allocation. This does not wrap or try a second
 * pass, so will not recurse into AGs lower than indicated by the target.
 */
int
xfs_alloc_vextent_first_ag(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{}

/*
 * Allocate at the exact block target or fail. Caller is expected to hold a
 * perag reference in args->pag.
 */
int
xfs_alloc_vextent_exact_bno(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{}

/*
 * Allocate an extent as close to the target as possible. If there are no
 * viable candidates in the AG, then fail the allocation.
 *
 * Caller may or may not have a per-ag reference in args->pag.
 */
int
xfs_alloc_vextent_near_bno(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{}

/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		**agbp)
{}

/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int
__xfs_free_extent(
	struct xfs_trans		*tp,
	struct xfs_perag		*pag,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type,
	bool				skip_discard)
{}

struct xfs_alloc_query_range_info {};

/* Format btree record and pass to our callback. */
STATIC int
xfs_alloc_query_range_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{}

/* Find all free space within a given range of blocks. */
int
xfs_alloc_query_range(
	struct xfs_btree_cur			*cur,
	const struct xfs_alloc_rec_incore	*low_rec,
	const struct xfs_alloc_rec_incore	*high_rec,
	xfs_alloc_query_range_fn		fn,
	void					*priv)
{}

/* Find all free space records. */
int
xfs_alloc_query_all(
	struct xfs_btree_cur			*cur,
	xfs_alloc_query_range_fn		fn,
	void					*priv)
{}

/*
 * Scan part of the keyspace of the free space and tell us if the area has no
 * records, is fully mapped by records, or is partially filled.
 */
int
xfs_alloc_has_records(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	enum xbtree_recpacking	*outcome)
{}

/*
 * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
 * error code or XFS_ITER_*.
 */
int
xfs_agfl_walk(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agflbp,
	xfs_agfl_walk_fn	walk_fn,
	void			*priv)
{}
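
/*
 * Illustrative sketch of the wrapped iteration implied above: visit slots
 * from flfirst through fllast, wrapping at the end of the AGFL array.
 * Hypothetical helper over a plain array of AG block numbers; assumes at
 * least one active slot.
 */
static inline int
example_agfl_walk(
	const uint32_t	*agfl_bno,	/* AGFL slot array */
	unsigned int	flfirst,	/* first active slot */
	unsigned int	fllast,		/* last active slot */
	unsigned int	agfl_size,	/* total slots in the array */
	int		(*walk_fn)(uint32_t bno, void *priv),
	void		*priv)
{
	unsigned int	i = flfirst;
	int		error;

	for (;;) {
		error = walk_fn(agfl_bno[i], priv);
		if (error)
			return error;		/* negative errno stops walk */
		if (i == fllast)
			break;
		if (++i == agfl_size)
			i = 0;			/* wrap to the array start */
	}

	return 0;
}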

int __init
xfs_extfree_intent_init_cache(void)
{}

void
xfs_extfree_intent_destroy_cache(void)
{}