linux/fs/xfs/libxfs/xfs_exchmaps.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_trace.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
#include "xfs_errortag.h"
#include "xfs_health.h"
#include "xfs_exchmaps_item.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_symlink_remote.h"

struct kmem_cache	*xfs_exchmaps_intent_cache;

/* bmbt mappings adjacent to a pair of records. */
struct xfs_exchmaps_adjacent {
	struct xfs_bmbt_irec		left1;
	struct xfs_bmbt_irec		right1;
	struct xfs_bmbt_irec		left2;
	struct xfs_bmbt_irec		right2;
};

#define ADJACENT_INIT { \
	.left1  = { .br_startblock = HOLESTARTBLOCK }, \
	.right1 = { .br_startblock = HOLESTARTBLOCK }, \
	.left2  = { .br_startblock = HOLESTARTBLOCK }, \
	.right2 = { .br_startblock = HOLESTARTBLOCK }, \
}

/* Information to reset reflink flag / CoW fork state after an exchange. */

/*
 * If the reflink flag is set on either inode, make sure it has an incore CoW
 * fork, since all reflink inodes must have them.  If there's a CoW fork and it
 * has mappings in it, make sure the inodes are tagged appropriately so that
 * speculative preallocations can be GC'd if we run low on space.
 */
static inline void
xfs_exchmaps_ensure_cowfork(
	struct xfs_inode	*ip)
{}
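
/*
 * A minimal sketch (an assumption, not the kernel implementation) of the
 * CoW fork handling described above, built only from existing helpers:
 * xfs_is_reflink_inode(), xfs_ifork_init_cow(), xfs_ifork_ptr(), and the
 * cowblocks tag helpers from xfs_icache.c.
 */
static inline void
xfs_exchmaps_ensure_cowfork_sketch(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*cfork;

	/* All reflink inodes must carry an incore CoW fork. */
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);

	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
	if (!cfork)
		return;

	/* Tag the inode so speculative CoW preallocations can be GC'd. */
	if (cfork->if_bytes > 0)
		xfs_inode_set_cowblocks_tag(ip);
	else
		xfs_inode_clear_cowblocks_tag(ip);
}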

/*
 * Adjust the on-disk inode size upwards if needed so that we never add
 * mappings into the file past EOF.  This is crucial so that log recovery won't
 * get confused by the sudden appearance of post-eof mappings.
 */
STATIC void
xfs_exchmaps_update_size(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	xfs_fsize_t		new_isize)
{}
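
/*
 * Illustrative sketch (an assumption, not the real body) of the on-disk
 * size bump described above: if the incoming mapping ends beyond the
 * current on-disk size, raise i_disk_size to the end of that mapping,
 * capped at the new incore size, and log the inode core.
 */
static inline void
xfs_exchmaps_update_size_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	xfs_fsize_t		new_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsize_t		len;

	if (new_isize < 0)
		return;

	len = min(XFS_FSB_TO_B(mp, imap->br_startoff + imap->br_blockcount),
		  new_isize);
	if (len <= ip->i_disk_size)
		return;

	ip->i_disk_size = len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}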

/* Advance the incore state tracking after exchanging a mapping. */
static inline void
xmi_advance(
	struct xfs_exchmaps_intent	*xmi,
	const struct xfs_bmbt_irec	*irec)
{}

/* Do we still have more mappings to exchange? */
static inline bool
xmi_has_more_exchange_work(const struct xfs_exchmaps_intent *xmi)
{}

/* Do we have post-operation cleanups to perform? */
static inline bool
xmi_has_postop_work(const struct xfs_exchmaps_intent *xmi)
{}

/* Check all mappings to make sure we can actually exchange them. */
int
xfs_exchmaps_check_forks(
	struct xfs_mount		*mp,
	const struct xfs_exchmaps_req	*req)
{}

#ifdef CONFIG_XFS_QUOTA
/* Log the actual updates to the quota accounting. */
static inline void
xfs_exchmaps_update_quota(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi,
	struct xfs_bmbt_irec		*irec1,
	struct xfs_bmbt_irec		*irec2)
{}
#else
# define xfs_exchmaps_update_quota(tp, xmi, irec1, irec2)	((void)0)
#endif /* CONFIG_XFS_QUOTA */

/* Decide if we want to skip this mapping from file1. */
static inline bool
xfs_exchmaps_can_skip_mapping(
	struct xfs_exchmaps_intent	*xmi,
	struct xfs_bmbt_irec		*irec)
{}

/*
 * Walk forward through the file ranges in @xmi until we find two different
 * mappings to exchange.  If there is work to do, return the mappings;
 * otherwise we've reached the end of the range and xmi_blockcount will be
 * zero.
 *
 * If the walk skips over a pair of mappings to the same storage, save them as
 * the left records in @adj (if provided) so that the simulation phase can
 * avoid an extra lookup.
 */
static int
xfs_exchmaps_find_mappings(
	struct xfs_exchmaps_intent	*xmi,
	struct xfs_bmbt_irec		*irec1,
	struct xfs_bmbt_irec		*irec2,
	struct xfs_exchmaps_adjacent	*adj)
{}
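
/*
 * Hand-wavy sketch of the walk described above; this is an assumption
 * about the flow, not the actual implementation, and it omits the @adj
 * caching entirely.  It leans on xfs_bmapi_read() and the small helpers
 * declared earlier in this file.
 */
static int
xfs_exchmaps_find_mappings_sketch(
	struct xfs_exchmaps_intent	*xmi,
	struct xfs_bmbt_irec		*irec1,
	struct xfs_bmbt_irec		*irec2)
{
	int				nmap;
	int				error;

	while (xmi_has_more_exchange_work(xmi)) {
		/* Read the mapping under the cursor in each file. */
		nmap = 1;
		error = xfs_bmapi_read(xmi->xmi_ip1, xmi->xmi_startoff1,
				xmi->xmi_blockcount, irec1, &nmap, 0);
		if (error)
			return error;

		nmap = 1;
		error = xfs_bmapi_read(xmi->xmi_ip2, xmi->xmi_startoff2,
				xmi->xmi_blockcount, irec2, &nmap, 0);
		if (error)
			return error;

		/* Nothing about file1's mapping needs exchanging?  Skip it. */
		if (xfs_exchmaps_can_skip_mapping(xmi, irec1)) {
			xmi_advance(xmi, irec1);
			continue;
		}

		return 0;
	}

	return 0;
}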

/* Exchange these two mappings. */
static void
xfs_exchmaps_one_step(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi,
	struct xfs_bmbt_irec		*irec1,
	struct xfs_bmbt_irec		*irec2)
{}

/* Convert inode2's leaf attr fork back to shortform, if possible. */
STATIC int
xfs_exchmaps_attr_to_sf(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{}

/* Convert inode2's block dir fork back to shortform, if possible. */
STATIC int
xfs_exchmaps_dir_to_sf(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{}

/* Convert inode2's remote symlink target back to shortform, if possible. */
STATIC int
xfs_exchmaps_link_to_sf(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{}

/* Clear the reflink flag after an exchange. */
static inline void
xfs_exchmaps_clear_reflink(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{}

/* Finish whatever work might come after an exchange operation. */
static int
xfs_exchmaps_do_postop_work(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{}

/* Finish one step in a mapping exchange operation, possibly relogging. */
int
xfs_exchmaps_finish_one(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{}

/*
 * Compute the amount of bmbt blocks we should reserve for each file.  In the
 * worst case, each exchange will fill a hole with a new mapping, which could
 * result in a btree split every time we add a new leaf block.
 */
static inline uint64_t
xfs_exchmaps_bmbt_blocks(
	struct xfs_mount		*mp,
	const struct xfs_exchmaps_req	*req)
{}
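
/*
 * Worked version of the worst-case reasoning above, as a sketch (an
 * assumption, not the kernel's formula): reserve the standard
 * per-extent-add worst case, XFS_EXTENTADD_SPACE_RES(), for every exchange
 * we expect to perform against the fork in question.
 */
static inline uint64_t
xfs_exchmaps_bmbt_blocks_sketch(
	struct xfs_mount	*mp,
	uint64_t		nr_exchanges,
	int			whichfork)
{
	return nr_exchanges * XFS_EXTENTADD_SPACE_RES(mp, whichfork);
}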

/* Compute the space we should reserve for the rmap btree expansions. */
static inline uint64_t
xfs_exchmaps_rmapbt_blocks(
	struct xfs_mount		*mp,
	const struct xfs_exchmaps_req	*req)
{}

/* Estimate the bmbt and rmapbt overhead required to exchange mappings. */
int
xfs_exchmaps_estimate_overhead(
	struct xfs_exchmaps_req		*req)
{}

/* Decide if we can merge two real mappings. */
static inline bool
xmi_can_merge(
	const struct xfs_bmbt_irec	*b1,
	const struct xfs_bmbt_irec	*b2)
{}
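
/*
 * Sketch of the usual bmbt contiguity test (an assumption): two real
 * mappings can merge when they are adjacent in both file offset and disk
 * block space, share the same written/unwritten state, and the combined
 * length still fits in a single bmbt record.
 */
static inline bool
xmi_can_merge_sketch(
	const struct xfs_bmbt_irec	*b1,
	const struct xfs_bmbt_irec	*b2)
{
	if (b1->br_startoff + b1->br_blockcount != b2->br_startoff)
		return false;
	if (b1->br_startblock + b1->br_blockcount != b2->br_startblock)
		return false;
	if (b1->br_state != b2->br_state)
		return false;
	if (b1->br_blockcount + b2->br_blockcount > XFS_MAX_BMBT_EXTLEN)
		return false;
	return true;
}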

/*
 * Decide if we can merge three mappings.  The caller must ensure that none
 * of the three mappings is a hole or a delalloc reservation.
 */
static inline bool
xmi_can_merge_all(
	const struct xfs_bmbt_irec	*l,
	const struct xfs_bmbt_irec	*m,
	const struct xfs_bmbt_irec	*r)
{}

/* Contiguity state of the mapping currently in the fork. */
#define CLEFT_CONTIG	0x01
#define CRIGHT_CONTIG	0x02
#define CHOLE		0x04
#define CBOTH_CONTIG	(CLEFT_CONTIG | CRIGHT_CONTIG)

/* Contiguity state of the new mapping being exchanged in. */
#define NLEFT_CONTIG	0x10
#define NRIGHT_CONTIG	0x20
#define NHOLE		0x40
#define NBOTH_CONTIG	(NLEFT_CONTIG | NRIGHT_CONTIG)

/* Estimate the effect of a single exchange on mapping count. */
static inline int
xmi_delta_nextents_step(
	struct xfs_mount		*mp,
	const struct xfs_bmbt_irec	*left,
	const struct xfs_bmbt_irec	*curr,
	const struct xfs_bmbt_irec	*new,
	const struct xfs_bmbt_irec	*right)
{}

/* Make sure we don't overflow the extent (mapping) counters. */
static inline int
xmi_ensure_delta_nextents(
	struct xfs_exchmaps_req	*req,
	struct xfs_inode	*ip,
	int64_t			delta)
{}

/* Find the next mapping after irec. */
static inline int
xmi_next(
	struct xfs_inode		*ip,
	int				bmap_flags,
	const struct xfs_bmbt_irec	*irec,
	struct xfs_bmbt_irec		*nrec)
{}

int __init
xfs_exchmaps_intent_init_cache(void)
{
	xfs_exchmaps_intent_cache = kmem_cache_create("xfs_exchmaps_intent",
			sizeof(struct xfs_exchmaps_intent), 0, 0, NULL);

	return xfs_exchmaps_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_exchmaps_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_exchmaps_intent_cache);
	xfs_exchmaps_intent_cache = NULL;
}

/*
 * Decide if we will exchange the reflink flags between the two files after the
 * exchange.  The only time we want to do this is if we're exchanging all
 * mappings under EOF and the inode reflink flags have different states.
 */
static inline bool
xmi_can_exchange_reflink_flags(
	const struct xfs_exchmaps_req	*req,
	unsigned int			reflink_state)
{}

/* Allocate and initialize a new incore intent item from a request. */
struct xfs_exchmaps_intent *
xfs_exchmaps_init_intent(
	const struct xfs_exchmaps_req	*req)
{}

/*
 * Estimate the number of exchange operations and the number of file blocks
 * in each file that will be affected by the exchange operation.
 */
int
xfs_exchmaps_estimate(
	struct xfs_exchmaps_req		*req)
{}

/* Set the reflink flag before an operation. */
static inline void
xfs_exchmaps_set_reflink(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{}

/*
 * If either file has shared blocks and we're exchanging data forks, we must
 * flag the other file as having shared blocks so that we get the shared-block
 * rmap functions if we need to fix up the rmaps.
 */
void
xfs_exchmaps_ensure_reflink(
	struct xfs_trans			*tp,
	const struct xfs_exchmaps_intent	*xmi)
{}
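
/*
 * Minimal sketch of the rule above (an assumption, not the real logic):
 * if exactly one of the two files is marked reflinked, mark the other one
 * as well before touching data fork mappings so that rmap updates take the
 * shared-block paths.
 */
static inline void
xfs_exchmaps_ensure_reflink_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	if (xfs_is_reflink_inode(ip1) && !xfs_is_reflink_inode(ip2))
		xfs_exchmaps_set_reflink(tp, ip2);
	else if (xfs_is_reflink_inode(ip2) && !xfs_is_reflink_inode(ip1))
		xfs_exchmaps_set_reflink(tp, ip1);
}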

/* Set the large extent count flag before an operation if needed. */
static inline void
xfs_exchmaps_ensure_large_extent_counts(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{}

/* Widen the extent counter fields of both inodes if necessary. */
void
xfs_exchmaps_upgrade_extent_counts(
	struct xfs_trans			*tp,
	const struct xfs_exchmaps_intent	*xmi)
{}

/*
 * Schedule the exchange of a range of mappings from one inode to another.
 *
 * The use of file mapping exchange log intent items ensures the operation can
 * be resumed even if the system goes down.  The caller must commit the
 * transaction to start the work.
 *
 * The caller must ensure that the inodes are joined to the transaction and
 * ILOCKed; they will still be joined to the transaction at exit.
 */
void
xfs_exchange_mappings(
	struct xfs_trans		*tp,
	const struct xfs_exchmaps_req	*req)
{}
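
/*
 * Rough caller-side example (an assumption about usage, not code from the
 * kernel): estimate the work, allocate a transaction, lock and join both
 * inodes, schedule the exchange, and commit.  Error unwinding is elided,
 * the request fields are assumed to have been filled in beforehand, and
 * req->resblks is assumed to carry the reservation computed by
 * xfs_exchmaps_estimate().
 */
static int
xfs_exchange_mappings_example(
	struct xfs_mount		*mp,
	struct xfs_exchmaps_req		*req)
{
	struct xfs_trans		*tp;
	int				error;

	error = xfs_exchmaps_estimate(req);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, req->resblks, 0, 0,
			&tp);
	if (error)
		return error;

	xfs_lock_two_inodes(req->ip1, XFS_ILOCK_EXCL, req->ip2,
			XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, req->ip1, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, req->ip2, XFS_ILOCK_EXCL);

	xfs_exchange_mappings(tp, req);

	return xfs_trans_commit(tp);
}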