/* linux/fs/xfs/scrub/scrub.h */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__

struct xfs_scrub;

struct xchk_relax {
	unsigned long	next_resched;	/* jiffies of next cond_resched */
	unsigned int	resched_nr;	/* calls since last check */
	bool		interruptible;	/* exit on fatal signal? */
};

/* Yield to the scheduler at most 10x per second. */
#define XCHK_RELAX_NEXT	(jiffies + (HZ / 10))

#define INIT_XCHK_RELAX \
	(struct xchk_relax){ \
		.next_resched	= XCHK_RELAX_NEXT, \
		.resched_nr	= 0, \
		.interruptible	= true, \
	}

/*
 * Relax during a scrub operation and exit if there's a fatal signal pending.
 *
 * If preemption is disabled, we need to yield to the scheduler every now and
 * then so that we don't run afoul of the soft lockup watchdog or RCU stall
 * detector.  cond_resched calls are somewhat expensive (~5ns) so we want to
 * ratelimit this to 10x per second.  Amortize the cost of the other checks by
 * only doing it once every 100 calls.
 */
static inline int xchk_maybe_relax(struct xchk_relax *widget)
{
	/* Amortize the cost of the scheduler and signal checks. */
	if (likely(++widget->resched_nr < 100))
		return 0;
	widget->resched_nr = 0;

	/* Ratelimit cond_resched to at most 10x per second. */
	if (unlikely(time_after_eq(jiffies, widget->next_resched))) {
		cond_resched();
		widget->next_resched = XCHK_RELAX_NEXT;
	}

	if (widget->interruptible && fatal_signal_pending(current))
		return -EINTR;

	return 0;
}
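
/*
 * Sketch of the intended call pattern (the loop body uses hypothetical
 * helper names): embed a relax widget in a long-running walk and poll
 * it once per iteration.
 *
 *	struct xchk_relax	relax = INIT_XCHK_RELAX;
 *	int			error;
 *
 *	while (have_more_work()) {
 *		error = xchk_maybe_relax(&relax);
 *		if (error)
 *			return error;	(-EINTR: fatal signal pending)
 *		process_one_item();
 *	}
 */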

/*
 * Standard flags for allocating memory within scrub.  NOFS context is
 * configured by the process allocation scope.  Scrub and repair must be able
 * to back out gracefully if there isn't enough memory.  Force-cast to avoid
 * complaints from static checkers.
 */
#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
					 __GFP_RETRY_MAYFAIL))
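
/*
 * Example (sketch): a scrubber allocating a scratch buffer; on failure
 * we back out with -ENOMEM rather than invoking the OOM killer.  The
 * buffer size variable here is hypothetical.
 *
 *	sc->buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
 *	if (!sc->buf)
 *		return -ENOMEM;
 */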

/*
 * For opening files by handle for fsck operations, we don't trust the inumber
 * or the allocation state; therefore, perform an untrusted lookup.  We don't
 * want these inodes to pollute the cache, so mark them for immediate removal.
 */
#define XCHK_IGET_FLAGS	(XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
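
/*
 * Example (sketch): looking up the caller-supplied inumber for a
 * scrub-by-handle operation.  xfs_iget() is the real lookup entry
 * point; sm_ino comes from the scrub request, and full error handling
 * is elided.
 *
 *	error = xfs_iget(sc->mp, sc->tp, sc->sm->sm_ino, XCHK_IGET_FLAGS,
 *			0, &ip);
 *	if (error)
 *		return error;
 */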

/* Type info and names for the scrub types. */
enum xchk_type {
	ST_NONE = 1,	/* disabled */
	ST_PERAG,	/* per-AG metadata */
	ST_FS,		/* per-FS metadata */
	ST_INODE,	/* per-inode metadata */
};

struct xchk_meta_ops {
	/* Acquire whatever resources are needed for the operation. */
	int		(*setup)(struct xfs_scrub *sc);

	/* Examine metadata for errors. */
	int		(*scrub)(struct xfs_scrub *sc);

	/* Repair or optimize the metadata. */
	int		(*repair)(struct xfs_scrub *sc);

	/*
	 * Re-check the metadata after a repair if the regular scrub
	 * function is not sufficient to evaluate the repair work.
	 */
	int		(*repair_eval)(struct xfs_scrub *sc);

	/* Decide if we want to return this type of metadata. */
	bool		(*has)(struct xfs_mount *mp);

	/* type describing required/allowed inputs */
	enum xchk_type	type;
};

/* Buffer pointers and btree cursors for an entire AG. */
struct xchk_ag {
	struct xfs_perag	*pag;

	/* AG btree roots */
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agi_bp;

	/* AG btree cursors */
	struct xfs_btree_cur	*bno_cur;
	struct xfs_btree_cur	*cnt_cur;
	struct xfs_btree_cur	*ino_cur;
	struct xfs_btree_cur	*fino_cur;
	struct xfs_btree_cur	*rmap_cur;
	struct xfs_btree_cur	*refc_cur;
};

struct xfs_scrub {
	/* General scrub state. */
	struct xfs_mount		*mp;
	struct xfs_scrub_metadata	*sm;
	const struct xchk_meta_ops	*ops;
	struct xfs_trans		*tp;

	/* File that scrub was called with. */
	struct file			*file;

	/*
	 * File that is undergoing the scrub operation.  This can differ
	 * from the file that scrub was called with if we're checking
	 * file-based fs metadata (e.g. rt bitmaps) or if we're doing a
	 * scrub-by-handle for something that can't be opened directly
	 * (e.g. symlinks).
	 */
	struct xfs_inode		*ip;

	/* Kernel memory buffer used by scrubbers; freed at teardown. */
	void				*buf;

	/*
	 * Clean up resources owned by whatever is in the buffer.  Cleanup
	 * can be deferred with this hook as a means for scrub functions
	 * to pass data to repair functions.  This function must not free
	 * the buffer itself.
	 */
	void				(*buf_cleanup)(void *buf);

	/* Lock flags for @ip. */
	uint				ilock_flags;

	/* See the XCHK/XREP state flags below. */
	unsigned int			flags;

	/*
	 * The XFS_SICK_* flags that correspond to the metadata being
	 * scrubbed or repaired.  We will use this mask to update the
	 * in-core fs health status with whatever we find.
	 */
	unsigned int			sick_mask;

	/* next time we want to cond_resched() */
	struct xchk_relax		relax;

	/* State tracking for single-AG operations. */
	struct xchk_ag			sa;
};

/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
#define XCHK_TRY_HARDER		(1U << 0)  /* can't get resources, try again */
#define XCHK_HAVE_FREEZE_PROT	(1U << 1)  /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN	(1U << 2)  /* defer ops draining enabled */
#define XCHK_NEED_DRAIN		(1U << 3)  /* scrub needs to drain defer ops */
#define XCHK_FSGATES_QUOTA	(1U << 4)  /* quota live update enabled */
#define XCHK_FSGATES_DIRENTS	(1U << 5)  /* directory live update enabled */
#define XCHK_FSGATES_RMAP	(1U << 6)  /* rmapbt live update enabled */
#define XREP_RESET_PERAG_RESV	(1U << 30) /* must reset AG space reservation */
#define XREP_ALREADY_FIXED	(1U << 31) /* checking our repair work */

/*
 * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
 * is enabled only for this particular online fsck.  When not in use, the
 * features are gated off via dynamic code patching, which is why the state
 * must be enabled during scrub setup and can only be torn down afterwards.
 */
#define XCHK_FSGATES_ALL	(XCHK_FSGATES_DRAIN | \
				 XCHK_FSGATES_QUOTA | \
				 XCHK_FSGATES_DIRENTS | \
				 XCHK_FSGATES_RMAP)
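
/*
 * Example (sketch): teardown drops only the fsgates that this scrub
 * request actually turned on; xfs_drain_wait_disable() is assumed here
 * to be the drain gate's disable hook, and the other gates follow the
 * same pattern.
 *
 *	if (sc->flags & XCHK_FSGATES_DRAIN)
 *		xfs_drain_wait_disable();
 *	sc->flags &= ~XCHK_FSGATES_ALL;
 */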

struct xfs_scrub_subord {
	struct xfs_scrub	sc;
	struct xfs_scrub	*parent_sc;
	unsigned int		old_smtype;
	unsigned int		old_smflags;
};

struct xfs_scrub_subord *xchk_scrub_create_subord(struct xfs_scrub *sc,
		unsigned int subtype);
void xchk_scrub_free_subord(struct xfs_scrub_subord *sub);
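
/*
 * Example (sketch): a scrubber that needs to run another scrubber as a
 * subordinate (e.g. the rt summary checker re-running the rt bitmap
 * scrubber) creates a child context, invokes its ->scrub method, and
 * frees it.  Error handling is elided.
 *
 *	struct xfs_scrub_subord	*sub;
 *	int			error;
 *
 *	sub = xchk_scrub_create_subord(sc, XFS_SCRUB_TYPE_RTBITMAP);
 *	error = sub->sc.ops->scrub(&sub->sc);
 *	xchk_scrub_free_subord(sub);
 */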

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	if (xchk_maybe_relax(&sc->relax)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
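
/*
 * Example (sketch): the canonical scrub loop polls for termination once
 * per record; the record iterator and per-record check are hypothetical
 * names.
 *
 *	int error = 0;
 *
 *	while (xchk_next_record(sc, &rec)) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		error = xchk_check_record(sc, &rec);
 *		if (error)
 *			break;
 *	}
 */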

static inline int xchk_nothing(struct xfs_scrub *sc)
{
	return -ENOENT;
}

/* Metadata scrubbers */
int xchk_tester(struct xfs_scrub *sc);
int xchk_superblock(struct xfs_scrub *sc);
int xchk_agf(struct xfs_scrub *sc);
int xchk_agfl(struct xfs_scrub *sc);
int xchk_agi(struct xfs_scrub *sc);
int xchk_allocbt(struct xfs_scrub *sc);
int xchk_iallocbt(struct xfs_scrub *sc);
int xchk_rmapbt(struct xfs_scrub *sc);
int xchk_refcountbt(struct xfs_scrub *sc);
int xchk_inode(struct xfs_scrub *sc);
int xchk_bmap_data(struct xfs_scrub *sc);
int xchk_bmap_attr(struct xfs_scrub *sc);
int xchk_bmap_cow(struct xfs_scrub *sc);
int xchk_directory(struct xfs_scrub *sc);
int xchk_xattr(struct xfs_scrub *sc);
int xchk_symlink(struct xfs_scrub *sc);
int xchk_parent(struct xfs_scrub *sc);
int xchk_dirtree(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_rtbitmap(struct xfs_scrub *sc);
int xchk_rtsummary(struct xfs_scrub *sc);
#else
# define xchk_rtbitmap		xchk_nothing
# define xchk_rtsummary		xchk_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_quota(struct xfs_scrub *sc);
int xchk_quotacheck(struct xfs_scrub *sc);
#else
# define xchk_quota		xchk_nothing
# define xchk_quotacheck	xchk_nothing
#endif
int xchk_fscounters(struct xfs_scrub *sc);
int xchk_nlinks(struct xfs_scrub *sc);

/* cross-referencing helpers */
void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_only_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
		xfs_extlen_t len);
#else
# define xchk_xref_is_used_rt_space(sc, rtbno, len)	do { } while (0)
#endif

#endif	/* __XFS_SCRUB_SCRUB_H__ */