// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>

/* u64 bitmap */

struct xbitmap64_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t		bn_start;

	/* Last set bit of this interval. */
	uint64_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint64_t		__bn_subtree_last;
};

/* Define our own interval tree type with uint64_t parameters. */

#define START(node) ((node)->bn_start)
#define LAST(node)  ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
			uint64_t last);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
		       uint64_t last);

INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap64_tree)
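
/*
 * Illustrative sketch, not part of this file: the iter_first/iter_next
 * helpers generated above visit every node whose [bn_start, bn_last]
 * interval overlaps the query window.  The helper name below is
 * hypothetical, and it assumes the bitmap's cached rbtree root is the
 * xb_root member declared in scrub/bitmap.h.
 */
static unsigned int __maybe_unused
xexample64_count_overlaps(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		last)
{
	struct xbitmap64_node	*bn;
	unsigned int		nr = 0;

	for (bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	     bn != NULL;
	     bn = xbitmap64_tree_iter_next(bn, start, last))
		nr++;

	return nr;
}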

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				  struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				  struct xbitmap64_node, bn_rbnode))

/* Clear a range of this bitmap. */
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{}

/* Set a range of this bitmap. */
int
xbitmap64_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{}

/* Free everything related to this bitmap. */
void
xbitmap64_destroy(
	struct xbitmap64	*bitmap)
{}

/* Initialize this bitmap. */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{}
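
/*
 * Illustrative sketch, not part of this file: the usual lifecycle is
 * init, a series of set/clear calls, then destroy.  Ranges are given as
 * (start, length) pairs and may merge with or split existing intervals.
 * The helper name and the block numbers are hypothetical.
 */
static int __maybe_unused
xexample64_build_bitmap(
	struct xbitmap64	*bitmap)
{
	int			error;

	xbitmap64_init(bitmap);

	/* Mark blocks 100-199 and 300-349 as set. */
	error = xbitmap64_set(bitmap, 100, 100);
	if (!error)
		error = xbitmap64_set(bitmap, 300, 50);

	/* Punch a hole at blocks 120-129, splitting the first run. */
	if (!error)
		error = xbitmap64_clear(bitmap, 120, 10);

	if (error)
		xbitmap64_destroy(bitmap);
	return error;
}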

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, leaving
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
int
xbitmap64_disunion(
	struct xbitmap64	*bitmap,
	struct xbitmap64	*sub)
{}
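
/*
 * Illustrative sketch, not part of this file: how a repair caller might
 * apply the subtraction described above.  @old_blocks would be built from
 * the rmapbt records for the owner, and @in_use from the blocks of the
 * metadata structures that are not being rebuilt; whatever remains in
 * @old_blocks afterwards is presumed dead and may be reaped.  The helper
 * name is hypothetical.
 */
static int __maybe_unused
xexample64_find_dead_blocks(
	struct xbitmap64	*old_blocks,
	struct xbitmap64	*in_use)
{
	/* old_blocks &= ~in_use */
	return xbitmap64_disunion(old_blocks, in_use);
}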

/* How many bits are set in this bitmap? */
uint64_t
xbitmap64_hweight(
	struct xbitmap64	*bitmap)
{}
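
/*
 * Illustrative sketch, not part of this file: given the interval
 * representation, the population count is just the sum of the interval
 * lengths, i.e. bn_last - bn_start + 1 for every node.  Hypothetical
 * helper name; the real function may be structured differently.
 */
static uint64_t __maybe_unused
xexample64_weight(
	struct xbitmap64	*bitmap)
{
	struct xbitmap64_node	*bn;
	uint64_t		ret = 0;

	for_each_xbitmap64_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}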

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap64_walk(
	struct xbitmap64	*bitmap,
	xbitmap64_walk_fn		fn,
	void			*priv)
{}
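
/*
 * Illustrative sketch, not part of this file: it assumes the
 * xbitmap64_walk_fn callback declared in scrub/bitmap.h takes each run of
 * set bits as a (start, length) pair plus a private pointer.  The
 * hypothetical helpers below count the number of set runs.
 */
static int
xexample64_count_one_run(
	uint64_t		start,
	uint64_t		len,
	void			*priv)
{
	uint64_t		*nr_runs = priv;

	(*nr_runs)++;
	return 0;	/* a nonzero return would stop the walk */
}

static uint64_t __maybe_unused
xexample64_count_runs(
	struct xbitmap64	*bitmap)
{
	uint64_t		nr_runs = 0;

	xbitmap64_walk(bitmap, xexample64_count_one_run, &nr_runs);
	return nr_runs;
}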

/* Does this bitmap have no bits set at all? */
bool
xbitmap64_empty(
	struct xbitmap64	*bitmap)
{}

/* Is the start of the range set or clear?  And for how long? */
bool
xbitmap64_test(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		*len)
{}
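
/*
 * Illustrative sketch, not part of this file: per the comment above,
 * xbitmap64_test() reports whether @start is set and clips *len to the
 * length of the run that shares that state.  A hypothetical helper can
 * use that to ask whether an entire range is set.
 */
static bool __maybe_unused
xexample64_range_fully_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	uint64_t		run = len;

	/* If the run came back clipped, part of the range is clear. */
	return xbitmap64_test(bitmap, start, &run) && run == len;
}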

/* u32 bitmap */

struct xbitmap32_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t		bn_start;

	/* Last set bit of this interval. */
	uint32_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint32_t		__bn_subtree_last;
};

/* Define our own interval tree type with uint32_t parameters. */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
			  uint32_t last);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
			 uint32_t last);

INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap32_tree)

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				  struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				  struct xbitmap32_node, bn_rbnode))

/* Clear a range of this bitmap. */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{}

/* Set a range of this bitmap. */
int
xbitmap32_set(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{}

/* Free everything related to this bitmap. */
void
xbitmap32_destroy(
	struct xbitmap32	*bitmap)
{}

/* Set up a per-AG block bitmap. */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, leaving
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
int
xbitmap32_disunion(
	struct xbitmap32	*bitmap,
	struct xbitmap32	*sub)
{}

/* How many bits are set in this bitmap? */
uint32_t
xbitmap32_hweight(
	struct xbitmap32	*bitmap)
{}

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap32_walk(
	struct xbitmap32	*bitmap,
	xbitmap32_walk_fn	fn,
	void			*priv)
{}

/* Does this bitmap have no bits set at all? */
bool
xbitmap32_empty(
	struct xbitmap32	*bitmap)
{}

/* Is the start of the range set or clear?  And for how long? */
bool
xbitmap32_test(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		*len)
{}

/* Count the number of set regions in this bitmap. */
uint32_t
xbitmap32_count_set_regions(
	struct xbitmap32	*bitmap)
{}
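
/*
 * Illustrative sketch, not part of this file: combining the region count
 * with the weight lets a caller ask whether the bitmap describes exactly
 * one contiguous run of @len set bits.  Hypothetical helper name.
 */
static bool __maybe_unused
xexample32_is_single_run(
	struct xbitmap32	*bitmap,
	uint32_t		len)
{
	return xbitmap32_count_set_regions(bitmap) == 1 &&
	       xbitmap32_hweight(bitmap) == len;
}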