/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwlock_types.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs.h>
#include <uapi/linux/btrfs_tree.h>
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"

struct inode;
struct super_block;
struct kobject;
struct reloc_control;
struct crypto_shash;
struct ulist;
struct btrfs_device;
struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;

#define BTRFS_MAX_EXTENT_SIZE

#define BTRFS_OLDEST_GENERATION

#define BTRFS_EMPTY_DIR_SIZE

#define BTRFS_DIRTY_METADATA_THRESH

#define BTRFS_SUPER_INFO_OFFSET
#define BTRFS_SUPER_INFO_SIZE
static_assert();

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS
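
/*
 * Illustrative sketch (hypothetical helper, not part of this header): turning
 * the unit count above into a byte reservation.  It assumes the units are fed
 * to btrfs_calc_metadata_size(), which is declared further below in this file.
 *
 *	static u64 example_unlink_reservation(const struct btrfs_fs_info *fs_info)
 *	{
 *		// One metadata unit per item listed above, all modified via COW.
 *		return btrfs_calc_metadata_size(fs_info, BTRFS_UNLINK_METADATA_UNITS);
 *	}
 */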

/*
 * The reserved space at the beginning of each device.  It covers the primary
 * super block, leaves room for potential use by other tools such as
 * bootloaders, and lowers the damage caused by an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED
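
/*
 * Illustrative sketch (hypothetical helper): any physical allocation handed
 * out on a device is expected to start at or beyond this reserved range.
 *
 *	static bool example_clears_reserved_range(u64 physical_start)
 *	{
 *		// The first BTRFS_DEVICE_RANGE_RESERVED bytes belong to the
 *		// primary super block and the bootloader area.
 *		return physical_start >= BTRFS_DEVICE_RANGE_RESERVED;
 *	}
 */
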
/*
 * Runtime (in-memory) states of the filesystem.
 */
enum {};

enum {};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {};

/*
 * Compat flags that we support.  Unknown compat flags do not prevent
 * mounting, but if any incompat flags are set other than the ones specified
 * below then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP
#define BTRFS_FEATURE_COMPAT_SAFE_SET
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR

#define BTRFS_FEATURE_COMPAT_RO_SUPP

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * Features under development, such as extent tree v2 support, are
	 * enabled only under CONFIG_BTRFS_DEBUG.
	 */
#define BTRFS_FEATURE_INCOMPAT_SUPP

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR
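
/*
 * Illustrative sketch (hypothetical helper, modeled on the mount-time feature
 * check): unknown incompat bits must cause the mount to fail, while unknown
 * compat bits are tolerated.
 *
 *	static bool example_incompat_supported(u64 incompat_flags)
 *	{
 *		// Any bit outside the supported mask means the kernel cannot
 *		// safely interpret the on-disk format.
 *		return (incompat_flags & ~BTRFS_FEATURE_INCOMPAT_SUPP) == 0;
 *	}
 */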

#define BTRFS_DEFAULT_COMMIT_INTERVAL
#define BTRFS_DEFAULT_MAX_INLINE

struct btrfs_dev_replace {};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do fewer seeky writes.  They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters.  Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy.  Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS
#define BTRFS_DISCARD_INDEX_UNUSED
#define BTRFS_DISCARD_INDEX_START

struct btrfs_discard_ctl {};
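
/*
 * Illustrative sketch (hypothetical helper and threshold table): mapping an
 * extent size to one of the discard lists described above.  Index
 * BTRFS_DISCARD_INDEX_UNUSED is reserved for completely free block groups;
 * the remaining lists are walked from the largest filter size down.
 *
 *	static int example_discard_index(const u64 *min_size, u64 len)
 *	{
 *		int i;
 *
 *		// min_size[] is a hypothetical, monotonically decreasing table
 *		// of per-list minimum discard sizes.
 *		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS; i++) {
 *			if (len >= min_size[i])
 *				return i;
 *		}
 *		return BTRFS_NR_DISCARD_LISTS - 1;
 *	}
 */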

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {};

struct btrfs_fs_info {};

#define page_to_inode(_page)
#define folio_to_inode(_folio)

#define page_to_fs_info(_page)
#define folio_to_fs_info(_folio)

#define inode_to_fs_info(_inode)

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * are required to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{}
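
/*
 * Illustrative sketch of the arithmetic described above, assuming fs_info
 * exposes sectorsize_bits and csums_per_leaf (the exact field names are an
 * assumption here):
 *
 *	num_csums = csum_bytes >> fs_info->sectorsize_bits;
 *	leaves    = DIV_ROUND_UP(num_csums, fs_info->csums_per_leaf);
 *
 * i.e. one checksum per sector, packed csums_per_leaf at a time into leaves.
 */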

/*
 * Use this if we would be adding new items, as we could split nodes as we COW
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
						 unsigned num_items)
{}
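
/*
 * Illustrative sketch of the sizing rule behind the two helpers above,
 * assuming a reservation of one full tree path (nodesize * BTRFS_MAX_LEVEL)
 * per item (the exact factors are an assumption here):
 *
 *	// COW only: one path worth of metadata per item.
 *	cow_bytes    = (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 *	// Insertions may also split nodes on the way down, so double it.
 *	insert_bytes = cow_bytes * 2;
 */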

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r)

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{}

/*
 * Count how many fs_info->max_extent_size sized extents are needed to cover
 * the given @size.
 */
static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{}
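
/*
 * Illustrative sketch of the intended result, assuming a simple round-up
 * division (the real helper may clamp or special-case small sizes):
 *
 *	count = DIV_ROUND_UP(size, fs_info->max_extent_size);
 *
 * e.g. with a 128M max_extent_size, a 300M range counts as 3 extents.
 */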

bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
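
/*
 * Illustrative usage sketch (the enumerator name is assumed here): exclusive
 * operations are bracketed by start/finish so that e.g. a resize and a device
 * replace cannot run concurrently.
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return -EBUSY;	// another exclusive operation is running
 *	// ... perform the resize ...
 *	btrfs_exclop_finish(fs_info);
 */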

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags)

#define __btrfs_fs_compat_ro(fs_info, flags)

#define btrfs_set_fs_incompat(__fs_info, opt)

#define btrfs_clear_fs_incompat(__fs_info, opt)

#define btrfs_fs_incompat(fs_info, opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt)

#define btrfs_fs_compat_ro(fs_info, opt)

#define btrfs_clear_opt(o, opt)
#define btrfs_set_opt(o, opt)
#define btrfs_raw_test_opt(o, opt)
#define btrfs_test_opt(fs_info, opt)
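
/*
 * Illustrative usage sketch (the specific feature and option names, and the
 * callee, are chosen for the example): the macros above take the short name
 * and expand it to the full flag internally.
 *
 *	if (btrfs_test_opt(fs_info, SSD))
 *		do_ssd_specific_setup();	// mount option check (hypothetical callee)
 *	if (!btrfs_fs_incompat(fs_info, RAID56))
 *		return -EOPNOTSUPP;		// incompat feature check
 *	btrfs_set_fs_incompat(fs_info, RAID56);	// persist a new incompat bit
 */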

static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep.  This function is used to check the status of the
 * fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{}
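
/*
 * Illustrative usage sketch (hypothetical loop body for the cleaner thread):
 * the cleaner checks this before doing any work and simply sleeps when the
 * filesystem is read-only or being unmounted.
 *
 *	if (btrfs_need_cleaner_sleep(fs_info)) {
 *		// nothing to do, go back to sleep
 *	} else {
 *		// run deferred cleanup (dead subvolumes, deferred iputs, ...)
 *	}
 */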

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{}

#define BTRFS_FS_ERROR(fs_info)

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif