linux/fs/btrfs/volumes.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {};
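
/*
 * The table contents are elided here.  As a rough illustration only (field
 * names follow the btrfs_raid_attr definition in volumes.h; treat the exact
 * values as an assumption, not the authoritative table), the RAID1 entry
 * would look roughly like:
 *
 *	[BTRFS_RAID_RAID1] = {
 *		.sub_stripes	= 1,
 *		.dev_stripes	= 1,
 *		.devs_max	= 2,
 *		.devs_min	= 2,
 *		.tolerated_failures = 1,
 *		.devs_increment	= 2,
 *		.ncopies	= 2,
 *		.nparity	= 0,
 *		.raid_name	= "raid1",
 *		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
 *		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
 *	},
 */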

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{}
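
/*
 * A minimal sketch of the conversion (not necessarily the exact upstream
 * body; BTRFS_BG_FLAG_TO_INDEX() is assumed from fs.h): mask out everything
 * but the profile bits and map the single set bit to an array index, with no
 * profile bit set meaning SINGLE.
 *
 *	const u64 profile = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
 *
 *	if (!profile)
 *		return BTRFS_RAID_SINGLE;
 *	return BTRFS_BG_FLAG_TO_INDEX(profile);
 */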

const char *btrfs_bg_type_to_raid_name(u64 flags)
{}

int btrfs_nr_parity_stripes(u64 type)
{}
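
/*
 * A one-line sketch: the parity stripe count is a per-profile property kept
 * in btrfs_raid_array[], so the body presumably reduces to:
 *
 *	return btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].nparity;
 */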

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general,
 * but in the mount context it could be used to exclude list modifications by
 * e.g. the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
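
/*
 * Illustration of the nesting order above (a sketch, not taken from a real
 * call site): when several of these locks are needed at once, they are
 * acquired outermost first and released in the reverse order.
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */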

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:    if not NULL, copy the UUID to fs_devices::fsid and to
 *           fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{}

static void btrfs_free_device(struct btrfs_device *device)
{}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{}

void __exit btrfs_cleanup_fs_uuids(void)
{}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{}
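
/*
 * A plausible sketch of the match logic (the upstream body is elided): the
 * fsid must always match, while the metadata fsid is compared only when the
 * caller supplied one.
 *
 *	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
 *		return false;
 *	if (!metadata_fsid)
 *		return true;
 *	return memcmp(metadata_fsid, fs_devices->metadata_uuid,
 *		      BTRFS_FSID_SIZE) == 0;
 */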

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{}

/*
 *  Search and remove all stale devices (which are not mounted).  When both
 *  inputs are NULL, it will search and release all stale devices.
 *
 *  @devt:         Optional. When provided, it will release all unmounted
 *                 devices matching this devt only.
 *  @skip_device:  Optional. Will skip this device when searching for the stale
 *                 devices.
 *
 *  Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{}

static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{}

const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{}

static void btrfs_close_bdev(struct btrfs_device *device)
{}

static void btrfs_close_one_device(struct btrfs_device *device)
{}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				blk_mode_t flags, void *holder)
{}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{}

int btrfs_forget_devices(dev_t devt)
{}

static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are registered
 * in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such chunk is found, record the end of it in *start.
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{}

static u64 dev_extent_search_start(struct btrfs_device *device)
{}
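
/*
 * Sketch under the assumption that regular devices keep the first chunk of
 * the disk (BTRFS_DEVICE_RANGE_RESERVED, covering the primary superblock)
 * out of the allocatable range, while zoned devices reserve zones for
 * superblock logging separately:
 *
 *	switch (device->fs_devices->chunk_alloc_policy) {
 *	case BTRFS_CHUNK_ALLOC_REGULAR:
 *		return BTRFS_DEVICE_RANGE_RESERVED;
 *	case BTRFS_CHUNK_ALLOC_ZONED:
 *		return 0;
 *	default:
 *		BUG();
 *	}
 */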

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space that we find
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{}

static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{}

static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{}

/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{}

static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
				     struct block_device *bdev, int copy_num)
{}

void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
{}

int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct file **bdev_file)
{}

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{}

/*
 * Populate args from device at path.
 *
 * @fs_info:	the filesystem
 * @args:	the args to populate
 * @path:	the path to the device
 *
 * This will read the super block of the device at @path and populate @args with
 * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
 * lookup a device to operate on, but need to do it before we take any locks.
 * This properly handles the special case of "missing" that a user may pass in,
 * and does some basic sanity checks.  The caller must make sure that @path is
 * properly NUL terminated before calling in, and must call
 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
 * uuid buffers.
 *
 * Return: 0 for success, -errno for failure
 */
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path)
{}

/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
 * allocate our ->uuid and ->fsid pointers; everybody else uses local variables
 * that don't need to be freed.
 */
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
{}

struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{}

static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
{}

/*
 * Splice seed devices into the sprout fs_devices.
 * Generate a new fsid for the sprouted read-write filesystem.
 */
static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *seed_devices)
{}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{}

int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{}

static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{}

struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
						    u64 logical, u64 length)
{}

struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
					     u64 logical, u64 length)
{}

/*
 * Find the mapping containing the given logical extent.
 *
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{}

static int remove_chunk_item(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map, u64 chunk_offset)
{}

int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{}

int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{}

static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{}

/*
 * return 1 : a data chunk was allocated successfully,
 * return <0: errors occurred during the allocation of a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{}

static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
					   const struct btrfs_disk_balance_args *disk)
{}

static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
					   const struct btrfs_balance_args *cpu)
{}

static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{}

static int del_balance_item(struct btrfs_fs_info *fs_info)
{}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{}

/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{}
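
/*
 * A sketch of the profile filter (chunk_to_extended() is assumed as the
 * usual normalization helper): convert the chunk type to the extended
 * profile representation and filter the chunk out when it matches none of
 * the requested profiles.
 *
 *	chunk_type = chunk_to_extended(chunk_type) &
 *				BTRFS_EXTENDED_PROFILE_MASK;
 *	if (bargs->profiles & chunk_type)
 *		return 0;
 *	return 1;
 */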

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{}

static u64 calc_data_stripes(u64 type, int num_stripes)
{}
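
/*
 * A minimal sketch: subtract the parity stripes and divide by the number of
 * copies, both taken from btrfs_raid_array[].
 *
 *	const int index = btrfs_bg_flags_to_raid_index(type);
 *	const int ncopies = btrfs_raid_array[index].ncopies;
 *	const int nparity = btrfs_raid_array[index].nparity;
 *
 *	return (num_stripes - nparity) / ncopies;
 */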

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{}

static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{}

/*
 * See if a given profile is valid and reduced.
 *
 * @flags:     profile to validate
 * @extended:  if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{}

/*
 * Validate target profile against allowed profiles and return true if it's OK.
 * Otherwise print the error message and return false.
 */
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
		const struct btrfs_balance_args *bargs,
		u64 allowed, const char *type)
{}

/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				 u32 size_buf)
{}

static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{}

/*
 * Should be called with the balance mutex held.
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{}

static int balance_kthread(void *data)
{}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{}

int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{}

int btrfs_uuid_scan_kthread(void *data)
{}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{}

/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks. The chunk relocation
 * code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{}

static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{}

/*
 * Sort the devices in descending order by max_avail, then by total_avail.
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{}
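
/*
 * Sketch of the comparator (descending by max_avail, with total_avail as
 * the tie-breaker), in the form expected by sort():
 *
 *	const struct btrfs_device_info *di_a = a;
 *	const struct btrfs_device_info *di_b = b;
 *
 *	if (di_a->max_avail > di_b->max_avail)
 *		return -1;
 *	if (di_a->max_avail < di_b->max_avail)
 *		return 1;
 *	if (di_a->total_avail > di_b->total_avail)
 *		return -1;
 *	if (di_a->total_avail < di_b->total_avail)
 *		return 1;
 *	return 0;
 */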

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{}

/*
 * Structure used internally by the btrfs_create_chunk() function.
 * Wraps the needed parameters.
 */
struct alloc_chunk_ctl {};

static void init_alloc_chunk_ctl_policy_regular(
				struct btrfs_fs_devices *fs_devices,
				struct alloc_chunk_ctl *ctl)
{}

static void init_alloc_chunk_ctl_policy_zoned(
				      struct btrfs_fs_devices *fs_devices,
				      struct alloc_chunk_ctl *ctl)
{}

static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
				 struct alloc_chunk_ctl *ctl)
{}

static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{}

static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{}

static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
				    struct btrfs_device_info *devices_info)
{}

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{}

static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
{}

static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
{}

void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{}

EXPORT_FOR_TESTS
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{}

EXPORT_FOR_TESTS
struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
{}

static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
			struct alloc_chunk_ctl *ctl,
			struct btrfs_device_info *devices_info)
{}

struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					    u64 type)
{}

/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
 * system chunks.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
 * phases.
 */
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg)
{}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{}

static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{}
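
/*
 * A minimal sketch: the number of tolerated failures is a per-profile
 * property kept in btrfs_raid_array[].
 *
 *	const int index = btrfs_bg_flags_to_raid_index(map->type);
 *
 *	return btrfs_raid_array[index].tolerated_failures;
 */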

bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{}

void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct btrfs_chunk_map *map, int first,
			    int dev_replace_is_ongoing)
{}

static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
						       u64 logical,
						       u16 total_stripes)
{}

void btrfs_get_bioc(struct btrfs_io_context *bioc)
{}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{}

/*
 * Please note that discard won't be sent to the target device of a device
 * replace.
 */
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes)
{}

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{}

static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      struct btrfs_io_geometry *io_geom)
{}

static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
			    struct btrfs_io_geometry *io_geom)
{}

static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
			 u64 *length, struct btrfs_io_stripe *dst,
			 struct btrfs_chunk_map *map,
			 struct btrfs_io_geometry *io_geom)
{}

static bool is_single_device_io(struct btrfs_fs_info *fs_info,
				const struct btrfs_io_stripe *smap,
				const struct btrfs_chunk_map *map,
				int num_alloc_stripes,
				enum btrfs_map_op op, int mirror_num)
{}

static void map_blocks_raid0(const struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom)
{}
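
/*
 * Sketch of the RAID0 math, assuming io_geom->stripe_nr arrives as the
 * global stripe number within the chunk: the device is picked round-robin
 * and the stripe number is scaled down to a per-device one. RAID0 has a
 * single copy, so there is no mirror to choose.
 *
 *	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
 *	io_geom->stripe_nr /= map->num_stripes;
 */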

static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
			     struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom,
			     bool dev_replace_is_ongoing)
{}

static void map_blocks_dup(const struct btrfs_chunk_map *map,
			   struct btrfs_io_geometry *io_geom)
{}

static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
			      struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom,
			      bool dev_replace_is_ongoing)
{}

static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
				    struct btrfs_io_geometry *io_geom,
				    u64 logical, u64 *length)
{}

static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
				   struct btrfs_io_geometry *io_geom)
{}

static void map_blocks_single(const struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom)
{}

/*
 * Map one logical range to one or more physical ranges.
 *
 * @length:		(Mandatory) mapped length of this run.
 *			One logical range can be split into different segments
 *			due to factors like zones and RAID0/5/6/10 stripe
 *			boundaries.
 *
 * @bioc_ret:		(Mandatory) returned btrfs_io_context structure,
 *			which has one or more physical ranges (btrfs_io_stripe)
 *			recorded inside.
 *			Caller should call btrfs_put_bioc() to free it after use.
 *
 * @smap:		(Optional) single physical range optimization.
 *			If the map request can be fulfilled by one single
 *			physical range, and this parameter is not NULL,
 *			then @bioc_ret would be NULL, and @smap would be
 *			updated.
 *
 * @mirror_num_ret:	(Mandatory) returned mirror number if the original
 *			value is 0.
 *
 *			Mirror number 0 means to choose any live mirrors.
 *
 *			For non-RAID56 profiles, non-zero mirror_num means
 *			the Nth mirror. (e.g. mirror_num 1 means the first
 *			copy).
 *
 *			For RAID56 profile, mirror 1 means rebuild from P and
 *			the remaining data stripes.
 *
 *			For RAID6 profile, mirror > 2 means mark another
 *			data/P stripe error and rebuild from the remaining
 *			stripes.
 */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret,
		    struct btrfs_io_stripe *smap, int *mirror_num_ret)
{}

static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
				      const struct btrfs_fs_devices *fs_devices)
{}

static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
				  const struct btrfs_device *device)
{}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args)
{}

static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{}

/*
 * Allocate new device struct, set up devid and UUID.
 *
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 * @path:	a pointer to device path if available, NULL otherwise.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{}

u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{}
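
/*
 * A sketch, assuming btrfs_chunk_map carries the chunk length in chunk_len:
 * the per-device stripe length is the chunk length divided by the number of
 * data stripes.
 *
 *	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
 *
 *	return div_u64(map->chunk_len, data_stripes);
 */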

#if BITS_PER_LONG == 32
/*
 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give an early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
 */
static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return;

	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		return;

	btrfs_warn_32bit_limit(fs_info);
}
#endif

static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
						  u64 devid, u8 *uuid)
{}

static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{}

static void fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{}

static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{}

/*
 * Check if all chunks in the fs are OK for a read-write degraded mount.
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{}

static void readahead_tree_node_children(struct extent_buffer *node)
{}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{}

int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{}

/*
 * Called from commit_transaction(). Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{}

/*
 * Update the size and bytes used for each device where it changed.  This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{}
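
/*
 * A minimal sketch: for these simple profiles the factor is just the copy
 * count from the raid table.
 *
 *	const int index = btrfs_bg_flags_to_raid_index(flags);
 *
 *	return btrfs_raid_array[index].ncopies;
 */
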
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{}

static int relocating_repair_kthread(void *data)
{}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{}

static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{}

/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, which only
 * updates the contents of a single device. It does not update any other
 * mirrors nor go through the RMW path.
 *
 * Callers should ensure:
 *
 * - Call btrfs_bio_counter_inc_blocked() first
 * - The range does not cross stripe boundary
 * - Has a valid @mirror_num passed in.
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{}