/* linux/fs/btrfs/zoned.c */

// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "dev-replace.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "bio.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock:    0B (zone 0)
 * - first copy:          512G (zone starting at that offset)
 * - second copy:           4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

/* Shifts (log2 of the offsets) used to map a bytenr to its sb log copy. */
#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES	2

/*
 * Minimum of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
 * We do not expect the zone size to become larger than 8GiB or smaller than
 * 4MiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
#define BTRFS_MIN_ZONE_SIZE		SZ_4M

/* Size of one on-disk superblock expressed in 512B sectors. */
#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

/* Forward declarations; both functions are defined later in this file. */
static void wait_eb_writebacks(struct btrfs_block_group *block_group);
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);

/*
 * NOTE(review): function bodies in this copy of the file are elided ("{}").
 * The one-line comments below are inferred from names/signatures only —
 * confirm against the full implementation before relying on them.
 */

/* Presumably: true when the superblock log zone @zone has no room left. */
static inline bool sb_zone_is_full(const struct blk_zone *zone)
{}

/* Presumably: report-zones callback copying each zone descriptor into @data. */
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{}

/* Presumably: derive the superblock write pointer from a sb log zone pair. */
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{}

/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{}

/* Presumably: convert a zone number to its starting sector on @bdev. */
static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{}

/* Presumably: convert a zone number to its starting byte offset. */
static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fake a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{}

/* Likely: report zones from @pos, real or emulated; updates *nr_zones. */
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{}

/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: populate per-device zone info for every device of @fs_info. */
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{}

/* Likely: build @device->zone_info (optionally caching zone reports). */
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{}

/* Likely: free everything btrfs_get_dev_zone_info() allocated. */
void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{}

/* Likely: deep-copy zone info for dev-replace; check return for NULL. */
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
{}

/* Likely: fetch the single zone containing @pos on @device. */
static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
{}

/* Likely: detect whether any device mandates zoned mode. */
static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: validate zone sizes/alignment across devices and enable zoned mode. */
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{}

/* Likely: reject or adjust mount options incompatible with zoned mode. */
int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
				unsigned long long *mount_opt)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: compute the superblock read/write bytenr within a sb log zone pair. */
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{}

/* Likely: sb_log_location() keyed by mirror number on a raw block device. */
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{}

/* Likely: per-device wrapper of the above using cached zone info. */
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{}

/* Likely: true when @mirror's sb log zone pair exists on this device. */
static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{}

/* Likely: advance the cached sb log write pointer after a superblock write. */
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{}

/* Likely: reset both zones of @mirror's superblock log pair. */
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/*
 * Find allocatable zones within a given region.
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{}

/* Likely: mark the zone at @pos active; false when the device limit is hit. */
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{}

/* Likely: drop the active mark for the zone at @pos. */
static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{}

/* Likely: issue zone reset(s) covering [@physical, @physical + @length). */
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{}

/* Likely: reset any non-empty zones in the range before (re)allocation. */
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consist of conventional zones. It is pointed to the
 * end of the highest addressed extent in the block group as an allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{}

/* Fields elided in this copy (an empty struct is a GNU extension, not ISO C). */
struct zone_info {};

/* Likely: gather per-stripe zone state (wp, capacity, activeness) at @zone_idx. */
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
				struct zone_info *info, unsigned long *active,
				struct btrfs_chunk_map *map)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: derive the block group alloc offset for the SINGLE profile. */
static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
					 struct zone_info *info,
					 unsigned long *active)
{}

/* Likely: derive the block group alloc offset for the DUP profile. */
static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
				      struct btrfs_chunk_map *map,
				      struct zone_info *zone_info,
				      unsigned long *active)
{}

/* Likely: derive the block group alloc offset for RAID1 variants. */
static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{}

/* Likely: derive the block group alloc offset for RAID0. */
static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{}

/* Likely: derive the block group alloc offset for RAID10. */
static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
					 struct btrfs_chunk_map *map,
					 struct zone_info *zone_info,
					 unsigned long *active)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: top-level loader dispatching to the per-profile helpers above. */
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{}

/* Likely: account space made unusable by the zoned append-only constraint. */
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{}

/* Likely: decide whether @bbio should be submitted as REQ_OP_ZONE_APPEND. */
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{}

/* Likely: capture the physical location a zone-append write landed at. */
void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{}

/* Likely: rewrite the ordered extent's logical address after zone append. */
static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
					u64 logical)
{}

/* Likely: split @ordered at (@logical, @len); returns success as bool. */
static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
				      u64 logical, u64 len)
{}

/* Likely: fix up ordered extent(s) once a zoned write fully completes. */
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: check @ctx's block group is active, reporting it via @active_bg. */
static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
			       struct btrfs_block_group **active_bg)
{}

/*
 * Check if @ctx->eb is aligned to the write pointer.
 *
 * Return:
 *   0:        @ctx->eb is at the write pointer. You can write it.
 *   -EAGAIN:  There is a hole. The caller should handle the case.
 *   -EBUSY:   There is a hole, but the caller can just bail out.
 */
int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				   struct btrfs_eb_write_context *ctx)
{}

/* Likely: zero out [@physical, @physical + @length) on @device. */
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{}

/* Likely: report the zone backing @logical into @zone. */
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/*
 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos to a write pointer of dev-replace
 * source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				    u64 physical_start, u64 physical_pos)
{}

/*
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{}

/* Likely: wait for outstanding extent-buffer writeback in @block_group. */
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{}

/* Likely: finish the zones of @block_group; @fully_written skips waiting? */
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{}

/* Likely: public wrapper calling do_zone_finish(). */
int btrfs_zone_finish(struct btrfs_block_group *block_group)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: true if a new zone for @flags can be activated on @fs_devices. */
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{}

/* Likely: finish a zone when a write reaches its end at (@logical, @length). */
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{}

/* Likely: deferred-work counterpart of btrfs_zone_finish_endio(). */
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{}

/* Likely: queue zone-finish work for @bg once @eb's writeback completes. */
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb)
{}

/* Likely: clear @bg from its role as the data relocation block group. */
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{}

/* Likely: release cached per-device zone report buffers. */
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{}

/* Likely: decide whether background reclaim should run on this zoned fs. */
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{}

/* Likely: release the data reloc block group covering (@logical, @length). */
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{}

/* NOTE(review): bodies elided in this copy; added comments are inferred. */

/* Likely: pick one finishable block group and zone-finish it. */
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{}

/* Likely: activate one block group in @space_info, finishing another first
 * when @do_finish is set and no activation budget remains.
 */
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{}

/*
 * Reserve zones for one metadata block group, one tree-log block group, and one
 * system block group.
 */
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
{}