/* linux/fs/btrfs/bio.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2022 Christoph Hellwig.
 */

#include <linux/bio.h>
#include "bio.h"
#include "ctree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "zoned.h"
#include "file-item.h"
#include "raid-stripe-tree.h"

/*
 * Bio sets and mempool backing btrfs bio allocation.
 * NOTE(review): presumably initialized in btrfs_bioset_init() and torn down
 * in btrfs_bioset_exit() below — confirm, bodies not visible here.
 */
static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
static struct bio_set btrfs_repair_bioset;
static mempool_t btrfs_failed_bio_pool;

/*
 * Per-failed-read repair state.
 * NOTE(review): presumably allocated from btrfs_failed_bio_pool; members are
 * elided in this snapshot of the file.
 */
struct btrfs_failed_bio {};

/* Is this a data path I/O that needs storage layer checksum and repair? */
static inline bool is_data_bbio(struct btrfs_bio *bbio)
{}

/*
 * NOTE(review): presumably tests whether @bbio is attached to an ordered
 * extent (write path bookkeeping) — confirm, body not visible here.
 */
static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
{}

/*
 * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
 * is already initialized by the block layer.
 */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
		    btrfs_bio_end_io_t end_io, void *private)
{}

/*
 * Allocate a btrfs_bio structure.  The btrfs_bio is the main I/O container for
 * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
 *
 * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
 * a mempool.
 */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
				  struct btrfs_fs_info *fs_info,
				  btrfs_bio_end_io_t end_io, void *private)
{}

/*
 * NOTE(review): presumably splits @orig_bbio so the first fragment covers at
 * most @map_length bytes, with @use_append selecting zone-append constraints —
 * confirm against callers, body not visible here.
 */
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *orig_bbio,
					 u64 map_length, bool use_append)
{}

/* Free a bio that was never submitted to the underlying device. */
static void btrfs_cleanup_bio(struct btrfs_bio *bbio)
{}

/*
 * Internal completion helper for a single bbio.
 * NOTE(review): body not visible — verify ordering vs. btrfs_bio_end_io().
 */
static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
{}

/*
 * Public completion entry point: record @status and complete @bbio.
 * NOTE(review): body not visible — confirm it funnels into __btrfs_bio_end_io().
 */
void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{}

/* Forward declaration; defined below with the other bi_end_io handlers. */
static void btrfs_orig_write_end_io(struct bio *bio);

/*
 * NOTE(review): presumably copies the error status from a split child @bbio
 * back to the original @orig_bbio — confirm, body not visible here.
 */
static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
				       struct btrfs_bio *orig_bbio)
{}

/*
 * Complete the original (pre-split) bbio.
 * NOTE(review): body not visible — verify against callers.
 */
static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
{}

/* Pick the mirror to try after @cur_mirror during read repair. */
static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{}

/* Pick the mirror that was tried before @cur_mirror during read repair. */
static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
{}

/*
 * Finish the repair cycle tracked by @fbio.
 * NOTE(review): presumably runs once all outstanding repair bios complete.
 */
static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{}

/* Completion handler for a repair read issued to mirror device @dev. */
static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
				 struct btrfs_device *dev)
{}

/*
 * Try to kick off a repair read to the next available mirror for a bad sector.
 *
 * This primarily tries to recover good data to serve the actual read request,
 * but also tries to write the good data back to the bad mirror(s) when a
 * read succeeded to restore the redundancy.
 */
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
						  u32 bio_offset,
						  struct bio_vec *bv,
						  struct btrfs_failed_bio *fbio)
{}

/*
 * NOTE(review): presumably verifies checksums of a completed read and kicks
 * off repair_one_sector() for any bad sector — confirm, body not visible here.
 */
static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
{}

/* Log an I/O error for @bio against the device it was issued to. */
static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
{}

/*
 * Select the end-io workqueue appropriate for @bio.
 * NOTE(review): selection criteria not visible here — verify.
 */
static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
						struct bio *bio)
{}

/* Deferred end-io processing, run from the end-io workqueue. */
static void btrfs_end_bio_work(struct work_struct *work)
{}

/* bi_end_io handler for bios mapped to a single device/stripe. */
static void btrfs_simple_end_io(struct bio *bio)
{}

/* bi_end_io handler for bios that went through the RAID56 layer. */
static void btrfs_raid56_end_io(struct bio *bio)
{}

/*
 * bi_end_io handler for the original bio of a mirrored write.
 * NOTE(review): inferred from the name/forward declaration — verify.
 */
static void btrfs_orig_write_end_io(struct bio *bio)
{}

/* bi_end_io handler for cloned bios of a mirrored write (see name; verify). */
static void btrfs_clone_write_end_io(struct bio *bio)
{}

/* Issue @bio to the underlying device @dev. NOTE(review): body not visible. */
static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{}

/* Submit the copy of a mirrored write that targets stripe @dev_nr of @bioc. */
static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{}

/*
 * Low-level submission once the chunk mapping (@bioc or the single-stripe
 * @smap) is known. NOTE(review): body not visible — verify dispatch logic.
 */
static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
			       struct btrfs_io_stripe *smap, int mirror_num)
{}

/*
 * Compute data checksums for a write bbio.
 * NOTE(review): presumably dispatches to file-item csum helpers — verify.
 */
static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
{}

/*
 * Async submit bios are used to offload expensive checksumming onto the worker
 * threads.
 * NOTE(review): members elided in this snapshot of the file.
 */
struct async_submit_bio {};

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.   All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the btree.
 */
static void run_one_async_start(struct btrfs_work *work)
{}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.   All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 *
 * If called with @do_free == true, then it will free the work struct.
 */
static void run_one_async_done(struct btrfs_work *work, bool do_free)
{}

/*
 * Decide whether @bbio's write should be offloaded to the async workers.
 * NOTE(review): criteria not visible here — verify.
 */
static bool should_async_write(struct btrfs_bio *bbio)
{}

/*
 * Submit bio to an async queue.
 *
 * Return true if the work has been successfully submitted, else false.
 */
static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
				struct btrfs_io_context *bioc,
				struct btrfs_io_stripe *smap, int mirror_num)
{}

/*
 * Map and submit (a fragment of) @bbio for one chunk.
 * NOTE(review): presumably returns whether the whole bbio was consumed, so
 * btrfs_submit_bio() can loop over split fragments — confirm, body not
 * visible here.
 */
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{}

/* Main entry point for submitting a btrfs_bio; see btrfs_bio_alloc() above. */
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{}

/*
 * Submit a repair write.
 *
 * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
 * RAID setup.  Here we only want to write the one bad copy, so we do the
 * mapping ourselves and submit the bio directly.
 *
 * The I/O is issued synchronously to block the repair read completion from
 * freeing the bio.
 */
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct folio *folio,
			    unsigned int folio_offset, int mirror_num)
{}

/*
 * Submit a btrfs_bio based repair write.
 *
 * If @dev_replace is true, the write would be submitted to dev-replace target.
 */
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
{}

/*
 * Module-init-time setup of the bio sets / mempool declared at the top of
 * this file.  NOTE(review): body not visible — verify error unwinding.
 */
int __init btrfs_bioset_init(void)
{}

/* Tear down everything created by btrfs_bioset_init(). */
void __cold btrfs_bioset_exit(void)
{}