linux/drivers/md/raid1.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <[email protected]>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <[email protected]>
 * Various fixes by Neil Brown <[email protected]>
 *
 * Changes by Peter T. Breuer <[email protected]> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define RAID_1_10_NAME
#include "raid1-10.c"

#define START(node)
#define LAST(node)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{}
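
/*
 * The INTERVAL_TREE_DEFINE() above generates raid1_rb_insert(),
 * raid1_rb_remove(), raid1_rb_iter_first() and raid1_rb_iter_next() for
 * struct serial_info, keyed on a [start, last] sector range.  The sketch
 * below shows how an overlap query for write serialization could look
 * with those helpers; it is an illustration only, not the body of the
 * check_and_add_serial()/remove_serial() pair above, and it assumes the
 * caller already holds whatever lock protects @root.
 */
static bool __maybe_unused raid1_example_range_is_serialized(struct rb_root_cached *root,
							     sector_t lo, sector_t hi)
{
	/* Is there any serial_info whose [start, last] intersects [lo, hi]? */
	return raid1_rb_iter_first(root, lo, hi) != NULL;
}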

/*
 * For a resync bio, the r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{}
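
/*
 * A minimal sketch of the lookup described above, assuming the
 * get_resync_pages() helper and the raid_bio back-pointer provided by
 * raid1-10.c (included above).  Illustration only; the real body of
 * get_resync_r1bio() may differ.
 */
static inline struct r1bio *raid1_example_resync_r1bio(struct bio *bio)
{
	struct resync_pages *rp = get_resync_pages(bio);

	/* The owning r1bio was stashed here when the resync bio was built. */
	return rp->raid_bio;
}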

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{}

#define RESYNC_DEPTH
#define RESYNC_SECTORS
#define RESYNC_WINDOW
#define RESYNC_WINDOW_SECTORS
#define CLUSTER_RESYNC_WINDOW
#define CLUSTER_RESYNC_WINDOW_SECTORS

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{}

static void r1buf_pool_free(void *__r1_bio, void *data)
{}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{}

static void free_r1bio(struct r1bio *r1_bio)
{}

static void put_buf(struct r1bio *r1_bio)
{}

static void reschedule_retry(struct r1bio *r1_bio)
{}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{}

static void raid_end_bio_io(struct r1bio *r1_bio)
{}
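
/*
 * A minimal sketch of the completion step described above: translate the
 * r1bio state into a block-layer status and complete the original bio.
 * Illustration only; the real call_bio_endio()/raid_end_bio_io() also
 * handle behind-write accounting and barrier bookkeeping.
 */
static void __maybe_unused raid1_example_end_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}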

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{}
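
/*
 * A sketch of the estimator mentioned above: remember where the last
 * completed request ended so that read balancing can prefer the disk
 * whose head is already nearby.  Illustration only.
 */
static inline void raid1_example_update_head_pos(struct r1conf *conf, int disk,
						 struct r1bio *r1_bio)
{
	conf->mirrors[disk].head_position = r1_bio->sector + r1_bio->sectors;
}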

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{}
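
/*
 * A sketch of the lookup described above: the r1bio keeps one bio pointer
 * per mirror slot (including replacement slots), so the disk number is
 * simply the slot whose pointer matches.  Illustration only; the real
 * find_bio_disk() also updates the head position estimate.
 */
static int __maybe_unused raid1_example_find_bio_disk(struct r1bio *r1_bio,
						      struct bio *bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++)
		if (r1_bio->bios[i] == bio)
			return i;

	return -1;	/* should never happen */
}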

static void raid1_end_read_request(struct bio *bio)
{}

static void close_write(struct r1bio *r1_bio)
{}

static void r1_bio_write_done(struct r1bio *r1_bio)
{}

static void raid1_end_write_request(struct bio *bio)
{}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{}

static void update_read_sectors(struct r1conf *conf, int disk,
				sector_t this_sector, int len)
{}

static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			     int *max_sectors)
{}

static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			  int *max_sectors)
{}

static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			    int *max_sectors)
{}

static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
{}

/*
 * If the buffered sequential IO size exceeds the optimal iosize, check
 * whether there is an idle disk. If so, choose the idle disk.
 */
static bool should_choose_next(struct r1conf *conf, int disk)
{}
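
/*
 * A simplified sketch of the policy described above: once the current
 * sequential run on @disk has grown past the device's optimal IO size,
 * prefer any other mirror that is completely idle.  The run-length
 * parameters and the bare rdev access below are illustrative assumptions;
 * the real should_choose_next() uses per-mirror bookkeeping and proper
 * synchronisation.
 */
static bool __maybe_unused raid1_example_prefer_idle_disk(struct r1conf *conf,
							  int disk,
							  sector_t run_sectors,
							  sector_t opt_sectors)
{
	int i;

	if (!opt_sectors || run_sectors <= opt_sectors)
		return false;

	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		if (i != disk && rdev && atomic_read(&rdev->nr_pending) == 0)
			return true;
	}

	return false;
}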

static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
{}

struct read_balance_ctl {};

static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
{}

/*
 * This routine returns the disk from which the requested read should be done.
 *
 * 1) If resync is in progress, find the first usable disk and use it even if it
 * has some bad blocks.
 *
 * 2) Now that there is no resync, loop through all disks, skipping slow disks
 * and disks with bad blocks for now; only pay attention to the key disk
 * choice.
 *
 * 3) If we've made it this far, look for disks with bad blocks and choose the
 * one with the largest number of readable sectors.
 *
 * 4) If we are all the way at the end, we have no choice but to use a disk even
 * if it is write-mostly.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
			int *max_sectors)
{}
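
/*
 * A sketch of the staged strategy documented above, expressed with the
 * helpers declared earlier in this file and raid1_should_read_first(),
 * which is assumed to come from raid1-10.c.  The ordering mirrors steps
 * 1)-4) of the comment; the real read_balance() also clears the FailFast
 * hint and does per-disk sector accounting that is omitted here.
 */
static int __maybe_unused raid1_example_read_balance(struct r1conf *conf,
						     struct r1bio *r1_bio,
						     int *max_sectors)
{
	int disk;

	/* 1) resync/recovery in progress: take the first usable disk. */
	if (raid1_should_read_first(conf->mddev, r1_bio->sector,
				    r1_bio->sectors))
		return choose_first_rdev(conf, r1_bio, max_sectors);

	/* 2) normal case: best disk, skipping slow/bad-block disks. */
	disk = choose_best_rdev(conf, r1_bio);
	if (disk >= 0) {
		*max_sectors = r1_bio->sectors;
		return disk;
	}

	/* 3) fall back to the disk with the most readable sectors. */
	disk = choose_bb_rdev(conf, r1_bio, max_sectors);
	if (disk >= 0)
		return disk;

	/* 4) last resort: a write-mostly / slow disk. */
	return choose_slow_rdev(conf, r1_bio, max_sectors);
}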

static void wake_up_barrier(struct r1conf *conf)
{}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{}

static void flush_pending_writes(struct r1conf *conf)
{}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{}

static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{}

static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{}

static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{}

static void _allow_barrier(struct r1conf *conf, int idx)
{}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{}
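
/*
 * A much-simplified sketch of the counting scheme described in the
 * "Barriers...." comment above.  The real implementation splits these
 * counters into per-bucket arrays indexed by sector and supports nowait
 * semantics; the single counter pair and the example names below are
 * purely illustrative.
 */
struct raid1_example_barrier {
	spinlock_t		lock;
	wait_queue_head_t	wait;
	int			barrier;	/* background IO in flight */
	int			nr_pending;	/* normal IO in flight */
};

static void __maybe_unused raid1_example_raise_barrier(struct raid1_example_barrier *b)
{
	spin_lock_irq(&b->lock);
	/* Wait for all pending normal IO to drain... */
	wait_event_lock_irq(b->wait, b->nr_pending == 0, b->lock);
	/* ...then count this resync/recovery activity. */
	b->barrier++;
	spin_unlock_irq(&b->lock);
}

static void __maybe_unused raid1_example_wait_barrier(struct raid1_example_barrier *b)
{
	spin_lock_irq(&b->lock);
	/* Normal IO must wait for any active barrier to be lowered. */
	wait_event_lock_irq(b->wait, b->barrier == 0, b->lock);
	b->nr_pending++;
	spin_unlock_irq(&b->lock);
}

static void __maybe_unused raid1_example_lower_barrier(struct raid1_example_barrier *b)
{
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	b->barrier--;
	spin_unlock_irqrestore(&b->lock, flags);
	wake_up(&b->wait);
}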

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{}

static void freeze_array(struct r1conf *conf, int extra)
{}
static void unfreeze_array(struct r1conf *conf)
{}

static void alloc_behind_master_bio(struct r1bio *r1_bio,
					   struct bio *bio)
{}

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{}

static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{}

static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{}

static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{}

/**
 * raid1_error() - RAID1 error handler.
 * @mddev: affected md device.
 * @rdev: member device to fail.
 *
 * The routine acknowledges &rdev failure and determines the new @mddev state.
 * If the array became failed, then:
 *	- &MD_BROKEN flag is set in &mddev->flags.
 *	- recovery is disabled.
 * Otherwise, the array must be degraded:
 *	- recovery is interrupted.
 *	- &mddev->degraded is incremented.
 *
 * @rdev is marked as &Faulty except in the case where the array has failed
 * and &mddev->fail_last_dev is off.
 */
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{}
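
/*
 * A sketch of the decision described in the kerneldoc above: losing the
 * last In_sync member marks the whole array broken, otherwise the member
 * is failed and the array becomes (more) degraded.  The device_lock,
 * Blocked handling and superblock update of the real handler are omitted,
 * so treat this as an illustration only.
 */
static void __maybe_unused raid1_example_error(struct mddev *mddev,
					       struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;

	if (test_bit(In_sync, &rdev->flags) &&
	    conf->raid_disks - mddev->degraded == 1) {
		/* Last working mirror: the array as a whole is now broken. */
		set_bit(MD_BROKEN, &mddev->flags);
		if (!mddev->fail_last_dev)
			return;		/* keep the last device In_sync */
	}

	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	set_bit(Faulty, &rdev->flags);
}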

static void print_conf(struct r1conf *conf)
{}

static void close_sync(struct r1conf *conf)
{}

static int raid1_spare_active(struct mddev *mddev)
{}

static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
			   bool replacement)
{}

static bool raid1_remove_conf(struct r1conf *conf, int disk)
{}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{}

static void end_sync_read(struct bio *bio)
{}

static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
{}

static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
{}

static void end_sync_write(struct bio *bio)
{}

static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, blk_opf_t rw)
{}

static int fix_sync_read_error(struct r1bio *r1_bio)
{}

static void process_checks(struct r1bio *r1_bio)
{}

static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{}
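
/*
 * A condensed sketch of the repair idea behind fix_read_error(): fetch the
 * data from a healthy mirror, write it back over the failing device, then
 * re-read to verify.  sync_page_io() is md's synchronous member-IO helper;
 * the bad-block bookkeeping and the iteration over all mirrors done by the
 * real function are omitted, and the 512-byte granularity is only for
 * illustration.
 */
static bool __maybe_unused raid1_example_repair_sector(struct md_rdev *good,
						       struct md_rdev *bad,
						       sector_t sect,
						       struct page *page)
{
	/* Read a known-good copy of the sector. */
	if (!sync_page_io(good, sect, 512, page, REQ_OP_READ, false))
		return false;

	/* Rewrite the failing device, then confirm it reads back. */
	return sync_page_io(bad, sect, 512, page, REQ_OP_WRITE, false) &&
	       sync_page_io(bad, sect, 512, page, REQ_OP_READ, false);
}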

static int narrow_write_error(struct r1bio *r1_bio, int i)
{}

static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{}

static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{}

static void raid1d(struct md_thread *thread)
{}

static int init_resync(struct r1conf *conf)
{}

static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O requests - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   sector_t max_sector, int *skipped)
{}
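
/*
 * A sketch of how one sync pass uses the barrier machinery described
 * above: raise the barrier for the chunk about to be synced (waiting out
 * and then excluding conflicting normal IO), do the work, and drop it
 * again.  The real raid1_sync_request() additionally consults the bitmap,
 * builds an r1buf and handles many edge cases omitted here.
 */
static sector_t __maybe_unused raid1_example_sync_one_chunk(struct r1conf *conf,
							    sector_t sector_nr,
							    sector_t nr_sectors)
{
	if (raise_barrier(conf, sector_nr))
		return 0;	/* interrupted, e.g. the array is being stopped */

	/* ... issue and wait for the resync reads/writes here ... */

	lower_barrier(conf, sector_nr);
	return nr_sectors;
}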

static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{}

static struct r1conf *setup_conf(struct mddev *mddev)
{}

static int raid1_set_limits(struct mddev *mddev)
{}

static int raid1_run(struct mddev *mddev)
{}

static void raid1_free(struct mddev *mddev, void *priv)
{}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{}

static int raid1_reshape(struct mddev *mddev)
{}

static void raid1_quiesce(struct mddev *mddev, int quiesce)
{}

static void *raid1_takeover(struct mddev *mddev)
{}

static struct md_personality raid1_personality =;

static int __init raid_init(void)
{}

static void raid_exit(void)
{}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE();
MODULE_DESCRIPTION();
MODULE_ALIAS(); /* RAID1 */
MODULE_ALIAS();
MODULE_ALIAS();