// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

static struct workqueue_struct *dm_mpath_wq;

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned int fail_count;	/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each group has a path selector which controls which of its paths gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned int pg_num;		/* Reference number */
	unsigned int nr_pgpaths;	/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned int nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */

	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};
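
/*
 * In bio-based mode the struct dm_mpath_io lives in the bio's per-bio-data
 * area and is immediately followed by a struct dm_bio_details, which saves
 * enough of the original bio to resubmit it down another path after a
 * failure (see multipath_per_bio_data_size() and
 * get_bio_details_from_mpio() below).
 */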

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*
 *-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------
 */
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

/*
 * Quick lockless test of an MPATHF_* state bit; re-check under m->lock
 * only if the unlocked read saw the bit set.
 */
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}

/*
 *-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------
 */
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath)
		pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{}

static struct multipath *alloc_multipath(struct dm_target *ti)
{}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{}

static void free_multipath(struct multipath *m)
{}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_bio_get_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is stored immediately after struct dm_mpath_io */
	return (void *)(mpio + 1);
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*
 *-----------------------------------------------
 * Path selection
 *-----------------------------------------------
 */
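/*
 * Overview: choose_pgpath() prefers an administratively requested next_pg,
 * then the current PG, then the remaining groups in order; bypassed PGs
 * are used only as a last resort.  Before a PG's paths can carry I/O they
 * may need pg_init (hardware-handler activation), which the helpers below
 * kick off for every path in the current PG.
 */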
static int __pg_init_all_paths(struct multipath *m)
{}

static int pg_init_all_paths(struct multipath *m)
{}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the name and line number of the function that invoked it.
 */
#define dm_report_EIO(m)						\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti))

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (__must_push_back(m) || dm_noflush_suspending(m->ti));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{}

static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{}

/*
 * Map cloned bios (bio-based multipath)
 */
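/*
 * Bios that cannot be mapped immediately (no usable path yet, or pg_init
 * still in progress) are parked on m->queued_bios under m->lock, and the
 * process_queued_bios work item is scheduled to remap them later.
 */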

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	lockdep_assert_held(&m->lock);

	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{}

static void process_queued_io_list(struct multipath *m)
{}

static void process_queued_bios(struct work_struct *work)
{}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
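/*
 * The MPATHF_QUEUE_IF_NO_PATH bit is changed under m->lock; when
 * save_old_value is true the previous setting is parked in
 * MPATHF_SAVED_QUEUE_IF_NO_PATH so that multipath_resume() can restore
 * it after a suspend/resume cycle.
 */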
static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
			    bool save_old_value, const char *caller)
{}

/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = from_timer(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}

/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{}

static void disable_nopath_timeout(struct multipath *m)
{}
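
/*
 * The timeout is exposed as a module parameter (see the bottom of this
 * file), so it can be changed at runtime, e.g.:
 *
 *   echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 *
 * A value of 0 (the default) disables the timer.
 */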

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*
 *---------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{}

static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
			 const char **attached_handler_name, char **error)
{}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{}

static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{}

static void flush_multipath_work(struct multipath *m)
{}

static void multipath_dtr(struct dm_target *ti)
{}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{}
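
/*
 * Both are reachable from user space via "dmsetup message", e.g.
 * (illustrative device and path names):
 *
 *   dmsetup message mpatha 0 fail_path 8:16
 *   dmsetup message mpatha 0 reinstate_path 8:16
 */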

/*
 * Fail or reinstate all paths that match the provided device.
 */
static int action_dev(struct multipath *m, dev_t dev, action_fn action)
{}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{}
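
/*
 * These, too, are driven through "dmsetup message" (illustrative device
 * name):
 *
 *   dmsetup message mpatha 0 disable_group 2
 *   dmsetup message mpatha 0 enable_group 2
 *   dmsetup message mpatha 0 switch_group 1
 */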

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{}

static void pg_init_done(void *data, int errors)
{}

static void activate_or_offline_path(struct pgpath *pgpath)
{}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{}

/*
 * Suspend with flush can't complete until all the I/O is processed,
 * so if the last path fails we must error any remaining I/O.
 * - Note that if freeze_bdev fails while suspending, the
 *   queue_if_no_path state is lost - userspace should reset it.
 * Otherwise, during noflush suspend, queue_if_no_path will not change.
 */
static void multipath_presuspend(struct dm_target *ti)
{}

static void multipath_postsuspend(struct dm_target *ti)
{}
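
/*
 * presuspend turns queue_if_no_path off (saving the old value, see
 * queue_if_no_path() above) so a flush suspend cannot block forever on
 * queued I/O; postsuspend then waits for outstanding pg_init work to
 * finish.
 */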

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
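/*
 * For example, info output for a healthy map with one group of two active
 * paths and no feature or handler args might look like (illustrative
 * device numbers):
 *
 *   0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 */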
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned int status_flags, char *result, unsigned int maxlen)
{}

static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
			     char *result, unsigned int maxlen)
{}

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev)
{}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{}

/*
 *---------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------
 */
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 14, 0},	/* illustrative; exact version varies by kernel release */
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{}

static void __exit dm_multipath_exit(void)
{}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");