// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>
#include <linux/sched/isolation.h>
#include <trace/events/block.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
struct io_comp_batch *iob, unsigned int flags);
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{ … }
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{ … }
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{ … }
struct mq_inflight { … };
static bool blk_mq_check_inflight(struct request *rq, void *priv)
{ … }
unsigned int blk_mq_in_flight(struct request_queue *q,
struct block_device *part)
{ … }
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
unsigned int inflight[2])
{ … }
void blk_freeze_queue_start(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_freeze_queue_wait(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_freeze_queue(struct request_queue *q)
{ … }
void blk_mq_freeze_queue(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{ … }
void blk_mq_unfreeze_queue(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
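/*
 * Mark @q as quiesced without waiting for outstanding dispatches to
 * finish; use blk_mq_wait_quiesce_done() or blk_mq_quiesce_queue() for that.
 */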
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{ … }
EXPORT_SYMBOL_GPL(…);
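/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Once this function is returned, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */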
void blk_mq_quiesce_queue(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_unquiesce_queue(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_wake_waiters(struct request_queue *q)
{ … }
void blk_rq_init(struct request_queue *q, struct request *rq)
{ … }
EXPORT_SYMBOL(…);
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{ … }
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
struct blk_mq_tags *tags, unsigned int tag)
{ … }
static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{ … }
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{ … }
static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
struct blk_plug *plug,
blk_opf_t opf,
blk_mq_req_flags_t flags)
{ … }
static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
blk_opf_t opf,
blk_mq_req_flags_t flags)
{ … }
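/*
 * Allocate a request for passthrough or driver-internal use.  Enters the
 * queue and may sleep waiting for a tag unless BLK_MQ_REQ_NOWAIT is set.
 */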
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{ … }
EXPORT_SYMBOL(…);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{ … }
EXPORT_SYMBOL_GPL(…);
static void blk_mq_finish_request(struct request *rq)
{ … }
static void __blk_mq_free_request(struct request *rq)
{ … }
void blk_mq_free_request(struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_free_plug_rqs(struct blk_plug *plug)
{ … }
void blk_dump_rq_flags(struct request *rq, char *msg)
{ … }
EXPORT_SYMBOL(…);
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{ … }
static void blk_print_req_error(struct request *req, blk_status_t status)
{ … }
static void blk_complete_request(struct request *req)
{ … }
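/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 */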
bool blk_update_request(struct request *req, blk_status_t error,
unsigned int nr_bytes)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline void blk_account_io_done(struct request *req, u64 now)
{ … }
static inline void blk_account_io_start(struct request *req)
{ … }
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{ … }
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_end_request(struct request *rq, blk_status_t error)
{ … }
EXPORT_SYMBOL(…);
#define TAG_COMP_BATCH …
static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
int *tag_array, int nr_tags)
{ … }
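/*
 * Complete every request in @iob, flushing driver tags back in batches of
 * TAG_COMP_BATCH to cut down on per-request completion overhead.
 */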
void blk_mq_end_request_batch(struct io_comp_batch *iob)
{ … }
EXPORT_SYMBOL_GPL(…);
static void blk_complete_reqs(struct llist_head *list)
{ … }
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{ … }
static int blk_softirq_cpu_dead(unsigned int cpu)
{ … }
static void __blk_mq_complete_request_remote(void *data)
{ … }
static inline bool blk_mq_complete_need_ipi(struct request *rq)
{ … }
static void blk_mq_complete_send_ipi(struct request *rq)
{ … }
static void blk_mq_raise_softirq(struct request *rq)
{ … }
bool blk_mq_complete_request_remote(struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
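/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Complete the request via the ->complete callback, either directly or
 * remotely on the submitting CPU through an IPI or softirq.
 */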
void blk_mq_complete_request(struct request *rq)
{ … }
EXPORT_SYMBOL(…);
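/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */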
void blk_mq_start_request(struct request *rq)
{ … }
EXPORT_SYMBOL(…);
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{ … }
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{ … }
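/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Insert a fully prepared request at the back of the I/O scheduler queue
 * for execution.  Don't wait for completion.
 */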
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{ … }
EXPORT_SYMBOL_GPL(…);
struct blk_rq_wait { … };
static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{ … }
bool blk_rq_is_poll(struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{ … }
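/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Insert a fully prepared request at the back of the I/O scheduler queue
 * for execution and wait for completion.
 */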
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{ … }
EXPORT_SYMBOL(…);
static void __blk_mq_requeue_request(struct request *rq)
{ … }
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{ … }
EXPORT_SYMBOL(…);
static void blk_mq_requeue_work(struct work_struct *work)
{ … }
void blk_mq_kick_requeue_list(struct request_queue *q)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
unsigned long msecs)
{ … }
EXPORT_SYMBOL(…);
static bool blk_is_flush_data_rq(struct request *rq)
{ … }
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{ … }
bool blk_mq_queue_inflight(struct request_queue *q)
{ … }
EXPORT_SYMBOL_GPL(…);
static void blk_mq_rq_timed_out(struct request *req)
{ … }
struct blk_expired_data { … };
static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{ … }
void blk_mq_put_rq_ref(struct request *rq)
{ … }
static bool blk_mq_check_expired(struct request *rq, void *priv)
{ … }
static bool blk_mq_handle_expired(struct request *rq, void *priv)
{ … }
static void blk_mq_timeout_work(struct work_struct *work)
{ … }
struct flush_busy_ctx_data { … };
static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{ … }
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{ … }
EXPORT_SYMBOL_GPL(…);
struct dispatch_rq_data { … };
static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
void *data)
{ … }
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start)
{ … }
bool __blk_mq_alloc_driver_tag(struct request *rq)
{ … }
static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
int flags, void *key)
{ … }
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{ … }
#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT …
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR …
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{ … }
#define BLK_MQ_RESOURCE_DELAY …
static void blk_mq_handle_dev_resource(struct request *rq,
struct list_head *list)
{ … }
enum prep_dispatch { … };
static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
bool need_budget)
{ … }
static void blk_mq_release_budgets(struct request_queue *q,
struct list_head *list)
{ … }
static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
bool from_schedule)
{ … }
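/*
 * Dispatch the requests on @list to the driver.  Returns true if some work
 * was done and dispatching may continue; false if the caller should stop.
 */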
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
unsigned int nr_budgets)
{ … }
static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{ … }
static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)
{ … }
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{ … }
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{ … }
EXPORT_SYMBOL(…);
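/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */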
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{ … }
EXPORT_SYMBOL(…);
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{ … }
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_stop_hw_queues(struct request_queue *q)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_start_hw_queues(struct request_queue *q)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{ … }
EXPORT_SYMBOL(…);
static void blk_mq_run_work_fn(struct work_struct *work)
{ … }
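/**
 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
 * @rq: Pointer to request to be inserted.
 * @flags: BLK_MQ_INSERT_*
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */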
static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
{ … }
static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx, struct list_head *list,
bool run_queue_async)
{ … }
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
{ … }
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
unsigned int nr_segs)
{ … }
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, bool last)
{ … }
static bool blk_mq_get_budget_and_tag(struct request *rq)
{ … }
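/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
 * @rq: Pointer to request to be sent.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to device driver. Else, insert at hctx->dispatch queue, so
 * we can try send it another time in the future. Requests inserted at this
 * queue have higher priority.
 */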
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{ … }
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{ … }
static void blk_mq_plug_issue_direct(struct blk_plug *plug)
{ … }
static void __blk_mq_flush_plug_list(struct request_queue *q,
struct blk_plug *plug)
{ … }
static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
{ … }
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{ … }
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{ … }
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
struct bio *bio, unsigned int nr_segs)
{ … }
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
struct bio *bio,
unsigned int nsegs)
{ … }
static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
struct request_queue *q, blk_opf_t opf)
{ … }
static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
struct bio *bio)
{ … }
static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
{ … }
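/**
 * blk_mq_submit_bio - Create and send a request to block device.
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @q and @bio and send to the device. The
 * request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place request at plug queue for possible future merging
 * * There is an IO scheduler active at this queue
 *
 * It will not queue the request if there is an error with the bio, or at the
 * request creation.
 */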
void blk_mq_submit_bio(struct bio *bio)
{ … }
#ifdef CONFIG_BLK_MQ_STACKING
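/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @rq: the request being queued
 */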
blk_status_t blk_insert_cloned_request(struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_rq_unprep_clone(struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
int (*bio_ctr)(struct bio *, struct bio *, void *),
void *data)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
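/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */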
void blk_steal_bios(struct bio_list *list, struct request *rq)
{ … }
EXPORT_SYMBOL_GPL(…);
static size_t order_to_size(unsigned int order)
{ … }
static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
struct blk_mq_tags *tags)
{ … }
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{ … }
void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{ … }
static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
{ … }
static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
{ … }
static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
unsigned int hctx_idx,
unsigned int nr_tags,
unsigned int reserved_tags)
{ … }
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, int node)
{ … }
static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth)
{ … }
struct rq_iter_data { … };
static bool blk_mq_has_request(struct request *rq, void *data)
{ … }
static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{ … }
static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
unsigned int this_cpu)
{ … }
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{ … }
static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
const struct blk_mq_hw_ctx *hctx)
{ … }
static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{ … }
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{ … }
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{ … }
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
unsigned int queue_depth, struct request *flush_rq)
{ … }
static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{ … }
static void blk_mq_exit_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set, int nr_queue)
{ … }
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{ … }
static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
int node)
{ … }
static void blk_mq_init_cpu_queues(struct request_queue *q,
unsigned int nr_hw_queues)
{ … }
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx,
unsigned int depth)
{ … }
static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
int hctx_idx)
{ … }
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags,
unsigned int hctx_idx)
{ … }
static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
{ … }
static void blk_mq_map_swqueue(struct request_queue *q)
{ … }
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{ … }
static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
bool shared)
{ … }
static void blk_mq_del_queue_tag_set(struct request_queue *q)
{ … }
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{ … }
static int blk_mq_alloc_ctxs(struct request_queue *q)
{ … }
void blk_mq_release(struct request_queue *q)
{ … }
static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
{ … }
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata)
{ … }
EXPORT_SYMBOL(…);
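/**
 * blk_mq_destroy_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * This shuts down a request queue allocated by blk_mq_alloc_queue(). All
 * future requests will be failed with -ENODEV. The caller is responsible
 * for dropping the reference with blk_put_queue().
 */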
void blk_mq_destroy_queue(struct request_queue *q)
{ … }
EXPORT_SYMBOL(…);
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata,
struct lock_class_key *lkclass)
{ … }
EXPORT_SYMBOL(…);
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
struct lock_class_key *lkclass)
{ … }
EXPORT_SYMBOL(…);
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
int hctx_idx, int node)
{ … }
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q)
{ … }
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_exit_queue(struct request_queue *q)
{ … }
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{ … }
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{ … }
static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{ … }
static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
int new_nr_hw_queues)
{ … }
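/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */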
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{ … }
EXPORT_SYMBOL(…);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int queue_depth,
unsigned int set_flags)
{ … }
EXPORT_SYMBOL_GPL(…);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{ … }
EXPORT_SYMBOL(…);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{ … }
struct blk_mq_qe_pair { … };
static bool blk_mq_elv_switch_none(struct list_head *head,
struct request_queue *q)
{ … }
static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
struct request_queue *q)
{ … }
static void blk_mq_elv_switch_back(struct list_head *head,
struct request_queue *q)
{ … }
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{ … }
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{ … }
EXPORT_SYMBOL_GPL(…);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
struct io_comp_batch *iob, unsigned int flags)
{ … }
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
struct io_comp_batch *iob, unsigned int flags)
{ … }
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
unsigned int poll_flags)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int blk_mq_rq_cpu(struct request *rq)
{ … }
EXPORT_SYMBOL(…);
void blk_mq_cancel_work_sync(struct request_queue *q)
{ … }
static int __init blk_mq_init(void)
{ … }
subsys_initcall(blk_mq_init);