#ifndef BLK_MQ_H
#define BLK_MQ_H
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>
struct blk_mq_tags;
struct blk_flush_queue;
#define BLKDEV_MIN_RQ …
#define BLKDEV_DEFAULT_RQ …
enum rq_end_io_ret { … };
typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
typedef __u32 __bitwise req_flags_t;
enum { … };
#define RQF_STARTED …
#define RQF_FLUSH_SEQ …
#define RQF_MIXED_MERGE …
#define RQF_DONTPREP …
#define RQF_SCHED_TAGS …
#define RQF_USE_SCHED …
#define RQF_FAILED …
#define RQF_QUIET …
#define RQF_IO_STAT …
#define RQF_PM …
#define RQF_HASHED …
#define RQF_STATS …
#define RQF_SPECIAL_PAYLOAD …
#define RQF_ZONE_WRITE_PLUGGING …
#define RQF_TIMED_OUT …
#define RQF_RESV …
#define RQF_NOMERGE_FLAGS …
enum mq_rq_state { … };
struct request { … };
static inline enum req_op req_op(const struct request *req)
{ … }
static inline bool blk_rq_is_passthrough(struct request *rq)
{ … }
static inline unsigned short req_get_ioprio(struct request *req)
{ … }
#define rq_data_dir(rq) …
#define rq_dma_dir(rq) …
#define rq_list_add(listptr, rq) …
#define rq_list_add_tail(lastpptr, rq) …
#define rq_list_pop(listptr) …
#define rq_list_peek(listptr) …
#define rq_list_for_each(listptr, pos) …
#define rq_list_for_each_safe(listptr, pos, nxt) …
#define rq_list_next(rq) …
#define rq_list_empty(list) …
static inline void rq_list_move(struct request **src, struct request **dst,
struct request *rq, struct request *prev)
{ … }
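/*
 * Usage sketch (illustrative, not part of this header): draining a request
 * list such as the one handed to a ->queue_rqs() implementation. Here
 * rqlist is assumed to be the struct request ** naming the list head.
 *
 *	struct request *rq;
 *
 *	while ((rq = rq_list_pop(rqlist))) {
 *		blk_mq_start_request(rq);
 *		... submit rq to the hardware ...
 *	}
 */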
enum blk_eh_timer_return { … };
enum { … };
struct blk_mq_hw_ctx { … };
struct blk_mq_queue_map { … };
enum hctx_type { … };
struct blk_mq_tag_set { … };
struct blk_mq_queue_data { … };
typedef bool (busy_tag_iter_fn)(struct request *, void *);
struct blk_mq_ops { … };
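/*
 * Minimal sketch of wiring up a blk_mq_ops instance (my_* names are
 * hypothetical; ->queue_rq is the one mandatory callback):
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		... hand rq to the hardware, completing it later from the
 *		    IRQ handler via blk_mq_complete_request() ...
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */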
enum { … };
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) …
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) …
#define BLK_MQ_MAX_DEPTH …
#define BLK_MQ_NO_HCTX_IDX …
enum { … };
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata,
struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata) …
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int queue_depth,
unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
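/*
 * Typical probe-time sequence (hedged sketch, error handling trimmed;
 * my_dev, my_cmd and my_mq_ops are hypothetical):
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = BLKDEV_DEFAULT_RQ;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	if (blk_mq_alloc_tag_set(set))
 *		goto err;
 *	disk = blk_mq_alloc_disk(set, NULL, my_dev);
 *
 * cmd_size is the per-request driver payload placed behind struct request
 * (see blk_mq_rq_to_pdu() below). Teardown runs in the reverse order:
 * del_gendisk()/put_disk() before blk_mq_free_tag_set().
 */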
void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
unsigned int poll_flags);
bool blk_mq_queue_inflight(struct request_queue *q);
enum { … };
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
blk_opf_t opf, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
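/*
 * Both allocators return ERR_PTR() on failure, so check with IS_ERR()
 * rather than for NULL (illustrative sketch):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */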
struct blk_mq_tags { … };
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
unsigned int tag)
{ … }
enum { … };
u32 blk_mq_unique_tag(struct request *rq);
static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{ … }
static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{ … }
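/*
 * The unique tag packs the hardware queue index into the upper 16 bits and
 * the per-queue tag into the lower 16 bits, so the two halves can be
 * recovered independently:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */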
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{ … }
static inline int blk_mq_request_started(struct request *rq)
{ … }
static inline int blk_mq_request_completed(struct request *rq)
{ … }
static inline void blk_mq_set_request_complete(struct request *rq)
{ … }
static inline void blk_mq_complete_request_direct(struct request *rq,
void (*complete)(struct request *rq))
{ … }
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);
static inline bool blk_mq_need_time_stamp(struct request *rq)
{ … }
static inline bool blk_mq_is_reserved_rq(struct request *rq)
{ … }
static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{ … }
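/*
 * Completion handlers that support batching typically try the batch first
 * and fall back to an individual completion (sketch; my_complete_batch is
 * a hypothetical void (*)(struct io_comp_batch *) callback):
 *
 *	if (!blk_mq_add_to_batch(req, iob, err, my_complete_batch))
 *		blk_mq_complete_request(req);
 */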
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
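/*
 * Typical split between the two completion stages (sketch; my_cmd_status
 * is a hypothetical helper returning blk_status_t): the IRQ handler calls
 * blk_mq_complete_request(), which bounces to the submitting CPU as needed
 * and invokes the tag set's ->complete() callback, where the driver
 * finishes the request:
 *
 *	IRQ handler:	blk_mq_complete_request(rq);
 *	->complete():	blk_mq_end_request(rq, my_cmd_status(rq));
 */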
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
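/*
 * busy_tag_iter_fn sketch: counting in-flight requests across all hardware
 * queues of a tag set (hypothetical names). Returning true keeps the
 * iteration going; returning false stops it.
 *
 *	static bool my_count_inflight(struct request *rq, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	blk_mq_tagset_busy_iter(&my_dev->tag_set, my_count_inflight, &count);
 */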
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
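/*
 * Freezing blocks new requests from entering the queue and waits for all
 * outstanding ones to finish, e.g. around state changes that must not race
 * with I/O (sketch):
 *
 *	blk_mq_freeze_queue(q);
 *	... update driver or queue state ...
 *	blk_mq_unfreeze_queue(q);
 */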
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{ … }
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{ … }
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{ … }
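/*
 * The PDU is the cmd_size bytes of per-request driver data allocated
 * immediately behind struct request; conversion is pointer arithmetic in
 * both directions (sketch, struct my_cmd is hypothetical):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);
 */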
#define queue_for_each_hw_ctx(q, hctx, i) …
#define hctx_for_each_ctx(hctx, ctx, i) …
static inline void blk_mq_cleanup_rq(struct request *rq)
{ … }
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
unsigned int nr_segs)
{ … }
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
struct lock_class_key *key);
static inline bool rq_is_sync(struct request *rq)
{ … }
void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);
struct rq_map_data { … };
int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);
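/*
 * Synchronous passthrough sketch: map a kernel buffer into the request and
 * execute it, blocking until completion (error handling trimmed):
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (!IS_ERR(rq)) {
 *		blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *		status = blk_execute_rq(rq, false);
 *		blk_mq_free_request(rq);
 *	}
 */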
struct req_iterator { … };
#define __rq_for_each_bio(_bio, rq) …
#define rq_for_each_segment(bvl, _rq, _iter) …
#define rq_for_each_bvec(bvl, _rq, _iter) …
#define rq_iter_last(bvec, _iter) …
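/*
 * Iteration sketch: walk every bio_vec segment of a request, e.g. for a
 * driver that moves data by CPU copy (my_copy_segment is hypothetical):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_copy_segment(page_address(bvec.bv_page) + bvec.bv_offset,
 *				bvec.bv_len);
 */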
static inline sector_t blk_rq_pos(const struct request *rq)
{ … }
static inline unsigned int blk_rq_bytes(const struct request *rq)
{ … }
static inline int blk_rq_cur_bytes(const struct request *rq)
{ … }
static inline unsigned int blk_rq_sectors(const struct request *rq)
{ … }
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{ … }
static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{ … }
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{ … }
static inline struct bio_vec req_bvec(struct request *rq)
{ … }
static inline unsigned int blk_rq_count_bios(struct request *rq)
{ … }
void blk_steal_bios(struct bio_list *list, struct request *rq);
bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
void blk_abort_request(struct request *);
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{ … }
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{ … }
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{ … }
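/*
 * DMA setup sketch inside ->queue_rq() (my_* names are hypothetical; the
 * scatterlist is assumed to provide at least queue_max_segments(q)
 * entries):
 *
 *	sg_init_table(my_cmd->sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, my_cmd->sgl);
 *	nents = dma_map_sg(my_dev->dma_dev, my_cmd->sgl, nents,
 *			   rq_dma_dir(rq));
 */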
void blk_dump_rq_flags(struct request *, char *);
#endif