/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {} ____cacheline_aligned_in_smp;

enum {};

#define BLK_MQ_CPU_WORK_BATCH

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
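
/*
 * Illustrative sketch, not part of the upstream header: one way a caller
 * might pair the two helpers above when building a throwaway tag map for a
 * single hardware queue. The "example_" names are invented for illustration
 * and error handling beyond the allocation check is omitted.
 */
#if 0
static int example_try_map_and_rqs(struct blk_mq_tag_set *set,
				   unsigned int hctx_idx, unsigned int depth)
{
	struct blk_mq_tags *tags;

	/* Allocates the tag map and the statically preallocated requests. */
	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
	if (!tags)
		return -ENOMEM;

	/* ... the new map would normally be installed and used here ... */

	/* Releases the requests first, then the map itself. */
	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
	return 0;
}
#endif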

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{}
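
/*
 * Illustrative sketch, not part of the upstream header: resolving the
 * hardware queue that handles HCTX_TYPE_DEFAULT work for the CPU the caller
 * is currently running on. "example_default_hctx" is a made-up name.
 */
#if 0
static struct blk_mq_hw_ctx *example_default_hctx(struct request_queue *q)
{
	return blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
				     raw_smp_processor_id());
}
#endif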

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{}

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{}
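
/*
 * Illustrative sketch, not part of the upstream header: mapping a polled
 * read to its hardware queue through a given software queue context. On a
 * poll-capable queue this would pick the HCTX_TYPE_POLL map; the helper
 * name is invented for illustration.
 */
#if 0
static struct blk_mq_hw_ctx *example_polled_read_hctx(struct request_queue *q,
						      struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx);
}
#endif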

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{}

/*
 * This assumes per-cpu software queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{}
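
/*
 * Illustrative sketch, not part of the upstream header: the usual submit-side
 * pattern of grabbing the software queue for the current CPU and mapping it
 * to a hardware queue. As the comment above notes, the ctx stays valid even
 * if the task migrates right after the lookup. The helper name is invented.
 */
#if 0
static struct blk_mq_hw_ctx *example_current_hctx(struct request_queue *q,
						  blk_opf_t opf)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	return blk_mq_map_queue(q, opf, ctx);
}
#endif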

struct blk_mq_alloc_data {};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{}

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{}

static inline void blk_mq_put_driver_tag(struct request *rq)
{}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{}
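
/*
 * Illustrative sketch, not part of the upstream header: the fairness idea
 * above amounts to handing each active user of a shared sbitmap roughly an
 * equal slice of its depth. A hypothetical stand-alone version of that
 * arithmetic (the real check also consults hctx/queue state):
 */
#if 0
static bool example_fair_share_ok(unsigned int my_active_requests,
				  unsigned int total_depth,
				  unsigned int active_users)
{
	unsigned int share;

	/* With no active users recorded there is nothing to throttle. */
	if (!active_users)
		return true;

	/* Split the depth evenly, rounding up so tiny depths stay usable. */
	share = (total_depth + active_users - 1) / active_users;
	return my_active_requests < share;
}
#endif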

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)	\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)
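
/*
 * Illustrative sketch, not part of the upstream header: a typical caller
 * wraps a dispatch call so it runs under the queue's rcu/srcu protection.
 * blk_mq_sched_dispatch_requests() is used purely as an example callee and
 * is declared in blk-mq-sched.h, not here.
 */
#if 0
static void example_run_dispatch(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}
#endif
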
#endif