/* linux/block/kyber-iosched.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {};

static const char *kyber_domain_names[] =;

enum {};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] =;

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] =;

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] =;

/*
 * Requests latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
/*
 * NOTE(review): initializers stripped by extraction; reconstructed from the
 * upstream Kyber scheduler — confirm against the kernel tree in use.
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
/* NOTE(review): struct body elided by this extraction — presumably holds the
 * per-cpu atomic bucket counters; restore from upstream kyber-iosched.c. */
struct kyber_cpu_latency {};

/*
 * There is a same mapping between ctx & hctx and kcq & khd,
 * we use request->mq_ctx->index_hw to index the kcq in khd.
 */
/* NOTE(review): body elided — presumably a lock plus per-domain request
 * lists; restore from upstream. */
struct kyber_ctx_queue {} ____cacheline_aligned_in_smp;

/* NOTE(review): body elided — per-request_queue state (domain token
 * sbitmaps, latency histograms, timer, async depth); restore from upstream. */
struct kyber_queue_data {};

/* NOTE(review): body elided — per-hctx state (kcqs, per-domain lists,
 * batching/current-domain state, domain-token wait entries); restore from
 * upstream. */
struct kyber_hctx_data {};

/* Forward declaration: wait-queue callback used when a hctx is blocked
 * waiting for a domain token (defined later in this file). */
static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

/*
 * Map a request's operation flags to a scheduling domain.
 * NOTE(review): body elided by this extraction — restore from upstream
 * kyber-iosched.c before building.
 */
static unsigned int kyber_sched_domain(blk_opf_t opf)
{}

/*
 * Presumably folds one CPU's latency buckets for (sched_domain, type) into
 * kqd's device-wide histogram — body elided; restore from upstream.
 */
static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 * NOTE(review): body elided — restore from upstream.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{}

/*
 * Presumably applies a new queue depth to sched_domain's token pool —
 * body elided; restore from upstream.
 */
static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{}

/*
 * Timer callback: presumably aggregates latency samples and rescales domain
 * depths toward the latency targets — body elided; restore from upstream.
 */
static void kyber_timer_fn(struct timer_list *t)
{}

/*
 * Presumably allocates and initializes the per-request_queue Kyber state —
 * body elided by extraction; restore from upstream kyber-iosched.c.
 */
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{}

/* Elevator init hook for this scheduler — body elided; restore from
 * upstream. */
static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{}

/* Elevator teardown hook — body elided; restore from upstream. */
static void kyber_exit_sched(struct elevator_queue *e)
{}

/* Presumably initializes one kyber_ctx_queue — body elided. */
static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{}

/* Hook invoked when a hctx's tag depth changes — body elided; restore from
 * upstream. */
static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{}

/* Per-hctx init hook — body elided; restore from upstream. */
static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{}

/* Per-hctx teardown hook — body elided; restore from upstream. */
static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{}

/* Accessors for the domain token stashed in a request — bodies elided by
 * extraction; restore from upstream kyber-iosched.c. */
static int rq_get_domain_token(struct request *rq)
{}

static void rq_set_domain_token(struct request *rq, int token)
{}

/* Presumably releases rq's domain token back to kqd — body elided. */
static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{}

/* Presumably the scheduler's limit_depth hook (async depth throttling) —
 * body elided; restore from upstream. */
static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{}

/* Presumably the scheduler's bio_merge hook — body elided; restore from
 * upstream. */
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{}

/* Presumably the scheduler's prepare_request hook — body elided. */
static void kyber_prepare_request(struct request *rq)
{}

/* Presumably the scheduler's insert_requests hook — body elided by
 * extraction; restore from upstream kyber-iosched.c. */
static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list,
				  blk_insert_t flags)
{}

/* Presumably the scheduler's finish_request hook — body elided. */
static void kyber_finish_request(struct request *rq)
{}

/* Presumably buckets one latency sample relative to its target into the
 * per-cpu histogram — body elided; restore from upstream. */
static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{}

/* Completion hook: presumably records total and I/O latency samples at time
 * `now` — body elided; restore from upstream. */
static void kyber_completed_request(struct request *rq, u64 now)
{}

/* NOTE(review): struct body elided by extraction — presumably carries the
 * (khd, sched_domain, list) triple for flush_busy_kcq(); restore from
 * upstream kyber-iosched.c. */
struct flush_kcq_data {};

/* sbitmap for-each callback — body elided; restore from upstream. */
static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{}

/* Presumably splices every busy kcq list for sched_domain onto *list —
 * body elided; restore from upstream. */
static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{}

/* Definition of the wait-queue callback forward-declared earlier — body
 * elided; restore from upstream. */
static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{}

/* Presumably acquires a token for the current domain, parking the hctx on a
 * wait queue when the pool is exhausted — body elided; restore from
 * upstream. */
static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{}

/* Dispatch helper for khd's current domain — body elided; restore from
 * upstream. */
static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{}

/* Presumably the scheduler's dispatch_request hook — body elided. */
static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{}

/* Presumably the scheduler's has_work hook — body elided. */
static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{}

/* NOTE(review): the macro body was stripped by this extraction — as written,
 * KYBER_LAT_SHOW_STORE expands to nothing and the invocations below leave
 * ill-formed residue. Upstream generates the per-domain sysfs show/store
 * helpers here; restore before building. */
#define KYBER_LAT_SHOW_STORE
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

/* NOTE(review): KYBER_LAT_ATTR's body and the attrs initializer were also
 * stripped (`=;` is ill-formed); restore from upstream. */
#define KYBER_LAT_ATTR
static struct elv_fs_entry kyber_sched_attrs[] =;
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
/* NOTE(review): macro body stripped by extraction — the invocations below
 * expand to ill-formed residue. Upstream generates the per-domain debugfs
 * seq_file helpers here; restore before building. */
#define KYBER_DEBUGFS_DOMAIN_ATTRS
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

/* debugfs show helpers — bodies elided; restore from upstream. */
static int kyber_async_depth_show(void *data, struct seq_file *m)
{}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{}

static int kyber_batching_show(void *data, struct seq_file *m)
{}

/* NOTE(review): attr-table initializers stripped (`=;` is ill-formed) —
 * restore from upstream. */
#define KYBER_QUEUE_DOMAIN_ATTRS
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] =;
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] =;
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

/* NOTE(review): the elevator_type initializer (ops table, attrs, debugfs
 * attrs, elevator name) was stripped by extraction (`=;` is ill-formed) —
 * restore from upstream kyber-iosched.c. */
static struct elevator_type kyber_sched =;

/* Module init: presumably registers kyber_sched with the elevator core —
 * body elided; restore from upstream. */
static int __init kyber_init(void)
{}

/* Module exit: presumably unregisters kyber_sched — body elided. */
static void __exit kyber_exit(void)
{}

module_init();
module_exit(kyber_exit);

MODULE_AUTHOR();
MODULE_LICENSE();
MODULE_DESCRIPTION();