// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 msec / sqrt(scaling step + 1); see the sketch
 *   just after this comment.
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
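
/*
 * Illustrative, self-contained sketch (not the in-tree code) of how the
 * scaling step described above maps to a throttling depth and a monitoring
 * window. All names below are hypothetical; the real logic lives in
 * calc_wb_limits(), scale_up()/scale_down() and rwb_arm_timer() further down.
 */
static inline unsigned int wbt_sketch_isqrt(unsigned int x)
{
	/* Simple integer square root; good enough for the small inputs used here. */
	unsigned int r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static inline unsigned int wbt_sketch_depth(unsigned int queue_depth, int step)
{
	/* Each positive step halves the allowed depth, never going below 1 ... */
	if (step >= 0) {
		unsigned int depth = queue_depth >> step;

		return depth ? depth : 1;
	}
	/* ... while a negative step (writes only) temporarily boosts it. */
	return queue_depth << -step;
}

static inline unsigned int wbt_sketch_win_msec(int step)
{
	/* Default 100 msec window, shrunk to 100 / sqrt(step + 1) for step > 0. */
	if (step <= 0)
		return 100;
	/* Fixed point so the integer square root keeps some precision. */
	return (100 << 4) / wbt_sketch_isqrt((unsigned int)(step + 1) << 8);
}
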
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-rq-qos.h"
#include "elevator.h"
#include "blk.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

enum wbt_flags {};

enum {};

/*
 * If the current state is WBT_STATE_ON/OFF_DEFAULT, it can be changed to any
 * other state; if the current state is WBT_STATE_ON/OFF_MANUAL, it can only be
 * changed to WBT_STATE_OFF/ON_MANUAL.
 */
enum {};
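
/*
 * Illustrative sketch of the rule above. It assumes the four enumerators
 * named in that comment (WBT_STATE_ON_DEFAULT, WBT_STATE_OFF_DEFAULT,
 * WBT_STATE_ON_MANUAL, WBT_STATE_OFF_MANUAL), whose definitions are elided
 * from the enum above; the helper itself is hypothetical, not in-tree API.
 */
static inline bool wbt_sketch_state_change_allowed(int cur, int next)
{
	/* A DEFAULT state may be changed to any other state. */
	if (cur == WBT_STATE_ON_DEFAULT || cur == WBT_STATE_OFF_DEFAULT)
		return true;
	/* A MANUAL state may only be changed to a MANUAL state. */
	return next == WBT_STATE_ON_MANUAL || next == WBT_STATE_OFF_MANUAL;
}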

struct rq_wb {};

static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{}

static inline void wbt_clear_state(struct request *rq)
{}

static inline enum wbt_flags wbt_flags(struct request *rq)
{}

static inline bool wbt_is_tracked(struct request *rq)
{}

static inline bool wbt_is_read(struct request *rq)
{}

enum {};

static inline bool rwb_enabled(struct rq_wb *rwb)
{}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{}
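
/*
 * Hedged sketch of the recency check described above: treat the task as a
 * recent dirty-page waiter if a stored timestamp is less than about one
 * second old. The helper and its argument are illustrative only; the real
 * check in wb_recent_wait() uses state tracked by the writeback code.
 */
static inline bool wbt_sketch_recently_waited(unsigned long last_wait_jiffies)
{
	/* True if the timestamp is within the last HZ jiffies (~1 second). */
	return time_before(jiffies, last_wait_jiffies + HZ);
}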

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{}

static void rwb_wake_all(struct rq_wb *rwb)
{}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at the point where the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{}

enum {};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{}

static void calc_wb_limits(struct rq_wb *rwb)
{}

static void scale_up(struct rq_wb *rwb)
{}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{}

static void rwb_arm_timer(struct rq_wb *rwb)
{}

static void wb_timer_fn(struct blk_stat_callback *cb)
{}

static void wbt_update_limits(struct rq_wb *rwb)
{}

bool wbt_disabled(struct request_queue *q)
{}

u64 wbt_get_min_lat(struct request_queue *q)
{}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{}

static bool close_io(struct rq_wb *rwb)
{}

#define REQ_HIPRIO

static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{}

struct wbt_wait_data {};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       blk_opf_t opf)
{}
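
/*
 * Hedged sketch of the admission test behind the comment above: a tracked
 * write may be queued only while the number of writes in flight is below
 * the limit currently allowed for its class; otherwise the submitter sleeps
 * until completions wake it. The helper is illustrative, not the in-tree API.
 */
static inline bool wbt_sketch_may_queue(unsigned int inflight, unsigned int limit)
{
	return inflight < limit;
}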

static inline bool wbt_should_throttle(struct bio *bio)
{}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct gendisk *disk)
{}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{}

static int wbt_data_dir(const struct request *rq)
{}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{}

static void wbt_exit(struct rq_qos *rqos)
{}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct gendisk *disk)
{}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{}

static int wbt_enabled_show(void *data, struct seq_file *m)
{}

static int wbt_id_show(void *data, struct seq_file *m)
{}

static int wbt_inflight_show(void *data, struct seq_file *m)
{}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{}

static int wbt_normal_show(void *data, struct seq_file *m)
{}

static int wbt_background_show(void *data, struct seq_file *m)
{}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {};
#endif

static const struct rq_qos_ops wbt_rqos_ops = {};

int wbt_init(struct gendisk *disk)
{}