// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{}

static void bfq_stat_exit(struct bfq_stat *stat)
{}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				     struct bfq_stat *from)
{}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
		int off)
{}
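
/*
 * Illustrative sketch, not part of the original file (the *_sketch names
 * are hypothetical): struct bfq_stat pairs a percpu_counter with an
 * atomic64_t aux count, so the elided helpers above plausibly reduce to
 * the percpu operations below: a cheap per-CPU add on the hot path, and a
 * fold of all per-CPU deltas on the (rare) read path.
 */
static inline void bfq_stat_add_sketch(struct bfq_stat *stat, uint64_t val)
{
	/* per-CPU add: no shared cacheline is bounced on the hot path */
	percpu_counter_add(&stat->cpu_cnt, val);
}

static inline uint64_t bfq_stat_read_sketch(struct bfq_stat *stat)
{
	/* sum the per-CPU counters, clamping a transient negative to 0 */
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

static inline void bfq_stat_add_aux_sketch(struct bfq_stat *to,
					   struct bfq_stat *from)
{
	/* fold @from's count, including its own aux count, into @to's aux */
	atomic64_add(bfq_stat_read_sketch(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}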

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)								\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
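
/*
 * Illustrative sketch (hypothetical macro name): a generator like the
 * elided BFQG_FLAG_FNS above conventionally stamps out mark/clear/test
 * accessors for one bfqg_stats_flags bit per invocation, along these
 * lines:
 */
#define BFQG_FLAG_FNS_SKETCH(name)					\
static void bfqg_stats_mark_##name##_sketch(struct bfqg_stats *stats)	\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name##_sketch(struct bfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name##_sketch(const struct bfqg_stats *stats)	\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}

BFQG_FLAG_FNS_SKETCH(waiting)
#undef BFQG_FLAG_FNS_SKETCH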

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{}
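
/*
 * Illustrative sketch, hypothetical name and signature: given the
 * request's submission and dispatch timestamps, a completion hook like
 * the one above plausibly derives the two debug latencies: service time
 * (dispatch to completion) and wait time (submission to dispatch), each
 * accounted per operation type through the blk-cgroup rwstat helpers.
 */
static void bfqg_stats_update_completion_sketch(struct bfqg_stats *stats,
						u64 start_time_ns,
						u64 io_start_time_ns,
						blk_opf_t opf)
{
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)		/* time spent being served */
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)	/* time spent queued */
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}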

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{}
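
/*
 * Illustrative sketch (hypothetical *_sketch names): the converters are
 * conventionally thin container_of()/blkg_to_pd() wrappers, since a
 * bfq_group embeds its blkg_policy_data as the field 'pd':
 */
static struct bfq_group *pd_to_bfqg_sketch(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

static struct bfq_group *blkg_to_bfqg_sketch(struct blkcg_gq *blkg)
{
	/* fetch this policy's private data attached to @blkg */
	return pd_to_bfqg_sketch(blkg_to_pd(blkg, &blkcg_policy_bfq));
}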

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by finding the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{}
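
/*
 * Illustrative sketch (hypothetical *_sketch names): walking up the
 * hierarchy plausibly follows the parent blkg for a group, and the
 * scheduling entity's parent (falling back to the device's root group)
 * for a queue:
 */
static struct bfq_group *bfqg_parent_sketch(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

static struct bfq_group *bfqq_group_sketch(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ?
		container_of(group_entity, struct bfq_group, entity) :
		bfqq->bfqd->root_group;
}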

/*
 * The following functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{}

static void bfqg_put(struct bfq_group *bfqg)
{}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{}
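
/*
 * Illustrative sketch (hypothetical *_sketch names): the pairing of
 * reference counts is the point here. A bfq_group holds its own refcount
 * (bfqg->ref), so the scheduler can keep it alive even after the blkg is
 * destroyed; the "and_blkg" variants plausibly take and drop both
 * references together:
 */
static void bfqg_and_blkg_get_sketch(struct bfq_group *bfqg)
{
	refcount_inc(&bfqg->ref);	/* bfqg's own reference ... */
	blkg_get(bfqg_to_blkg(bfqg));	/* ... plus the blkg reference */
}

static void bfqg_and_blkg_put_sketch(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));
	if (refcount_dec_and_test(&bfqg->ref))
		kfree(bfqg);
}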

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{}
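
/*
 * Illustrative sketch (hypothetical name, shown for a single counter;
 * the real helper presumably repeats this for every field in
 * bfqg_stats): folding a dying group's counts into the parent's aux
 * counters keeps the ancestors' recursive sums correct after the group
 * is freed.
 */
static void bfqg_stats_xfer_dead_sketch(struct bfq_group *bfqg)
{
	struct bfq_group *parent = bfqg_parent(bfqg);

	if (!parent)	/* root group: nowhere to transfer to */
		return;

	blkg_rwstat_add_aux(&parent->stats.bytes, &bfqg->stats.bytes);
}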

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{}

static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp)
{}

static void bfq_pd_init(struct blkg_policy_data *pd)
{}

static void bfq_pd_free(struct blkg_policy_data *pd)
{}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{}
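
/*
 * Illustrative sketch (hypothetical name; locking asserts, weight-tree
 * bookkeeping and the idle-entity corner case omitted): the move is a
 * deactivate/re-parent/re-activate sequence on the queue's scheduling
 * entity, with the group references swapped from the old group to the
 * new one.
 */
static void bfq_bfqq_move_sketch(struct bfq_data *bfqd, struct bfq_queue *bfqq,
				 struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	bool busy = bfq_bfqq_busy(bfqq);

	if (busy)	/* take the queue off the old group's service tree */
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);

	bfqg_and_blkg_put(bfqq_group(bfqq));	/* drop old group's refs */
	entity->parent = bfqg->my_entity;	/* re-parent the entity */
	entity->sched_data = &bfqg->sched_data;
	bfqg_and_blkg_get(bfqg);		/* hold new group's refs */

	if (busy)	/* queue it on the new group's service tree */
		bfq_activate_bfqq(bfqd, bfqq);
}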

static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move @bic to @bfqg, assuming that bfqd->lock is held; this makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{}
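
/*
 * Illustrative sketch (hypothetical name): with bfqd->lock held, changing
 * a bic's group plausibly means dropping its async queue references (a
 * fresh async queue will be picked from the new group on the next
 * request) and moving each sync queue, one per actuator:
 */
static void __bfq_bic_change_cgroup_sketch(struct bfq_data *bfqd,
					   struct bfq_io_cq *bic,
					   struct bfq_group *bfqg)
{
	unsigned int act_idx;

	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		if (async_bfqq && bfqq_group(async_bfqq) != bfqg) {
			/* release the stale async queue reference */
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg,
					   act_idx);
	}
}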

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{}
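
/*
 * Illustrative sketch (hypothetical name): flushing the idle tree is just
 * popping st->first_idle and deactivating it until the tree is empty;
 * idle entities carry no pending work, so nothing needs reactivating.
 */
static void bfq_flush_idle_tree_sketch(struct bfq_service_tree *st)
{
	struct bfq_entity *entity;

	while ((entity = st->first_idle))
		__bfq_deactivate_entity(entity, false);
}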

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{}

/**
 * bfq_reparent_active_queues - move all active queues to the root group.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{}
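
/*
 * Illustrative sketch (hypothetical name; assumes, as the comments above
 * describe, that the entities found on the tree are leaves, so the extra
 * descent done by bfq_reparent_leaf_entity for group entities and for the
 * in-service entity is omitted): draining a service tree means repeatedly
 * moving the leftmost active queue to the root group.
 */
static void bfq_reparent_active_queues_sketch(struct bfq_data *bfqd,
					      struct bfq_group *bfqg,
					      struct bfq_service_tree *st,
					      int ioprio_class)
{
	struct rb_node *node;

	while ((node = rb_first(&st->active))) {
		struct bfq_entity *entity =
			rb_entry(node, struct bfq_entity, rb_node);

		bfq_bfqq_move(bfqd, bfq_entity_to_bfqq(entity),
			      bfqd->root_group);
	}
}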

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its child entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{}
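
/*
 * Illustrative sketch (hypothetical name; bfqd->lock handling and the
 * release of the group's async queues omitted): going offline, a group
 * plausibly (i) reparents any still-active queue to the root group,
 * (ii) flushes each idle tree, (iii) deactivates its own entity, and
 * (iv) transfers its stats to the parent before the blkg disappears.
 */
static void bfq_pd_offline_sketch(struct bfq_group *bfqg)
{
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	int i;

	if (entity) {	/* the root group has no entity to deactivate */
		for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
			struct bfq_service_tree *st =
				bfqg->sched_data.service_tree + i;

			bfq_reparent_active_queues(bfqd, bfqg, st, i);
			bfq_flush_idle_tree(st);
		}
		__bfq_deactivate_entity(entity, false);
	}
	bfqg_stats_xfer_dead(bfqg);
}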

void bfq_end_wr_async(struct bfq_data *bfqd)
{}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {};

struct cftype bfq_blkg_files[] = {};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */