#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>
#include "elevator.h"
#include "bfq-iosched.h"
#ifdef CONFIG_BFQ_CGROUP_DEBUG
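/*
 * Debug statistics, compiled in only when CONFIG_BFQ_CGROUP_DEBUG is set.
 * A bfq_stat pairs a percpu counter with an auxiliary count that lets a
 * group inherit the totals of its dead children (bfq_stat_add_aux()).
 */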
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{ … }
static void bfq_stat_exit(struct bfq_stat *stat)
{ … }
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{ … }
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{ … }
static inline void bfq_stat_reset(struct bfq_stat *stat)
{ … }
static inline void bfq_stat_add_aux(struct bfq_stat *to,
struct bfq_stat *from)
{ … }
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{ … }
enum bfqg_stats_flags { … };
#define BFQG_FLAG_FNS(name) …
/* mark/clear/test helpers for each bfqg_stats_flags state */
BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
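/*
 * Helpers tracking per-group wait, empty and idle times.  Callers are
 * expected to hold the scheduler lock.
 */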
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{ … }
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
struct bfq_group *curr_bfqg)
{ … }
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{ … }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{ … }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{ … }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{ … }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{ … }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{ … }
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
blk_opf_t opf)
{ … }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{ … }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{ … }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, blk_opf_t opf)
{ … }
#else /* CONFIG_BFQ_CGROUP_DEBUG */
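/*
 * With CONFIG_BFQ_CGROUP_DEBUG off, the update hooks called from
 * bfq-iosched.c collapse to empty stubs, so callers need no #ifdefs.
 */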
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
#ifdef CONFIG_BFQ_GROUP_IOSCHED
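/*
 * Helpers converting between the generic blkcg objects (blkg_policy_data,
 * blkcg_gq) and BFQ's per-group structure.
 */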
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{ … }
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{ … }
static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{ … }
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{ … }
struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{ … }
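/*
 * A bfq_group carries a reference count of its own, on top of the one held
 * on its blkg, so that a bfq_queue can keep its group alive even after the
 * blkg has started to be destroyed (see bfq_bic_update_cgroup()).
 */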
static void bfqg_get(struct bfq_group *bfqg)
{ … }
static void bfqg_put(struct bfq_group *bfqg)
{ … }
static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{ … }
void bfqg_and_blkg_put(struct bfq_group *bfqg)
{ … }
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{ … }
static void bfqg_stats_reset(struct bfqg_stats *stats)
{ … }
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{ … }
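/*
 * bfqg_stats_xfer_dead() folds a dying group's statistics into its parent's
 * auxiliary counters, so that recursive stats keep accounting for it.
 */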
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{ … }
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{ … }
static void bfqg_stats_exit(struct bfqg_stats *stats)
{ … }
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{ … }
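/*
 * blkcg policy plumbing: per-blkcg data (bfq_group_data, which holds the
 * configured weight) and the per-group policy data that the blkcg core
 * allocates, initializes and tears down through blkcg_policy_bfq.
 */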
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{ … }
static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{ … }
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{ … }
static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{ … }
static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
struct blkcg *blkcg, gfp_t gfp)
{ … }
static void bfq_pd_init(struct blkg_policy_data *pd)
{ … }
static void bfq_pd_free(struct blkg_policy_data *pd)
{ … }
static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{ … }
static void bfq_group_set_parent(struct bfq_group *bfqg,
struct bfq_group *parent)
{ … }
static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{ … }
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{ … }
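/*
 * bfq_bfqq_move() migrates @bfqq to @bfqg: the queue is deactivated on its
 * old group, its entity is re-parented, and the queue is re-activated on
 * the new group if it is still busy.
 */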
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_group *bfqg)
{ … }
static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
struct bfq_queue *sync_bfqq,
struct bfq_io_cq *bic,
struct bfq_group *bfqg,
unsigned int act_idx)
{ … }
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct bfq_group *bfqg)
{ … }
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{ … }
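/*
 * Offlining support: when a group goes away, bfq_pd_offline() flushes its
 * idle trees and reparents any remaining entities to the root group, so
 * that no queue is left dangling.
 */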
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{ … }
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
struct bfq_entity *entity,
int ioprio_class)
{ … }
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
struct bfq_group *bfqg,
struct bfq_service_tree *st,
int ioprio_class)
{ … }
static void bfq_pd_offline(struct blkg_policy_data *pd)
{ … }
void bfq_end_wr_async(struct bfq_data *bfqd)
{ … }
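/*
 * Read/write handlers behind the weight and statistics files exported
 * through bfq_blkcg_legacy_files[] and bfq_blkg_files[].
 */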
static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_weight_device(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{ … }
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{ … }
static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{ … }
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
struct cftype *cftype,
u64 val)
{ … }
static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{ … }
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{ … }
static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{ … }
static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{ … }
#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{ … }
static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{ … }
static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{ … }
static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{ … }
static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{ … }
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{ … }
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{ … }
struct blkcg_policy blkcg_policy_bfq = …;
struct cftype bfq_blkcg_legacy_files[] = …;
struct cftype bfq_blkg_files[] = …;
#else /* CONFIG_BFQ_GROUP_IOSCHED */
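/*
 * CONFIG_BFQ_GROUP_IOSCHED is not set: there is no cgroup hierarchy, every
 * queue belongs to the single root group, and the hooks above reduce to
 * the trivial implementations below.
 */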
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}
struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}
void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
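/*
 * Allocate the lone root group on @node and give it one service tree per
 * I/O priority class.
 */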
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */