linux/drivers/md/bcache/writeback.c

// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <[email protected]>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

static void update_gc_after_writeback(struct cache_set *c)
{}

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{}
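
/*
 * A minimal sketch of the target calculation, assuming the cache_set
 * fields named here; the in-tree version also subtracts the dirty
 * sectors of flash-only volumes from the usable cache size. The _sketch
 * suffix marks this as illustrative, not the real symbol.
 */
static uint64_t __calc_target_rate_sketch(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/* Total cache size, in sectors. */
	uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size;

	/*
	 * writeback_percent is applied per backing device: each device
	 * targets its share of (writeback_percent% of the cache) ...
	 */
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* ... scaled by this device's fraction of all cached capacity. */
	return div64_u64(cache_dirty_target * bdev_nr_sectors(dc->bdev),
			 c->cached_dev_sectors);
}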

static void __update_writeback_rate(struct cached_dev *dc)
{}
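
/*
 * A condensed sketch of the proportional-integral controller behind the
 * writeback rate, assuming the rate fields named here; the in-tree
 * version additionally applies anti-windup on the integral term and,
 * on newer kernels, a fragmentation-based boost.
 */
static void __update_writeback_rate_sketch(struct cached_dev *dc)
{
	int64_t target = __calc_target_rate_sketch(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled, new_rate;

	/* Accumulate the error over time, scaled by the update interval. */
	dc->writeback_rate_integral += error *
		dc->writeback_rate_update_seconds;
	integral_scaled = div_s64(dc->writeback_rate_integral,
				  dc->writeback_rate_i_term_inverse);

	/* Clamp between the configured minimum and the controller's ceiling. */
	new_rate = clamp_t(int64_t, proportional_scaled + integral_scaled,
			   dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_target = target;
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
}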

static bool idle_counter_exceeded(struct cache_set *c)
{}

/*
 * c->idle_counter is incremented every time update_writeback_rate() is
 * called. If all backing devices attached to the same cache set have
 * identical dc->writeback_rate_update_seconds values, it takes about 6
 * rounds of update_writeback_rate() on each backing device before
 * c->at_max_writeback_rate is set to 1, and then the maximum writeback
 * rate is set on each dc->writeback_rate.rate.
 * In order to avoid the extra locking cost of counting the exact number
 * of dirty cached devices, c->attached_dev_nr is used to calculate the
 * idle threshold. The threshold might be bigger if not all cached
 * devices are in writeback mode, but it still works well with a limited
 * number of extra rounds of update_writeback_rate().
 */
static bool set_at_max_writeback_rate(struct cache_set *c,
				       struct cached_dev *dc)
{}
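
/*
 * A condensed sketch of the idle check described above, assuming the
 * atomic counters named here (c->idle_counter, c->attached_dev_nr); the
 * in-tree version also handles counter overflow and rechecks the state
 * before committing to the maximum rate.
 */
static bool idle_counter_exceeded_sketch(struct cache_set *c)
{
	int counter = atomic_inc_return(&c->idle_counter);
	int dev_nr = atomic_read(&c->attached_dev_nr);

	if (dev_nr == 0)
		return false;

	/*
	 * Every attached device's update_writeback_rate() bumps the
	 * shared counter, so roughly 6 rounds per device must pass with
	 * no I/O before the set is considered idle.
	 */
	return counter >= dev_nr * 6;
}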

static void update_writeback_rate(struct work_struct *work)
{}

static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{}
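
/*
 * A sketch of the delay computation, assuming bch_next_delay() returns
 * a delay in jiffies from the token-bucket state in dc->writeback_rate;
 * writeback proceeds unthrottled while detaching or when
 * writeback_percent is 0 (no dirty target configured).
 */
static unsigned int writeback_delay_sketch(struct cached_dev *dc,
					   unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}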

struct dirty_io {};

static void dirty_init(struct keybuf_key *w)
{}

static CLOSURE_CALLBACK(dirty_io_destructor)
{}

static CLOSURE_CALLBACK(write_dirty_finish)
{}

static void dirty_endio(struct bio *bio)
{}
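
/*
 * A sketch of the write-side endio, assuming struct dirty_io carries a
 * closure (cl) and the owning cached_dev (dc), with w->private pointing
 * at it. Clearing the in-memory key's dirty bit on error prevents
 * write_dirty_finish() from clearing it in the btree, so the sectors
 * stay dirty and are retried on a later pass.
 */
static void dirty_endio_sketch(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}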

static CLOSURE_CALLBACK(write_dirty)
{}

static void read_dirty_endio(struct bio *bio)
{}

static CLOSURE_CALLBACK(read_dirty_submit)
{}

static void read_dirty(struct cached_dev *dc)
{}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{}
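
/*
 * A condensed sketch of the stripe bookkeeping done above, assuming the
 * per-device arrays named here (d->stripe_sectors_dirty and
 * d->full_dirty_stripes); the in-tree version also walks across stripe
 * boundaries and accounts flash-only volumes. Stripes that become
 * completely dirty are flagged so refill_full_stripes() can write them
 * back without read-modify-write on RAID-style backing devices.
 */
static void sectors_dirty_add_sketch(struct bcache_device *d,
				     uint64_t offset, int nr_sectors)
{
	unsigned int stripe = div_u64(offset, d->stripe_size);
	int dirty = atomic_add_return(nr_sectors,
				      d->stripe_sectors_dirty + stripe);

	if (dirty == d->stripe_size)
		set_bit(stripe, d->full_dirty_stripes);
	else
		clear_bit(stripe, d->full_dirty_stripes);
}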

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{}
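
/*
 * The predicate is small enough to sketch in full: the keybuf is
 * embedded in struct cached_dev, so container_of() recovers the device,
 * and only keys with their dirty bit set are collected for writeback.
 */
static bool dirty_pred_sketch(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}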

static void refill_full_stripes(struct cached_dev *dc)
{}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{}
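
/*
 * A condensed sketch of the wrap-around scan, assuming
 * bch_refill_keybuf() advances buf->last_scanned as it collects keys;
 * the in-tree version also clamps last_scanned into this device's key
 * range and prefers full stripes when partial stripes are expensive.
 */
static bool refill_dirty_sketch(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos = buf->last_scanned;

	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;	/* Buffer filled before reaching the end. */

	/*
	 * Hit the end of the device: wrap around and scan from the start
	 * up to where this pass began, completing one full circle.
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}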

static int bch_writeback_thread(void *arg)
{}
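
/*
 * A condensed sketch of the thread's main loop, assuming the helpers
 * above; the in-tree version also coordinates with cache-set shutdown,
 * detaching, and triggering gc once all dirty data is written back.
 */
static int bch_writeback_thread_sketch(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);

		/* Nothing dirty, or writeback disabled: go to sleep. */
		if (!atomic_read(&dc->has_dirty) || !dc->writeback_running) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			/* Whole device scanned and nothing left dirty. */
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);
	}

	return 0;
}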

/* Init */
#define INIT_KEYS_EACH_TIME

struct sectors_dirty_init {};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{}

static int bch_root_node_dirty_init(struct cache_set *c,
				     struct bcache_device *d,
				     struct bkey *k)
{}

static int bch_dirty_init_thread(void *arg)
{}

static int bch_btre_dirty_init_thread_nr(void)
{}

void bch_sectors_dirty_init(struct bcache_device *d)
{}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{}
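
/*
 * A sketch of the structural initialisation, assuming the defaults
 * shown (they match common in-tree values but may differ between kernel
 * versions); the delayed work wires periodic rate updates into the
 * controller above.
 */
static void cached_dev_writeback_init_sketch(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);	/* Cap concurrent dirty_io closures. */
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_percent = 10;	/* Target: 10% of the cache dirty. */
	dc->writeback_rate_update_seconds =
		WRITEBACK_RATE_UPDATE_SECS_DEFAULT;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}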

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{}