#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
/*
 * Writeback tunables and limits. All values are elided in this view;
 * see the full header for the actual constants.
 *
 * NOTE(review): by name, CUTOFF_WRITEBACK{,_SYNC} look like cache-
 * utilization thresholds above which incoming writes bypass writeback
 * caching, with *_MAX presumably capping the sysfs-settable range —
 * confirm against the implementation.
 */
#define CUTOFF_WRITEBACK …
#define CUTOFF_WRITEBACK_SYNC …
#define CUTOFF_WRITEBACK_MAX …
#define CUTOFF_WRITEBACK_SYNC_MAX …
/* Presumably per-pass batching limits for the writeback thread — verify. */
#define MAX_WRITEBACKS_IN_PASS …
#define MAX_WRITESIZE_IN_PASS …
/* Rate-update interval bounds/defaults (seconds), per the names. */
#define WRITEBACK_RATE_UPDATE_SECS_MAX …
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT …
/* Dirty-data / fragmentation thresholds; semantics not visible here. */
#define BCH_AUTO_GC_DIRTY_THRESHOLD …
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW …
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID …
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH …
/* Max number of parallel dirty-init threads, by name — confirm. */
#define BCH_DIRTY_INIT_THRD_MAX …
#define WRITEBACK_SHARE_SHIFT …
/*
 * State shared by the threads that scan the btree at registration time
 * to initialize per-device dirty-sector counts (bodies elided here).
 * Forward declaration first so dirty_init_thrd_info can point back to it.
 */
struct bch_dirty_init_state;
/* Per-thread bookkeeping for one dirty-init worker — fields not visible. */
struct dirty_init_thrd_info { … };
/* Shared coordination state for all dirty-init workers — fields not visible. */
struct bch_dirty_init_state { … };
/*
 * Return the device's dirty-sector count as a uint64_t.
 * Body elided; presumably sums the per-stripe dirty counters of @d —
 * confirm against the full header.
 */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{ … }
/*
 * Map a sector @offset on @d to its stripe index.
 * Returns int rather than an unsigned type — NOTE(review): the body is
 * elided, but the signed return suggests a negative value may signal an
 * out-of-range offset; verify before relying on that.
 */
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{ … }
/*
 * Return true if any stripe overlapping [@offset, @offset + @nr_sectors)
 * on @dc's backing device has dirty data. Body elided; presumably walks
 * the stripe range via offset_to_stripe() — confirm.
 */
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{ … }
/*
 * Runtime values of the writeback cutoffs; defined in a .c file
 * (declared extern here, per header convention). Used below by
 * should_writeback().
 */
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;
/*
 * Decide whether @bio should go through writeback caching on @dc.
 * @cache_mode: current cache mode of the device.
 * @would_skip: whether the caller already decided to bypass the cache.
 *
 * Body elided; presumably compares cache utilization against
 * bch_cutoff_writeback{,_sync} declared above — confirm against the
 * implementation.
 */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{ … }
/*
 * Kick the writeback machinery for @dc. Body elided; presumably wakes
 * the writeback thread — TODO confirm.
 */
static inline void bch_writeback_queue(struct cached_dev *dc)
{ … }
/*
 * Note that dirty data was added to @dc so writeback can start/continue.
 * Body elided; likely ends up calling bch_writeback_queue() — verify.
 */
static inline void bch_writeback_add(struct cached_dev *dc)
{ … }
/*
 * Out-of-line writeback API, implemented in the corresponding .c file.
 */
/* Adjust dirty-sector accounting for @nr_sectors at @offset on device
 * @inode in cache set @c (@nr_sectors is signed, so it can subtract). */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);
/* Initialize @d's dirty-sector counters (run at device registration). */
void bch_sectors_dirty_init(struct bcache_device *d);
/* One-time init of @dc's writeback state; start launches the thread —
 * NOTE(review): exact split between init/start not visible here. */
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);
#endif