/* linux/mm/backing-dev.c */

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Fallback bdi used by address spaces that have no real backing device
 * (e.g. in-memory filesystems).
 */
struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

/* Name reported for a bdi whose underlying device has already gone away. */
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK();
static u64 bdi_id_cursor;
static struct rb_root bdi_tree =;
LIST_HEAD();

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/*
 * Aggregated per-writeback counters shown via the debugfs stats files.
 * NOTE(review): the member list appears to have been stripped from this
 * copy of the file -- restore it before building.
 */
struct wb_stats {};

/* Root debugfs directory under which per-bdi entries are created. */
static struct dentry *bdi_debug_root;

/*
 * One-time creation of the debugfs root directory.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void bdi_debug_init(void)
{}

/*
 * Fold one wb's counters into @stats.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void collect_wb_stats(struct wb_stats *stats,
			     struct bdi_writeback *wb)
{}

#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Cgroup-writeback variant: presumably iterates every wb attached to
 * @bdi and aggregates via collect_wb_stats() -- confirm against upstream.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void bdi_collect_stats(struct backing_dev_info *bdi,
			      struct wb_stats *stats)
{}
#else
/*
 * Without cgroup writeback the bdi has only its one embedded wb, so the
 * aggregate is just that wb's counters.
 */
static void bdi_collect_stats(struct backing_dev_info *bdi,
			      struct wb_stats *stats)
{
	collect_wb_stats(stats, &bdi->wb);
}
#endif

/*
 * seq_file show callback for the per-bdi debugfs "stats" file.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{}
/* Generates bdi_debug_stats_fops from bdi_debug_stats_show() above. */
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

/*
 * Print one wb's @stats into seq_file @m.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void wb_stats_show(struct seq_file *m, struct bdi_writeback *wb,
			  struct wb_stats *stats)
{}

/*
 * seq_file show callback for the per-cgroup-writeback debugfs stats file.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int cgwb_debug_stats_show(struct seq_file *m, void *v)
{}
/* Generates cgwb_debug_stats_fops from cgwb_debug_stats_show() above. */
DEFINE_SHOW_ATTRIBUTE(cgwb_debug_stats);

/*
 * Create the per-bdi debugfs directory/files under bdi_debug_root.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{}

/*
 * Tear down the per-bdi debugfs entries created by bdi_debug_register().
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{}
#else /* CONFIG_DEBUG_FS */
/* !CONFIG_DEBUG_FS: all debugfs hooks compile away to no-ops. */
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif /* CONFIG_DEBUG_FS */

/*
 * sysfs store handler for bdi/<dev>/read_ahead_kb.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{}

/*
 * BDI_SHOW(name, expr) conventionally generates a name##_show() sysfs
 * helper printing @expr, paired with the name##_store() above it.
 * NOTE(review): the macro body and the invocation arguments appear
 * stripped in this copy of the file -- restore before building.
 */
#define BDI_SHOW(name, expr)

BDI_SHOW()

/*
 * sysfs store handlers for the bdi dirty-ratio knobs.  Each is paired
 * with a BDI_SHOW()-generated show helper.
 * NOTE(review): all bodies and BDI_SHOW() arguments appear stripped in
 * this copy of the file.
 */
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
BDI_SHOW()

static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
BDI_SHOW()

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
BDI_SHOW()

static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
BDI_SHOW()

/*
 * sysfs show/store pairs for the byte-granularity dirty limits plus the
 * stable_pages_required and strict_limit attributes.
 * NOTE(review): all bodies appear stripped in this copy of the file;
 * only the DEVICE_ATTR_* declarations survived intact.
 */
static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
static DEVICE_ATTR_RW(min_bytes);

static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
static DEVICE_ATTR_RW(max_bytes);

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{}
static DEVICE_ATTR_RO(stable_pages_required);

static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RW(strict_limit);

static struct attribute *bdi_dev_attrs[] =;
ATTRIBUTE_GROUPS();

static const struct class bdi_class =;

/*
 * Register the "bdi" device class; runs at postcore initcall time so it
 * exists before any driver registers a bdi.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static __init int bdi_class_init(void)
{}
postcore_initcall(bdi_class_init);

/*
 * Subsystem init -- presumably creates bdi_wq (declared above) and sets
 * up the default bdis; confirm against upstream.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int __init default_bdi_init(void)
{}
subsys_initcall(default_bdi_init);

/*
 * Deferred work callback for periodic write-bandwidth estimation.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void wb_update_bandwidth_workfn(struct work_struct *work)
{}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

/*
 * Initialise @wb (embedded or per-cgroup) and attach it to @bdi.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{}

/*
 * Release the resources wb_init() set up.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void wb_exit(struct bdi_writeback *wb)
{}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

/*
 * RCU callback freeing a cgwb after the grace period.
 * NOTE(review): all bodies in this cgwb section appear stripped in this
 * copy of the file -- restore from upstream before building.
 */
static void cgwb_free_rcu(struct rcu_head *rcu_head)
{}

/* Workqueue callback performing the actual cgwb teardown. */
static void cgwb_release_workfn(struct work_struct *work)
{}

/* percpu_ref release callback: punts teardown to cgwb_release_wq. */
static void cgwb_release(struct percpu_ref *refcnt)
{}

/* Begin killing @wb so no new references can be taken. */
static void cgwb_kill(struct bdi_writeback *wb)
{}

/* Unlink @wb from its bdi's wb list (cgroup-writeback variant). */
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{}

/* Allocate and install a new wb for @memcg_css on @bdi. */
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
/* NOTE(review): body appears stripped in this copy of the file; the
 * contract is documented in the block comment above. */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
/* NOTE(review): body appears stripped in this copy of the file; the
 * contract is documented in the block comment above. */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{}

/*
 * Cgroup-writeback variant of bdi init: sets up the cgwb tree alongside
 * the embedded wb -- confirm details against upstream.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{}

/*
 * Kill all cgwbs attached to @bdi at unregister time.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end
 * of the list to guarantee the forward progress.
 */
/* NOTE(review): body appears stripped in this copy of the file; intended
 * behavior is documented in the block comment above. */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
/* NOTE(review): body appears stripped in this copy of the file. */
void wb_memcg_offline(struct mem_cgroup *memcg)
{}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
/* NOTE(review): body appears stripped in this copy of the file. */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{}

/*
 * Add @bdi's wbs to its wb_list under cgwb_lock (cgroup variant).
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{}

/*
 * Create cgwb_release_wq used to defer cgwb teardown.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static int __init cgwb_init(void)
{}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback, a bdi has exactly one wb: the embedded one. */
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

/* Nothing to tear down: there are no per-cgroup wbs to kill. */
static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

/* Publish the embedded wb on the bdi's wb_list (RCU readers iterate it). */
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

/* Counterpart of cgwb_bdi_register(): unlink the wb under RCU rules. */
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * Initialise an already-allocated @bdi (fields, wb tree, embedded wb).
 * NOTE(review): body appears stripped in this copy of the file.
 */
int bdi_init(struct backing_dev_info *bdi)
{}

/*
 * Allocate and initialise a bdi for NUMA node @node_id.
 * NOTE(review): body appears stripped in this copy of the file.
 */
struct backing_dev_info *bdi_alloc(int node_id)
{}
EXPORT_SYMBOL(bdi_alloc);

/*
 * Find the rb_tree slot for @id in bdi_tree; @parentp receives the
 * parent node for a subsequent insertion.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
/* NOTE(review): body appears stripped in this copy of the file. */
struct backing_dev_info *bdi_get_by_id(u64 id)
{}

/*
 * va_list worker behind bdi_register(); registers the device and
 * publishes the bdi on bdi_list/bdi_tree -- confirm against upstream.
 * NOTE(review): body appears stripped in this copy of the file.
 */
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{}

/*
 * Varargs front-end; presumably forwards to bdi_register_va() above.
 * NOTE(review): body appears stripped in this copy of the file.
 */
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{}
EXPORT_SYMBOL(bdi_register);

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{}

/*
 * Undo bdi_register(): shut down writeback and unpublish the bdi.
 * NOTE(review): body appears stripped in this copy of the file.
 */
void bdi_unregister(struct backing_dev_info *bdi)
{}
EXPORT_SYMBOL(bdi_unregister);

/*
 * kref release callback: final teardown once the last reference drops.
 * NOTE(review): body appears stripped in this copy of the file.
 */
static void release_bdi(struct kref *ref)
{}

/*
 * Drop a reference on @bdi; frees it via release_bdi() on last put.
 * NOTE(review): body appears stripped in this copy of the file.
 */
void bdi_put(struct backing_dev_info *bdi)
{}
EXPORT_SYMBOL(bdi_put);

/*
 * Map @inode to the bdi backing its superblock.
 * NOTE(review): body appears stripped in this copy of the file.
 */
struct backing_dev_info *inode_to_bdi(struct inode *inode)
{}
EXPORT_SYMBOL(inode_to_bdi);

/*
 * Return the device name of @bdi, or the "(unknown)" fallback once the
 * device is gone (see bdi_unknown_name above).
 * NOTE(review): body appears stripped in this copy of the file.
 */
const char *bdi_dev_name(struct backing_dev_info *bdi)
{}
EXPORT_SYMBOL_GPL(bdi_dev_name);