// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <[email protected]>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;
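
/*
 * Cutoff thresholds, as a percentage of cache in use, above which writeback
 * caching is not done for incoming writes. Both are module parameters (set
 * up at the bottom of this file) and are sanity-checked by
 * check_module_parameters() at load time.
 */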

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_flush_wq;
struct workqueue_struct *bch_journal_wq;


#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* maximum number of partitions on a single bcache device */
#define BCACHE_MINORS		128
/* maximum number of bcache devices on a single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)
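
/*
 * Worked example: MINORBITS is 20, so reserving BCACHE_MINORS (128) minors
 * per device allows (1 << 20) / 128 = 8192 bcache devices on one system.
 */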

/* Superblock */

static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
{}

static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
				     struct cache_sb_disk *s)
{}


static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct cache_sb_disk **res)
{}

static void write_bdev_super_endio(struct bio *bio)
{}

static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
		struct bio *bio)
{}

static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
{}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{}

static void write_super_endio(struct bio *bio)
{}

static CLOSURE_CALLBACK(bcache_write_super_unlock)
{}

void bcache_write_super(struct cache_set *c)
{}

/* UUID io */

static void uuid_endio(struct bio *bio)
{}

static CLOSURE_CALLBACK(uuid_io_unlock)
{}

static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
		    struct closure *parent)
{}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{}

static int __uuid_write(struct cache_set *c)
{}

int bch_uuid_write(struct cache_set *c)
{}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *  16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
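
/*
 * For reference, a sketch of the packed on-disk layout described above,
 * reproduced in comment form only (assuming the declarations in bcache.h):
 *
 *	struct bucket_disk {
 *		__u16	prio;
 *		__u8	gen;
 *	} __attribute((packed));
 *
 *	struct prio_set {
 *		__u64	csum;
 *		__u64	magic;
 *		__u64	seq;
 *		__u32	version;
 *		__u32	pad;
 *		__u64	next_bucket;	// forms the on-disk bucket list
 *		struct bucket_disk data[];
 *	} __attribute((packed));
 */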

static void prio_endio(struct bio *bio)
{}

static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
{}

int bch_prio_write(struct cache *ca, bool wait)
{}

static int prio_read(struct cache *ca, uint64_t bucket)
{}

/* Bcache device */

static int open_dev(struct gendisk *disk, blk_mode_t mode)
{}

static void release_dev(struct gendisk *b)
{}

static int ioctl_dev(struct block_device *b, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{}

static const struct block_device_operations bcache_cached_ops = {
	.submit_bio	= cached_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

static const struct block_device_operations bcache_flash_ops = {
	.submit_bio	= flash_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{}

static void bcache_device_unlink(struct bcache_device *d)
{}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{}

static void bcache_device_detach(struct bcache_device *d)
{}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned int id)
{}

static inline int first_minor_to_idx(int first_minor)
{}

static inline int idx_to_first_minor(int idx)
{}
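
/*
 * The two helpers above convert between a bcache device index and the first
 * minor number of its minor range; since each device reserves BCACHE_MINORS
 * minors, the conversion is simply a multiplication or division by
 * BCACHE_MINORS.
 */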

static void bcache_device_free(struct bcache_device *d)
{}

static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
		sector_t sectors, struct block_device *cached_bdev,
		const struct block_device_operations *ops)
{}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{}

#define BACKING_DEV_OFFLINE_TIMEOUT	5
static int cached_dev_status_update(void *arg)
{}


int bch_cached_dev_run(struct cached_dev *dc)
{}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed work
 * dc->writeback_rate_update is currently running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is cleared), then cancel the delayed
 * work. If BCACHE_DEV_RATE_DW_RUNNING is still set after time_out seconds,
 * give up waiting and cancel the delayed work anyway. (A sketch of this
 * pattern follows the function below.)
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{}
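
/*
 * A minimal sketch of the wait-then-cancel pattern described above (the
 * exact body is elided here; details beyond the flag and the work item are
 * illustrative):
 *
 *	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
 *
 *	do {
 *		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags))
 *			break;
 *		time_out--;
 *		schedule_timeout_interruptible(1);
 *	} while (time_out > 0);
 *
 *	cancel_delayed_work_sync(&dc->writeback_rate_update);
 */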

static void cached_dev_detach_finish(struct work_struct *w)
{}

void bch_cached_dev_detach(struct cached_dev *dc)
{}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{}

/* when dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{}

static CLOSURE_CALLBACK(cached_dev_free)
{}

static CLOSURE_CALLBACK(cached_dev_flush)
{}

static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{}

/* Cached device - bcache superblock */

static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				 struct file *bdev_file,
				 struct cached_dev *dc)
{}

/* Flash only volumes */

/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{}

static CLOSURE_CALLBACK(flash_dev_free)
{}

static CLOSURE_CALLBACK(flash_dev_flush)
{}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{}

static int flash_devs_run(struct cache_set *c)
{}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{}

bool bch_cached_dev_error(struct cached_dev *dc)
{}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{}

/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{}

static CLOSURE_CALLBACK(cache_set_free)
{}

static CLOSURE_CALLBACK(cache_set_flush)
{}

/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * the cache set is unregistering due to too many I/O errors. In this
 * condition, the bcache device might be stopped; whether it is depends on
 * the stop_when_cache_set_failed setting and on whether the broken cache
 * has dirty data:
 *
 * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
 *  BCH_CACHED_DEV_STOP_AUTO           0               NO
 *  BCH_CACHED_DEV_STOP_AUTO           1               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
 *
 * The expected behavior is: if stop_when_cache_set_failed is configured to
 * "auto" via the sysfs interface, the bcache device is not stopped as long
 * as the backing device is clean on the broken cache device. (A sketch of
 * this decision follows the function below.)
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{}
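
/*
 * A sketch of the decision encoded in the table above (assumption: the
 * elided body reduces to this shape):
 *
 *	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS ||
 *	    (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO &&
 *	     atomic_read(&dc->has_dirty)))
 *		bcache_device_stop(d);
 */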

static CLOSURE_CALLBACK(__cache_set_unregister)
{}

void bch_cache_set_stop(struct cache_set *c)
{}

void bch_cache_set_unregister(struct cache_set *c)
{}

#define alloc_meta_bucket_pages(gfp, sb)		\
	((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{}

static int run_cache_set(struct cache_set *c)
{}

static const char *register_cache_set(struct cache *ca)
{}

/* Cache device */

/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{}

static int cache_alloc(struct cache *ca)
{}

static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				struct file *bdev_file,
				struct cache *ca)
{}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);
kobj_attribute_write(pendings_cleanup,	bch_pending_bdevs_cleanup);
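
/*
 * These attributes appear as /sys/fs/bcache/register,
 * /sys/fs/bcache/register_quiet and /sys/fs/bcache/pendings_cleanup;
 * echoing a block device path into the first two is how userspace
 * (e.g. bcache-tools) registers a cache or backing device.
 */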

static bool bch_is_open_backing(dev_t dev)
{}

static bool bch_is_open_cache(dev_t dev)
{}

static bool bch_is_open(dev_t dev)
{}

struct async_reg_args {
	struct delayed_work reg_work;
	char *path;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct file *bdev_file;
	void *holder;
};

static void register_bdev_worker(struct work_struct *work)
{}

static void register_cache_worker(struct work_struct *work)
{}

static void register_device_async(struct async_reg_args *args)
{}

static void *alloc_holder_object(struct cache_sb *sb)
{}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{}


struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};

static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{}
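
/*
 * In upstream bcache this fills in the defaults (CUTOFF_WRITEBACK = 40,
 * CUTOFF_WRITEBACK_SYNC = 70) when a parameter is zero, clamps values above
 * CUTOFF_WRITEBACK_MAX (70) / CUTOFF_WRITEBACK_SYNC_MAX (90), and ensures
 * bch_cutoff_writeback does not exceed bch_cutoff_writeback_sync.
 */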

static int __init bcache_init(void)
{}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <[email protected]>");
MODULE_LICENSE("GPL");