/* linux/drivers/md/bcache/request.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <[email protected]>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

/*
 * Cache-occupancy cutoffs (percent full) beyond which new cache adds /
 * readahead adds are skipped. The originals were defined with NO value,
 * which makes any arithmetic or comparison using them a compile error;
 * values restored from mainline bcache — confirm against the intended
 * kernel version.
 */
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

/* Slab cache for struct search (per-request state); created in bch_request_init(). */
struct kmem_cache *bch_search_cache;

/* Forward declaration: referenced by earlier insert-path callbacks below. */
static CLOSURE_CALLBACK(bch_data_insert_start);

/*
 * Return the caching mode for @dc — presumably one of the
 * CACHE_MODE_* values from the superblock; TODO confirm.
 * NOTE(review): stub body — non-void function with no return (UB if used).
 */
static unsigned int cache_mode(struct cached_dev *dc)
{}

/*
 * Whether data verification is enabled for @dc — presumably dc->verify;
 * TODO confirm. NOTE(review): stub body — non-void, no return.
 */
static bool verify(struct cached_dev *dc)
{}

/*
 * Compute a checksum over @bio's data and store it in key @k —
 * presumably used by the verify path; TODO confirm. Stub body.
 */
static void bio_csum(struct bio *bio, struct bkey *k)
{}

/* Insert data into cache */

/*
 * Closure callback: insert the keys accumulated during a data insert
 * into the btree (presumably after journalling — TODO confirm). Stub body.
 */
static CLOSURE_CALLBACK(bch_data_insert_keys)
{}

/*
 * Grow keylist @l to make room for @u64s more u64s; returns an
 * errno-style int (presumably 0 or -ENOMEM — TODO confirm).
 * NOTE(review): stub body — non-void, no return.
 */
static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{}

/*
 * Bypass path of the insert state machine: invalidate the cached region
 * covered by op->bio instead of inserting data (see bch_data_insert's
 * op->bypass contract above). Stub body.
 */
static void bch_data_invalidate(struct closure *cl)
{}

/*
 * Closure callback: error path for a failed data insert — presumably
 * trims keys for data that never got written; TODO confirm. Stub body.
 */
static CLOSURE_CALLBACK(bch_data_insert_error)
{}

/* bio completion handler for cache-device data writes in the insert path. Stub body. */
static void bch_data_insert_endio(struct bio *bio)
{}

/*
 * Main step of the data-insert state machine: allocate buckets, split
 * op->bio across them and issue the writes (presumably — body is empty
 * in this view; TODO confirm against mainline).
 */
static CLOSURE_CALLBACK(bch_data_insert_start)
{}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
/* NOTE(review): stub body — the contract is documented in the kernel-doc block above. */
CLOSURE_CALLBACK(bch_data_insert)
{}

/*
 * Congested?  Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
/*
 * See the comment above: returns 0 (not congested) or the sector limit
 * beyond which the cache should be bypassed.
 * NOTE(review): stub body — non-void, no return.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{}

/*
 * Update per-task sequential-I/O accounting — presumably feeds the
 * sequential-cutoff bypass heuristic; TODO confirm. Stub body.
 */
static void add_sequential(struct task_struct *t)
{}

/*
 * Hash-table bucket head for offset @k in @dc's recent-I/O hash
 * (used for sequential detection — presumably; TODO confirm).
 * NOTE(review): stub body — non-void, no return.
 */
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{}

/*
 * Decide whether @bio should bypass the cache (congestion, sequential
 * I/O, cache mode, etc. — presumably; TODO confirm).
 * NOTE(review): stub body — non-void, no return.
 */
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{}

/* Cache lookup */

/* Per-request state for a cache lookup/request. NOTE(review): members stripped in this view. */
struct search {};

/* bio completion handler for reads served from the cache device. Stub body. */
static void bch_cache_read_endio(struct bio *bio)
{}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
/*
 * See the comment above: per-key btree map callback for the lookup.
 * Presumably returns a MAP_* continue/done code — TODO confirm.
 * NOTE(review): stub body — non-void, no return.
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{}

/* Closure callback: drive the btree lookup for a search using cache_lookup_fn. Stub body. */
static CLOSURE_CALLBACK(cache_lookup)
{}

/* Common code for the make_request functions */

/* Generic bio completion handler for cache-device I/O issued by a search. Stub body. */
static void request_endio(struct bio *bio)
{}

/* bio completion handler for I/O issued to the backing device. Stub body. */
static void backing_request_endio(struct bio *bio)
{}

/*
 * Complete the original bio of @s — presumably accounts the request and
 * calls bio_endio on s's original bio; TODO confirm. Stub body.
 */
static void bio_complete(struct search *s)
{}

/*
 * Initialize @s's working bio as a clone of @orig_bio with completion
 * handler @end_io_fn (presumably — struct search is stripped here). Stub body.
 */
static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{}

/* Closure callback: final teardown — return the search to bch_search_cache. Stub body. */
static CLOSURE_CALLBACK(search_free)
{}

/*
 * Allocate and initialize a struct search from bch_search_cache for
 * @bio against device @d; @orig_bdev/@start_time are kept for
 * completion accounting (presumably — TODO confirm).
 * NOTE(review): stub body — non-void, no return.
 */
static inline struct search *search_alloc(struct bio *bio,
		struct bcache_device *d, struct block_device *orig_bdev,
		unsigned long start_time)
{}

/* Cached devices */

/* Closure callback: complete a cached-device request and free the search. Stub body. */
static CLOSURE_CALLBACK(cached_dev_bio_complete)
{}

/* Process reads */

/* Closure callback: finish a read whose error-recovery retry has completed. Stub body. */
static CLOSURE_CALLBACK(cached_dev_read_error_done)
{}

/*
 * Closure callback: a cache read failed — presumably retries the read
 * from the backing device; TODO confirm. Stub body.
 */
static CLOSURE_CALLBACK(cached_dev_read_error)
{}

/* Closure callback: cleanup after a cache-miss read has been serviced. Stub body. */
static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{}

/*
 * Closure callback: read data is available — presumably copies miss data
 * to the original bio and may insert it into the cache; TODO confirm. Stub body.
 */
static CLOSURE_CALLBACK(cached_dev_read_done)
{}

/*
 * Closure callback run from bio completion context — presumably punts
 * heavier completion work out of atomic context ("_bh"); TODO confirm. Stub body.
 */
static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{}

/*
 * Handle a cache miss of @sectors for @bio: read from the backing
 * device (and presumably set up readahead/insert of the missed data —
 * TODO confirm). Returns a MAP_* code, presumably.
 * NOTE(review): stub body — non-void, no return.
 */
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{}

/* Entry point for processing a read request on a cached device. Stub body. */
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{}

/* Process writes */

/* Closure callback: completion of the write path on a cached device. Stub body. */
static CLOSURE_CALLBACK(cached_dev_write_complete)
{}

/*
 * Entry point for processing a write request on a cached device —
 * presumably dispatches between writeback, writethrough and bypass;
 * TODO confirm. Stub body.
 */
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{}

/* Closure callback: handle a bio with no data payload (presumably flush/discard). Stub body. */
static CLOSURE_CALLBACK(cached_dev_nodata)
{}

/* Per-bio state for I/O passed straight to a detached backing device. NOTE(review): members stripped in this view. */
struct detached_dev_io_private {};

/* bio completion handler for the detached-device passthrough path. Stub body. */
static void detached_dev_end_io(struct bio *bio)
{}

/*
 * Submit @bio directly to the backing device when no cache set is
 * attached; @orig_bdev/@start_time are for completion accounting
 * (presumably — TODO confirm). Stub body.
 */
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
		struct block_device *orig_bdev, unsigned long start_time)
{}

/*
 * NOTE(review): stub body — by its name, drops cached devices on @c out
 * of maximum-writeback-rate mode (presumably so front-end I/O such as
 * @this_dc's request gets bandwidth); verify against mainline.
 */
static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{}

/* Cached devices - read & write stuff */

/* Block-layer entry point (submit_bio) for a cached bcache device. Stub body. */
void cached_dev_submit_bio(struct bio *bio)
{}

/*
 * ioctl handler for a cached device — presumably forwards to the
 * backing device; TODO confirm.
 * NOTE(review): stub body — non-void, no return.
 */
static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			    unsigned int cmd, unsigned long arg)
{}

/* Wire up @dc's request-handling ops (cache_miss, ioctl — presumably). Stub body. */
void bch_cached_dev_request_init(struct cached_dev *dc)
{}

/* Flash backed devices */

/*
 * Cache-miss handler for flash-only volumes — presumably zero-fills the
 * missed range, since there is no backing device; TODO confirm.
 * NOTE(review): stub body — non-void, no return.
 */
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{}

/* Closure callback: no-data (flush) bio on a flash-only volume. Stub body. */
static CLOSURE_CALLBACK(flash_dev_nodata)
{}

/* Block-layer entry point (submit_bio) for a flash-only bcache volume. Stub body. */
void flash_dev_submit_bio(struct bio *bio)
{}

/*
 * ioctl handler for a flash-only volume — presumably rejects with
 * -ENOTTY since there is no backing device; TODO confirm.
 * NOTE(review): stub body — non-void, no return.
 */
static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{}

/* Wire up @d's request-handling ops for a flash-only volume. Stub body. */
void bch_flash_dev_request_init(struct bcache_device *d)
{}

/* Module teardown: destroy bch_search_cache (presumably — TODO confirm). Stub body. */
void bch_request_exit(void)
{}

/*
 * Module init: create bch_search_cache; presumably returns 0 on success
 * or -ENOMEM — TODO confirm.
 * NOTE(review): stub body — non-void, no return.
 */
int __init bch_request_init(void)
{}