linux/drivers/md/dm-bufio.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <[email protected]>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#include "dm.h"

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16
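
/*
 * Illustrative sketch of the policy above (compiled out, not the real
 * implementation): derive a default cache size as the smaller of the two
 * percentage limits.  totalram_pages() and VMALLOC_START/VMALLOC_END are
 * the usual sources for these figures; overflow handling on 32-bit
 * systems is ignored here for brevity.
 */
#if 0
static unsigned long example_default_cache_size(void)
{
	unsigned long mem = totalram_pages() << PAGE_SHIFT;	/* main memory, bytes */
	unsigned long vmem = VMALLOC_END - VMALLOC_START;	/* vmalloc space, bytes */

	return min(mem / 100 * DM_BUFIO_MEMORY_PERCENT,
		   vmem / 100 * DM_BUFIO_VMALLOC_PERCENT);
}
#endif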

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*--------------------------------------------------------------*/

/*
 * Rather than use an LRU list, we use a clock algorithm where entries
 * are held in a circular list.  When an entry is 'hit' a reference bit
 * is set.  The least recently used entry is approximated by running a
 * cursor around the list selecting unreferenced entries. Referenced
 * entries have their reference bit cleared as the cursor passes them.
 */
struct lru_entry {};

struct lru_iter {};

struct lru {};

/*--------------*/

static void lru_init(struct lru *lru)
{}

static void lru_destroy(struct lru *lru)
{}

/*
 * Insert a new entry into the lru.
 */
static void lru_insert(struct lru *lru, struct lru_entry *le)
{}
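
/*
 * Illustrative sketch of the clock scheme described above (compiled out).
 * The struct layout is an assumption for illustration only: an entry
 * carries a list_head plus a reference bit and the lru keeps a cursor
 * into the circular list.  New entries are inserted just behind the
 * cursor, so they are the last ones the sweep will reach.
 */
#if 0
struct example_clock_entry {
	struct list_head list;		/* link in the circular list */
	atomic_t referenced;		/* set on hit, cleared by the passing cursor */
};

struct example_clock {
	struct list_head *cursor;	/* current sweep position, NULL when empty */
	unsigned long count;
};

static void example_clock_insert(struct example_clock *c, struct example_clock_entry *e)
{
	atomic_set(&e->referenced, 1);
	if (c->cursor) {
		list_add_tail(&e->list, c->cursor);	/* just behind the cursor */
	} else {
		INIT_LIST_HEAD(&e->list);		/* first entry forms the circle */
		c->cursor = &e->list;
	}
	c->count++;
}
#endif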

/*--------------*/

/*
 * Convert a list_head pointer to an lru_entry pointer.
 */
static inline struct lru_entry *to_le(struct list_head *l)
{}
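
/*
 * Minimal sketch (compiled out): to_le() is expected to be a plain
 * container_of() conversion.  The member name 'list' is an assumption,
 * since the lru_entry layout is elided above.
 */
#if 0
static inline struct lru_entry *example_to_le(struct list_head *l)
{
	return container_of(l, struct lru_entry, list);
}
#endif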

/*
 * Initialize an lru_iter and add it to the list of cursors in the lru.
 */
static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
{}

/*
 * Remove an lru_iter from the list of cursors in the lru.
 */
static inline void lru_iter_end(struct lru_iter *it)
{}

/* Predicate function type to be used with lru_iter_next */
typedef bool (*iter_predicate)(struct lru_entry *le, void *context);

/*
 * Advance the cursor to the next entry that passes the
 * predicate, and return that entry.  Returns NULL if the
 * iteration is complete.
 */
static struct lru_entry *lru_iter_next(struct lru_iter *it,
				       iter_predicate pred, void *context)
{}

/*
 * Invalidate a specific lru_entry and update all cursors in
 * the lru accordingly.
 */
static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
{}

/*--------------*/

/*
 * Remove a specific entry from the lru.
 */
static void lru_remove(struct lru *lru, struct lru_entry *le)
{}

/*
 * Mark as referenced.
 */
static inline void lru_reference(struct lru_entry *le)
{}

/*--------------*/

/*
 * Remove the (approximately) least recently used entry that passes the predicate.
 * Returns NULL on failure.
 */
enum evict_result {};

typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);

static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{}

/*--------------------------------------------------------------*/

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {};

struct dm_buffer {};

/*--------------------------------------------------------------*/

/*
 * The buffer cache manages buffers, particularly:
 *  - inc/dec of holder count
 *  - setting the last_accessed field
 *  - maintains clean/dirty state along with lru
 *  - selecting buffers that match predicates
 *
 * It does *not* handle:
 *  - allocation/freeing of buffers.
 *  - IO
 *  - Eviction or cache sizing.
 *
 * cache_get() and cache_put() are threadsafe; you do not need to
 * protect these calls with a surrounding mutex.  All the other
 * methods are not threadsafe; they do use locking primitives, but
 * only enough to ensure that get/put are threadsafe.
 */

struct buffer_tree {} ____cacheline_aligned_in_smp;

struct dm_buffer_cache {};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{}
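
/*
 * Illustrative sketch (compiled out): map a block number to one of
 * 'num_locks' locks.  A real implementation would typically hash the
 * block; hash_64() from <linux/hash.h> is used here and num_locks is
 * assumed to be a power of two so the mask keeps the index in range.
 */
#if 0
static inline unsigned int example_cache_index(sector_t block, unsigned int num_locks)
{
	return hash_64(block, 32) & (num_locks - 1);
}
#endif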

static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{}

/*
 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 * This struct helps avoid redundant drop and gets of the same lock.
 */
struct lock_history {};

static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
{}

static void __lh_lock(struct lock_history *lh, unsigned int index)
{}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{}

/*
 * Make sure you call this since it will unlock the final lock.
 */
static void lh_exit(struct lock_history *lh)
{}

/*
 * Named 'next' because there is no corresponding
 * 'up/unlock' call since it's done automatically.
 */
static void lh_next(struct lock_history *lh, sector_t b)
{}
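
/*
 * Illustrative sketch (compiled out) of the lock_history idea explained
 * above struct lock_history: only cycle the lock when the block maps to
 * a different lock index than the one currently held.  The field names
 * and the example_lock()/example_unlock() helpers are hypothetical.
 */
#if 0
struct example_lock_history {
	unsigned int num_locks;
	unsigned int previous;	/* lock index currently held, or num_locks if none */
};

static void example_lh_next(struct example_lock_history *lh, sector_t b)
{
	unsigned int index = cache_index(b, lh->num_locks);

	if (lh->previous == index)
		return;				/* same lock as last time: nothing to do */

	if (lh->previous < lh->num_locks)
		example_unlock(lh->previous);	/* hypothetical helper */
	example_lock(index);			/* hypothetical helper */
	lh->previous = index;
}
#endif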

static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
{}

static struct dm_buffer *list_to_buffer(struct list_head *l)
{}

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{}

static void cache_destroy(struct dm_buffer_cache *bc)
{}

/*--------------*/

/*
 * Not threadsafe, or racy, depending on how you look at it.
 */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{}

/*--------------*/

/*
 * Gets a specific buffer, indexed by block.
 * If the buffer is found then its holder count will be incremented and
 * lru_reference will be called.
 *
 * threadsafe
 */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{}
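
/*
 * Illustrative sketch (compiled out): a standard rbtree walk keyed by
 * block number.  The 'node' and 'block' members of dm_buffer are
 * assumptions here, since the structure body is elided above.
 */
#if 0
static struct dm_buffer *example_rb_lookup(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct dm_buffer *b = rb_entry(n, struct dm_buffer, node);

		if (b->block == block)
			return b;
		n = block < b->block ? n->rb_left : n->rb_right;
	}
	return NULL;
}
#endif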

static void __cache_inc_buffer(struct dm_buffer *b)
{}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{}

/*--------------*/

/*
 * Returns true if the hold count hits zero.
 * threadsafe
 */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{}

/*--------------*/

typedef enum evict_result (*b_predicate)(struct dm_buffer *b, void *context);

/*
 * Evicts a buffer based on a predicate.  The oldest buffer that
 * matches the predicate will be selected.  In addition to matching
 * the predicate, the selected buffer must have a hold_count of zero.
 */
struct evict_wrapper {};

/*
 * Wraps the buffer predicate turning it into an lru predicate.  Adds
 * extra test for hold_count.
 */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{}

static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{}

/*--------------*/

/*
 * Mark a buffer as clean or dirty. Not threadsafe.
 */
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{}

/*--------------*/

/*
 * Runs through the lru associated with 'old_mode'; entries that match the
 * predicate are moved to 'new_mode'.  Not threadsafe.
 */
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{}

/*--------------*/

/*
 * Iterates through all clean or dirty entries calling a function for each
 * entry.  The callback may terminate the iteration early.  Not threadsafe.
 */

/*
 * Iterator functions should return one of these actions to indicate
 * how the iteration should proceed.
 */
enum it_action {};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{}

/*--------------*/

/*
 * Passes ownership of the buffer to the cache. Returns false if the
 * buffer was already present (in which case ownership does not pass),
 * e.g. because of a race with another thread.
 *
 * Holder count should be 1 on insertion.
 *
 * Not threadsafe.
 */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{}
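
/*
 * Illustrative sketch (compiled out): insert into the rbtree, refusing
 * duplicates as the comment above requires.  Same assumed 'node' and
 * 'block' members as in the lookup sketch earlier in this file.
 */
#if 0
static bool example_rb_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct dm_buffer *found = rb_entry(*new, struct dm_buffer, node);

		parent = *new;
		if (found->block == b->block)
			return false;	/* already present: ownership does not pass */
		new = b->block < found->block ? &(*new)->rb_left : &(*new)->rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);
	return true;
}
#endif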

static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{}

/*--------------*/

/*
 * Removes buffer from cache, ownership of the buffer passes back to the caller.
 * Fails if the hold_count is not one (i.e. the caller must hold the only reference).
 *
 * Not threadsafe.
 */
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{}

/*--------------*/

typedef void (*b_release)(struct dm_buffer *b);

static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{}

/*----------------------------------------------------------------*/

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_cache with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()

static void dm_bufio_lock(struct dm_bufio_client *c)
{}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If the two values disagree, the user has changed the cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{}
#endif

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here; it just causes flushes of some other
 * buffers, so more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order > MAX_PAGE_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{}
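
/*
 * Illustrative sketch (compiled out) of the allocation policy described
 * above.  The dm_bufio_client members ('slab_cache', 'block_size') and
 * the DATA_MODE_* enumerators are assumptions, since those definitions
 * are elided in this file; __vmalloc() is used with its current
 * two-argument form.
 */
#if 0
static void *example_alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
					unsigned char *data_mode)
{
	if (c->slab_cache) {
		/* small buffers: a slab cache uses the space optimally */
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (gfp_mask & __GFP_NORETRY) {
		/* the allocation may fail: fragmentation is not fatal here */
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask, get_order(c->block_size));
	}

	/* must-not-fail (reserve) allocation: fall back to vmalloc */
	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask);
}
#endif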

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{}

/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset,
		     unsigned short ioprio)
{}

static void bio_complete(struct bio *bio)
{}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset,
		    unsigned short ioprio)
{}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{}

static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{}
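
/*
 * Illustrative sketch (compiled out) of the bio-vs-dm-io decision
 * described in the block comment above: use the bio interface for
 * direct-mapped buffers that fit in a single bio, and fall back to
 * dm-io otherwise.  The dm_buffer members and the BIO_MAX_VECS-based
 * size check are simplified assumptions; the end_io plumbing is omitted.
 */
#if 0
static void example_submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio)
{
	sector_t sector = block_to_sector(b->c, b->block);
	unsigned int n_sectors = b->c->block_size >> SECTOR_SHIFT;

	if (b->data_mode != DATA_MODE_VMALLOC &&
	    n_sectors <= ((BIO_MAX_VECS * PAGE_SIZE) >> SECTOR_SHIFT))
		use_bio(b, op, sector, n_sectors, 0, ioprio);	/* direct-mapped and small enough */
	else
		use_dmio(b, op, sector, n_sectors, 0, ioprio);	/* dm-io splits the rest */
}
#endif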

/*
 *--------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------
 */

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{}
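
/*
 * Illustrative sketch (compiled out) of the steps listed above: skip
 * clean buffers, wait for any write already in flight, then flag
 * B_WRITING and submit without waiting.  The 'state' member and the
 * simplified handling (no deferred write_list) are assumptions.
 */
#if 0
static void example_write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;						/* nothing to write */

	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);	/* one write at a time */

	clear_bit(B_DIRTY, &b->state);
	set_bit(B_WRITING, &b->state);				/* write now in progress */
	submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
}
#endif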

static void __flush_write_list(struct list_head *write_list)
{}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{}

static enum evict_result is_clean(struct dm_buffer *b, void *context)
{}

static enum evict_result is_dirty(struct dm_buffer *b, void *context)
{}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{}

enum new_flag {};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{}

static enum evict_result cleaned(struct dm_buffer *b, void *context)
{}

static void __move_clean_buffers(struct dm_bufio_client *c)
{}

struct write_context {};

static enum it_action write_one(struct dm_buffer *b, void *context)
{}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{}

/*
 * Check if we're over the watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we are over limit_buffers, block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{}
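
/*
 * Illustrative sketch (compiled out) of the first half of the watermark
 * check: once dirty buffers outnumber clean ones by the writeback ratio,
 * start asynchronous writeback.  The 'cache' member of dm_bufio_client
 * is an assumption, and the blocking "limit_buffers" half is omitted.
 */
#if 0
static void example_check_watermark(struct dm_bufio_client *c, struct list_head *write_list)
{
	if (cache_count(&c->cache, LIST_DIRTY) >
	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}
#endif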

/*
 *--------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------
 */

static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
{}

/*
 * This assumes you have already checked the cache to see if the buffer
 * is already present (it will recheck after dropping the lock for allocation).
 */
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp,
		      unsigned short ioprio)
{}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get);

static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
			struct dm_buffer **bp, unsigned short ioprio)
{}

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
				struct dm_buffer **bp, unsigned short ioprio)
{}
EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{}
EXPORT_SYMBOL_GPL(dm_bufio_new);

static void __dm_bufio_prefetch(struct dm_bufio_client *c,
			sector_t block, unsigned int n_blocks,
			unsigned short ioprio)
{}

void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
{}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
				unsigned int n_blocks, unsigned short ioprio)
{}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);

void dm_bufio_release(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned int start, unsigned int end)
{}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
static bool is_writing(struct lru_entry *e, void *context)
{}

int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to flush the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
{}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

static enum evict_result idle(struct dm_buffer *b, void *context)
{}

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static enum it_action warn_leak(struct dm_buffer *b, void *context)
{}

static void drop_buffers(struct dm_bufio_client *c)
{}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{}

static void __scan(struct dm_bufio_client *c)
{}

static void shrink_work(struct work_struct *w)
{}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
					       unsigned int reserved_buffers, unsigned int aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *),
					       unsigned int flags)
{}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_client_reset(struct dm_bufio_client *c)
{}
EXPORT_SYMBOL_GPL(dm_bufio_client_reset);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

/*--------------------------------------------------------------*/

static unsigned int get_max_age_hz(void)
{}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{}

struct evict_params {};

/*
 * We may not be able to evict this buffer if IO is pending or the client
 * is still using it.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
{}
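
/*
 * Illustrative sketch (compiled out) of the constraint described above:
 * when the caller's gfp mask forbids IO (no __GFP_FS/__GFP_IO), skip
 * buffers that would need IO (reading, writing or dirty); with a
 * permissive mask the eviction path can wait for or write the buffer
 * instead.  The evict_params 'gfp' field, the dm_buffer 'state' field
 * and the ER_* enumerator names are assumptions, and the age check is
 * omitted.
 */
#if 0
static enum evict_result example_select_for_evict(struct dm_buffer *b, void *context)
{
	struct evict_params *params = context;

	if (!(params->gfp & __GFP_FS) || !(params->gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return ER_DONT_EVICT;	/* would need IO we must not issue */
	}
	return ER_EVICT;
}
#endif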

static unsigned long __evict_many(struct dm_bufio_client *c,
				  struct evict_params *params,
				  int list_mode, unsigned long max_count)
{}

static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{}

static void cleanup_old_buffers(void)
{}

static void work_fn(struct work_struct *w)
{}

/*--------------------------------------------------------------*/

/*
 * Global cleanup tries to evict the oldest buffers from across _all_
 * the clients.  It does this by repeatedly evicting a few buffers from
 * the client that holds the oldest buffer.  It's approximate, but hopefully
 * good enough.
 */
static struct dm_bufio_client *__pop_client(void)
{}

/*
 * Inserts the client in the global client list based on its
 * 'oldest_buffer' field.
 */
static void __insert_client(struct dm_bufio_client *new_client)
{}

static unsigned long __evict_a_few(unsigned long nr_buffers)
{}

static void check_watermarks(void)
{}

static void evict_old(void)
{}

static void do_global_cleanup(struct work_struct *w)
{}

/*
 *--------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------
 */

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");