linux/drivers/block/zram/zram_drv.c

/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index allocations/lookups must be protected by zram_index_mutex */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;
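
/*
 * A sketch of how the write path could consult huge_class_size: pages
 * whose compressed size reaches it are stored raw and flagged as huge
 * (assuming the ZRAM_HUGE flag from zram_drv.h). Illustrative, not the
 * verified implementation:
 *
 *	if (comp_len >= huge_class_size) {
 *		comp_len = PAGE_SIZE;
 *		zram_set_flag(zram, index, ZRAM_HUGE);
 *	}
 */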

static const struct block_device_operations zram_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent);

static int zram_slot_trylock(struct zram *zram, u32 index)
{}

static void zram_slot_lock(struct zram *zram, u32 index)
{}

static void zram_slot_unlock(struct zram *zram, u32 index)
{}
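
/*
 * A minimal sketch of the three slot-lock helpers above, assuming a
 * ZRAM_LOCK bit in each table entry's flags word (the flag-operation
 * comment further down notes that bit_spin_lock() serializes slot
 * access):
 *
 *	zram_slot_trylock:	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
 *	zram_slot_lock:		bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
 *	zram_slot_unlock:	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
 */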

static inline bool init_done(struct zram *zram)
{}

static inline struct zram *dev_to_zram(struct device *dev)
{}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{}

/* flag operations require the table entry's bit_spin_lock() to be held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{}
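
/*
 * Sketch of the flag helpers above, assuming each flag is one bit in the
 * per-entry flags word (an illustration; the authoritative layout lives
 * in zram_drv.h):
 *
 *	test:	return zram->table[index].flags & BIT(flag);
 *	set:	zram->table[index].flags |= BIT(flag);
 *	clear:	zram->table[index].flags &= ~BIT(flag);
 */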

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{}

static inline bool zram_allocated(struct zram *zram, u32 index)
{}

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#define ZRAM_PARTIAL_IO	1
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{}
#endif
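
/*
 * zram advertises a 4096-byte logical block size, so with 4K pages every
 * bvec covers exactly one page and the partial-IO paths above compile
 * away; only larger-page architectures need ZRAM_PARTIAL_IO handling.
 */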

static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
{}

static inline u32 zram_get_priority(struct zram *zram, u32 index)
{}

static void zram_accessed(struct zram *zram, u32 index)
{}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{}

static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{}

static bool page_same_filled(void *ptr, unsigned long *element)
{}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

/*
 * Mark all pages that are older than or equal to cutoff as IDLE.
 * Callers should hold the zram init lock in read mode.
 */
static void mark_idle(struct zram *zram, ktime_t cutoff)
{}
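
/*
 * A rough sketch of the marking loop (assuming the ZRAM_IDLE and
 * ZRAM_UNDER_WB flags, and a per-entry ac_time stamp that is only
 * tracked with CONFIG_ZRAM_MEMORY_TRACKING):
 *
 *	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
 *		zram_slot_lock(zram, index);
 *		if (zram_allocated(zram, index) &&
 *		    !zram_test_flag(zram, index, ZRAM_UNDER_WB) &&
 *		    (!cutoff || ktime_after(cutoff, zram->table[index].ac_time)))
 *			zram_set_flag(zram, index, ZRAM_IDLE);
 *		zram_slot_unlock(zram, index);
 *	}
 */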

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static void reset_bdev(struct zram *zram)
{}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static unsigned long alloc_block_bdev(struct zram *zram)
{}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{}

static void read_from_bdev_async(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{}

#define PAGE_WB_SIG "page_index="

#define PAGE_WRITEBACK			0
#define HUGE_WRITEBACK			(1 << 0)
#define IDLE_WRITEBACK			(1 << 1)
#define INCOMPRESSIBLE_WRITEBACK	(1 << 2)

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

struct zram_work {};

static void zram_sync_read(struct work_struct *work)
{}

/*
 * The block layer wants one ->submit_bio to be active at a time, so if we
 * use chained IO with the parent IO in the same context, it's a deadlock.
 * To avoid that, use a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct page *page,
				unsigned long entry)
{}
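
/*
 * One way to realize the worker-thread indirection described above is an
 * on-stack work item that is queued and then synchronously flushed. A
 * sketch, assuming zram_work carries work/zram/entry/page/error fields
 * (its body is not shown in this file):
 *
 *	struct zram_work work = { .zram = zram, .entry = entry, .page = page };
 *
 *	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
 *	queue_work(system_unbound_wq, &work.work);
 *	flush_work(&work.work);
 *	destroy_work_on_stack(&work.work);
 *	return work.error;
 */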

static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{}
#else
static inline void reset_bdev(struct zram *zram) {}
static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{}

static void zram_debugfs_destroy(void)
{}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{}

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{}

static void zram_debugfs_unregister(struct zram *zram)
{}
#else
static void zram_debugfs_create(void) {}
static void zram_debugfs_destroy(void) {}
static void zram_debugfs_register(struct zram *zram) {}
static void zram_debugfs_unregister(struct zram *zram) {}
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2-year
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{}

static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
{}

static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{}

static ssize_t comp_algorithm_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{}

static ssize_t comp_algorithm_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{}

#ifdef CONFIG_ZRAM_MULTI_COMP
static ssize_t recomp_algorithm_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{}

static ssize_t recomp_algorithm_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{}
#endif

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
#endif

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram *zram, u64 disksize)
{}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * the index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{}
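
/*
 * Typical caller pattern for the locking rule above:
 *
 *	zram_slot_lock(zram, index);
 *	zram_free_page(zram, index);
 *	zram_slot_unlock(zram, index);
 */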

/*
 * Reads (decompresses if needed) a page from zspool (zsmalloc).
 * Corresponding ZRAM slot should be locked.
 */
static int zram_read_from_zspool(struct zram *zram, struct page *page,
				 u32 index)
{}

static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent)
{}

/*
 * Use a temporary buffer to decompress the page, as the decompressor
 * always expects a full page for the output.
 */
static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
				  u32 index, int offset)
{}
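
/*
 * Sketch of the bounce-buffer approach described above (illustrative;
 * error handling trimmed):
 *
 *	struct page *page = alloc_page(GFP_NOIO);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = zram_read_page(zram, page, index, NULL);
 *	if (!ret)
 *		memcpy_to_bvec(bvec, page_address(page) + offset);
 *	__free_page(page);
 *	return ret;
 */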

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{}

static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{}

/*
 * This is a partial IO. Read the full page before writing the changes.
 */
static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
				   u32 index, int offset, struct bio *bio)
{}
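
/*
 * A sketch of the read-modify-write cycle described above, with "page"
 * standing for a freshly allocated bounce page (illustrative only):
 *
 *	ret = zram_read_page(zram, page, index, bio);
 *	if (!ret) {
 *		memcpy_from_bvec(page_address(page) + offset, bvec);
 *		ret = zram_write_page(zram, page, index);
 *	}
 */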

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
			   u32 index, int offset, struct bio *bio)
{}

#ifdef CONFIG_ZRAM_MULTI_COMP
/*
 * This function will decompress (unless it's ZRAM_HUGE) the page and then
 * attempt to compress it using the provided compression algorithm priority
 * (which is potentially more effective).
 *
 * Corresponding ZRAM slot should be locked.
 */
static int zram_recompress(struct zram *zram, u32 index, struct page *page,
			   u64 *num_recomp_pages, u32 threshold, u32 prio,
			   u32 prio_max)
{}
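
/*
 * The heart of recompression is a walk over the registered algorithms in
 * priority order. A sketch, assuming a zram->comps[] array indexed by
 * priority; compress_with() stands in for the real zcomp calls:
 *
 *	for (prio = start_prio; prio < prio_max; prio++) {
 *		if (!zram->comps[prio])
 *			continue;
 *		comp_len_new = compress_with(zram->comps[prio], page);
 *		if (comp_len_new < comp_len_old)
 *			break;
 *	}
 *
 * If no algorithm beats the current size, the slot is left untouched.
 */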

#define RECOMPRESS_IDLE		(1 << 0)
#define RECOMPRESS_HUGE		(1 << 1)

static ssize_t recompress_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{}
#endif

static void zram_bio_discard(struct zram *zram, struct bio *bio)
{}

static void zram_bio_read(struct zram *zram, struct bio *bio)
{}

static void zram_bio_write(struct zram *zram, struct bio *bio)
{}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_submit_bio(struct bio *bio)
{}
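
/*
 * The dispatch is a straightforward switch on the request opcode,
 * mirroring the helpers above. A sketch:
 *
 *	struct zram *zram = bio->bi_bdev->bd_disk->private_data;
 *
 *	switch (bio_op(bio)) {
 *	case REQ_OP_READ:
 *		zram_bio_read(zram, bio);
 *		break;
 *	case REQ_OP_WRITE:
 *		zram_bio_write(zram, bio);
 *		break;
 *	case REQ_OP_DISCARD:
 *	case REQ_OP_WRITE_ZEROES:
 *		zram_bio_discard(zram, bio);
 *		break;
 *	default:
 *		WARN_ON_ONCE(1);
 *		bio_endio(bio);
 *	}
 */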

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{}

static void zram_destroy_comps(struct zram *zram)
{}

static void zram_reset_device(struct zram *zram)
{}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}

static int zram_open(struct gendisk *disk, blk_mode_t mode)
{}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
#ifdef CONFIG_ZRAM_MULTI_COMP
	&dev_attr_recomp_algorithm.attr,
	&dev_attr_recompress.attr,
#endif
	NULL,
};

ATTRIBUTE_GROUPS(zram_disk);

/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{}

static int zram_remove(struct zram *zram)
{}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new uninitialized zram device and returns that
 * device's device_id (or an error code if it fails to create a new device).
 */
static ssize_t hot_add_show(const struct class *class,
			const struct class_attribute *attr,
			char *buf)
{}
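
/*
 * Sketch of the read side: reading allocates a device under
 * zram_index_mutex and reports its id (illustrative):
 *
 *	mutex_lock(&zram_index_mutex);
 *	ret = zram_add();
 *	mutex_unlock(&zram_index_mutex);
 *
 *	if (ret < 0)
 *		return ret;
 *	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
 */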
/* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);

static ssize_t hot_remove_store(const struct class *class,
			const struct class_attribute *attr,
			const char *buf,
			size_t count)
{}
static CLASS_ATTR_WO(hot_remove);

static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.class_groups	= zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{}

static void destroy_devices(void)
{}

static int __init zram_init(void)
{}

static void __exit zram_exit(void)
{}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");