// linux/fs/f2fs/compress.c

// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <[email protected]>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * NOTE(review): slab caches for compress_io_ctx / decompress_io_ctx objects;
 * presumably created in f2fs_init_compress_cache() and destroyed in
 * f2fs_destroy_compress_cache() below — bodies are elided in this extract.
 */
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

/* Presumed: allocate an array of @nr page pointers for @inode (body elided). */
static void *page_array_alloc(struct inode *inode, int nr)
{}

/* Presumed: free a page-pointer array obtained from page_array_alloc(). */
static void page_array_free(struct inode *inode, void *pages, int nr)
{}

/* Per-compression-algorithm operation table; members elided in this extract. */
struct f2fs_compress_ops {};

/* Presumed: page offset of @index within its compression cluster (body elided). */
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{}

/* Presumed: cluster index that page @index belongs to (body elided). */
static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{}

/* Presumed: page index of the first page of @cc's current cluster (body elided). */
static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{}

/* Presumed: true if @page holds on-disk compressed data (body elided). */
bool f2fs_is_compressed_page(struct page *page)
{}

/* Presumed: tag @page as a compressed page for (@inode, @index), attaching @data. */
static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{}

/* Presumed: drop up to @len raw (uncompressed) pages of @cc, optionally unlocking. */
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{}

/* Presumed: put references on all raw pages tracked by @cc (body elided). */
static void f2fs_put_rpages(struct compress_ctx *cc)
{}

/* Presumed: unlock the first @len raw pages tracked by @cc (body elided). */
static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{}

/* Presumed: release raw pages during writeback; @redirty re-dirties them first. */
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{}

/* Presumed: map a compressed @page back to its controlling page (body elided). */
struct page *f2fs_compress_control_page(struct page *page)
{}

/* Presumed: set up @cc for a new cluster; returns 0 or -errno (body elided). */
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{}

/* Presumed: tear down @cc; @reuse likely keeps state for the next cluster. */
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{}

/* Presumed: append a raw @page to @cc's current cluster (body elided). */
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{}

#ifdef CONFIG_F2FS_FS_LZO
/* LZO backend (bodies elided): presumed standard context/compress hooks. */
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{}

static int lzo_compress_pages(struct compress_ctx *cc)
{}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{}

/* Initializer elided; presumably wires the lzo_* hooks above into the ops table. */
static const struct f2fs_compress_ops f2fs_lzo_ops =;
#endif

#ifdef CONFIG_F2FS_FS_LZ4
/* LZ4 backend (bodies elided): context hooks plus a compress-level validator. */
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{}

static int lz4_compress_pages(struct compress_ctx *cc)
{}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{}

/* Presumed: range-check a user-supplied LZ4 compression level (body elided). */
static bool lz4_is_level_valid(int lvl)
{}

/* Initializer elided; presumably wires the lz4_* hooks above into the ops table. */
static const struct f2fs_compress_ops f2fs_lz4_ops =;
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
/*
 * ZSTD backend (bodies elided). Unlike LZO/LZ4 it also has per-decompress
 * context init/destroy hooks, presumably because zstd needs a workspace on
 * the read path as well — confirm against full source.
 */
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{}

static int zstd_compress_pages(struct compress_ctx *cc)
{}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{}

/* Presumed: range-check a user-supplied ZSTD compression level (body elided). */
static bool zstd_is_level_valid(int lvl)
{}

/* Initializer elided; presumably wires the zstd_* hooks above into the ops table. */
static const struct f2fs_compress_ops f2fs_zstd_ops =;
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
/* LZO-RLE variant; nested under LZO, so it presumably reuses the LZO ctx hooks. */
static int lzorle_compress_pages(struct compress_ctx *cc)
{}

/* Initializer elided in this extract. */
static const struct f2fs_compress_ops f2fs_lzorle_ops =;
#endif
#endif

/* Algorithm-id -> ops dispatch table, indexed up to COMPRESS_MAX (initializer elided). */
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] =;

/* Presumed: whether the built-in backend for @inode's algorithm is available. */
bool f2fs_is_compress_backend_ready(struct inode *inode)
{}

/* Presumed: validate level @lvl for algorithm @alg via its ops table entry. */
bool f2fs_is_compress_level_valid(int alg, int lvl)
{}

/* Mempool backing temporary pages used during (de)compression. */
static mempool_t *compress_page_pool;
/*
 * NOTE(review): declared as signed int but exported to module_param() with
 * type "uint" — the kernel's __param_check() flags such mismatches; this
 * should probably be unsigned int. Initializer and parm description are
 * elided in this extract.
 */
static int num_compress_pages =;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC();

/* Presumed: create compress_page_pool at module init; returns 0 or -errno. */
int __init f2fs_init_compress_mempool(void)
{}

/* Presumed: destroy compress_page_pool on module teardown. */
void f2fs_destroy_compress_mempool(void)
{}

/* Presumed: take one page from the mempool for compression work. */
static struct page *f2fs_compress_alloc_page(void)
{}

/* Presumed: return a page obtained from f2fs_compress_alloc_page(). */
static void f2fs_compress_free_page(struct page *page)
{}

/* Retry budget for vmap below; value elided in this extract. */
#define MAX_VMAP_RETRIES

/* Presumed: vmap @count pages into one virtual range, retrying on failure. */
static void *f2fs_vmap(struct page **pages, unsigned int count)
{}

/* Presumed: compress @cc's raw pages via the algorithm's ops; 0 or -errno. */
static int f2fs_compress_pages(struct compress_ctx *cc)
{}

/* Forward declarations; definitions appear later in this file. */
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);

/* Presumed: decompress the cluster described by @dic; @in_task selects context. */
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task)
{}

/* Presumed: whether page @index falls inside @cc's current cluster. */
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{}

/* Presumed: true when @cc currently tracks no raw pages (body elided). */
bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{}

/* Presumed: true when @cc has collected a full cluster of raw pages. */
static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{}

/* Presumed: whether page @index can be merged into @cc's current cluster. */
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{}

/* Presumed: check that all of a cluster's pages are present (and uptodate). */
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{}

/* Presumed: detect raw pages in the cluster that must not be compressed. */
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{}

/* Presumed: sanity-check on-disk cluster metadata reachable through @dn. */
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{}

/* Presumed: count valid blocks of the cluster addressed by @dn (body elided). */
static int __f2fs_get_cluster_blocks(struct inode *inode,
					struct dnode_of_data *dn)
{}

/* Presumed: look up @cluster_idx and count its (compressed, per @compr_blks) blocks. */
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr_blks)
{}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{}

/* return whether cluster is compressed one or not */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{}

/* Presumed: policy check — may @cc's cluster be compressed at all? */
static bool cluster_may_compress(struct compress_ctx *cc)
{}

/* Presumed: mark every raw page of @cc's cluster as under writeback. */
static void set_cluster_writeback(struct compress_ctx *cc)
{}

/* Presumed: undo writeback state when submission fails after @submitted pages. */
static void cancel_cluster_writeback(struct compress_ctx *cc,
			struct compress_io_ctx *cic, int submitted)
{}

/* Presumed: re-dirty every raw page of @cc's cluster (body elided). */
static void set_cluster_dirty(struct compress_ctx *cc)
{}

/* Presumed: internal worker preparing a compressed cluster for overwrite. */
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{}

/* Presumed: write_begin hook for compressed files; wraps the worker above. */
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{}

/* Presumed: write_end counterpart; releases @fsdata after @copied bytes. */
/* NOTE(review): stray blank line between signature and body kept as-is. */
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)

{}

/* Presumed: truncate within a compressed cluster starting at byte @from. */
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{}

/* Presumed: submit @cc's cluster as compressed blocks; bumps *submitted. */
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{}

/* Presumed: bio completion handler for compressed-cluster writes. */
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{}

/* Presumed: fallback — write the cluster's pages uncompressed. */
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{}

/* Presumed: entry point choosing compressed vs raw writeback for @cc. */
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{}

/* Presumed: whether memory allocation is allowed in this decompress phase. */
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
		bool pre_alloc)
{}

/* Presumed: allocate decompression workspace for @dic; 0 or -errno. */
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{}

/* Presumed: release workspace from f2fs_prepare_decomp_mem(). */
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{}

/* Forward declaration; defined just below. */
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

/* Presumed: allocate and initialize a decompress_io_ctx for @cc's cluster. */
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{}

/* Presumed: free a dic and all resources it owns (body elided). */
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{}

/* Presumed: deferred-free worker when freeing cannot happen in the caller. */
static void f2fs_late_free_dic(struct work_struct *work)
{}

/* Presumed: drop a reference on @dic, freeing it on last put. */
static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{}

/* Presumed: workqueue callback verifying decompressed data (fs-verity?). */
static void f2fs_verify_cluster(struct work_struct *work)
{}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page, bool in_task)
{}

/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
						unsigned int ofs_in_node)
{}

/* aops for the hidden compress-cache inode; initializer elided in this extract. */
const struct address_space_operations f2fs_compress_aops =;

/* Presumed: return the address_space of @sbi's compress-cache inode. */
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{}

/* Presumed: drop the cached compressed page for @blkaddr, if any. */
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{}

/* Presumed: cache @page's compressed contents keyed by (@ino, @blkaddr). */
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{}

/* Presumed: fill @page from the compress cache; true on hit. */
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{}

/* Presumed: invalidate all cached compressed pages belonging to @ino. */
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{}

/* Presumed: create the per-sb compress-cache inode at mount; 0 or -errno. */
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{}

/* Presumed: release the compress-cache inode at unmount. */
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{}

/* Presumed: set up the per-sb page-array slab cache; 0 or -errno. */
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{}

/* Presumed: destroy the per-sb page-array slab cache. */
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{}

/* Presumed: module-init creation of cic/dic entry slabs; 0 or -errno. */
int __init f2fs_init_compress_cache(void)
{}

/* Presumed: module-exit destruction of cic/dic entry slabs. */
void f2fs_destroy_compress_cache(void)
{}