/* linux/fs/f2fs/segment.h */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO
#define NULL_SECNO

#define DEF_RECLAIM_PREFREE_SEGMENTS
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS

#define F2FS_MIN_SEGMENTS
#define F2FS_MIN_META_SEGMENTS

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)
#define GET_R2L_SEGNO(free_i, segno)

#define IS_DATASEG(t)
#define IS_NODESEG(t)
#define SE_PAGETYPE(se)

/*
 * Debug-time validation of a segment type value.
 * NOTE(review): body elided in this skeleton — presumably range-checks
 * seg_type against the valid CURSEG types; confirm against full source.
 */
static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{}

/* temperature classification of a log type (expansions elided) */
#define IS_HOT(t)
#define IS_WARM(t)
#define IS_COLD(t)

/* presumably true when the segment belongs to an active log — confirm */
#define IS_CURSEG(sbi, seg)

/* presumably true when the section holds an active log — confirm */
#define IS_CURSEC(sbi, secno)

/* base block addresses of the main area and of segment 0 (expansions elided) */
#define MAIN_BLKADDR(sbi)
#define SEG0_BLKADDR(sbi)

/* segment/section counts of the main area (expansions elided) */
#define MAIN_SEGS(sbi)
#define MAIN_SECS(sbi)

/* whole-volume totals (expansions elided) */
#define TOTAL_SEGS(sbi)
#define TOTAL_BLKS(sbi)

/* last valid block address and bytes per segment (expansions elided) */
#define MAX_BLKADDR(sbi)
#define SEGMENT_SIZE(sbi)

/* first block address of a given segment (expansion elided) */
#define START_BLOCK(sbi, segno)

/* next block to allocate in an active log (expansion elided) */
#define NEXT_FREE_BLKADDR(sbi, curseg)

/* address arithmetic relative to segment 0 (expansions elided) */
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)

/* block <-> segment <-> section <-> zone translation helpers (expansions elided) */
#define GET_SEGNO(sbi, blk_addr)
#define CAP_BLKS_PER_SEC(sbi)
#define CAP_SEGS_PER_SEC(sbi)
#define GET_SEC_FROM_SEG(sbi, segno)
#define GET_SEG_FROM_SEC(sbi, secno)
#define GET_ZONE_FROM_SEC(sbi, secno)
#define GET_ZONE_FROM_SEG(sbi, segno)

/* summary block address for a segment (expansion elided) */
#define GET_SUM_BLOCK(sbi, segno)

/* read/write the type stored in a summary footer (expansions elided) */
#define GET_SUM_TYPE(footer)
#define SET_SUM_TYPE(footer, type)

/* SIT (segment info table) addressing helpers (expansions elided) */
#define SIT_ENTRY_OFFSET(sit_i, segno)
#define SIT_BLOCK_OFFSET(segno)
#define START_SEGNO(segno)
#define SIT_BLK_CNT(sbi)
#define f2fs_bitmap_size(nr)

/* filesystem block <-> device sector conversion (expansions elided) */
#define SECTOR_FROM_BLOCK(blk_addr)
#define SECTOR_TO_BLOCK(sectors)

/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into
 * fragmented segment which has similar aging degree.
 */
/*
 * NOTE(review): all enum/struct bodies below are empty. "enum {};" (an
 * empty enumerator list) violates an ISO C constraint, and empty struct
 * bodies are a GNU extension — the member lists appear to have been
 * stripped and must be restored before this header can build. The
 * one-line notes are presumed from the identifiers; confirm upstream.
 */
enum {};

/*
 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 * GC_AT is based on age-threshold algorithm.
 */
enum {};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {};

/* per-segment bookkeeping (valid blocks, bitmaps, mtime) — members elided */
struct seg_entry {};

/* per-section aggregate of segment data — members elided */
struct sec_entry {};

/* limit on consecutively skipped GC attempts (value elided) */
#define MAX_SKIP_GC_COUNT

/* presumably records a block to restore on atomic-write revoke — confirm */
struct revoke_entry {};

/* in-memory state of the segment info table — members elided */
struct sit_info {};

/* in-memory free-segment bitmap state — members elided */
struct free_segmap_info {};

/* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */
enum dirty_type {};

/* per-dirty-type segment list bookkeeping — members elided */
struct dirty_seglist_info {};

/* for active log information */
struct curseg_info {};

/* groups SIT entries that share one on-disk SIT block — members elided */
struct sit_entry_set {};

/*
 * inline functions
 * NOTE(review): every body below is empty ("{}") — stripped skeleton.
 * The per-function notes are presumed from the signatures; confirm
 * against the full source before relying on them.
 */
/* return the active log (curseg) of the given type — body elided */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{}

/* look up the in-memory SIT entry for a segment — body elided */
static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{}

/* look up the section entry covering this segment — body elided */
static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{}

/* valid-block count of the segment, or of its whole section — body elided */
static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{}

/* checkpointed valid-block count (segment or section) — body elided */
static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{}

/* decode an on-disk SIT entry into the in-memory seg_entry — body elided */
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{}

/* encode the in-memory seg_entry back to raw SIT form — body elided */
static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{}

/* write seg_entries starting at 'start' into an on-disk SIT page — body elided */
static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{}

/* presumably wraps __seg_info_to_raw_sit with extra bookkeeping — confirm */
static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{}

/* find the next in-use segment at or after segno, bounded by max — body elided */
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{}

/* mark a segment free in the free segmap — body elided */
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{}

/* mark a segment in-use in the free segmap — body elided */
static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{}

/* test-and-clear variant of __set_free; 'inmem' selects the bitmap — confirm */
static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{}

/* test-and-set variant of __set_inuse — body elided */
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{}

/* copy the current SIT bitmap into dst_addr — body elided */
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{}

/* number of blocks written so far — body elided */
static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{}

/* count of free segments — body elided */
static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{}

/* count of reserved segments — body elided */
static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{}

/* count of free sections — body elided */
static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{}

/* count of prefree segments (freed but not yet checkpointed) — confirm */
static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{}

/* count of dirty segments — body elided */
static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{}

/* count of overprovision segments — body elided */
static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{}

/* count of reserved sections — body elided */
static inline int reserved_sections(struct f2fs_sb_info *sbi)
{}

/* whether the active logs can absorb the given node/dentry blocks — body elided */
static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
			unsigned int node_blocks, unsigned int dent_blocks)
{}

/*
 * calculate needed sections for dirty node/dentry
 * and call has_curseg_enough_space
 */
static inline void __get_secs_required(struct f2fs_sb_info *sbi,
		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
{}

/* true when free sections (plus 'freed') cannot cover 'needed' — body elided */
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{}

/* logical complement of has_not_enough_free_secs — confirm */
static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{}

/* whether enough space remains to take a checkpoint — body elided */
static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{}

/* true when prefree segments exceed the reclaim threshold — confirm */
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{}

/* filesystem utilization, presumably a percentage — confirm */
static inline int utilization(struct f2fs_sb_info *sbi)
{}

/*
 * Sometimes f2fs may be better to drop out-of-place update policy.
 * And, users can control the policy through sysfs entries.
 * There are five policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                     storages. IPU will be triggered only if the # of dirty
 *                     pages over min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
 *                            FI_OPU_WRITE flag.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
/* IPU tuning defaults (values elided in this skeleton) */
#define DEF_MIN_IPU_UTIL
#define DEF_MIN_FSYNC_BLOCKS
#define DEF_MIN_HOT_BLOCKS

/* segment count below which a volume is considered "small" (value elided) */
#define SMALL_VOLUME_SEGMENTS

#define F2FS_IPU_DISABLE

/* Modification on enum should be synchronized with ipu_mode_names array */
enum {};

/* true when the IPU policy is set to DISABLE — body elided */
static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
{}

/*
 * NOTE(review): with an empty expansion, the eight invocations below become
 * bare file-scope semicolons, which ISO C (pre-C23) does not permit —
 * upstream this presumably generates per-policy predicate helpers; confirm.
 */
#define F2FS_IPU_POLICY(name)

F2FS_IPU_POLICY(F2FS_IPU_FORCE);
F2FS_IPU_POLICY(F2FS_IPU_SSR);
F2FS_IPU_POLICY(F2FS_IPU_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);

/* segment number of the active log of the given type — body elided */
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{}

/* allocation mode (presumably LFS/SSR) of the active log — confirm */
static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{}

/* whether segno lies inside the main area — body elided */
static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
		unsigned int segno)
{}

/* sanity-check the block addresses carried by an f2fs_io_info — body elided */
static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{}

/*
 * Summary block is always treated as an invalid block
 */
/* validate a raw SIT entry's valid-block count/bitmap — body elided */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{}

/* on-disk address of the current SIT block for 'start' — body elided */
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{}

/* address of the other copy of a SIT block (ping-pong pair) — confirm */
static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{}

/* flip which SIT block copy is current for 'start' — body elided */
static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{}

/* segment modification time, optionally relative to a base — confirm */
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{}

/* fill an f2fs_summary with owner nid/offset/version — body elided */
static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{}

/* first block address of the summary area — body elided */
static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{}

/* summary block address for 'type' relative to 'base' — body elided */
static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{}

/* whether a section is in use (e.g. holds an active log) — confirm */
static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{}

/*
 * It is very important to gather dirty pages and write at once, so that we can
 * submit a big bio without interfering other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
/* dirty-page threshold below which writeback is deferred — body elided */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{}

/*
 * When writing pages, it'd better align nr_to_write for segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{}

/* wake the discard thread, optionally forcing immediate work — body elided */
static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{}

/* first segment number located on a zoned device — body elided */
static inline unsigned int first_zoned_segno(struct f2fs_sb_info *sbi)
{}