#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
/*
 * Core types for the z_erofs (compressed data) read path.
 * NOTE(review): all macro values and struct layouts are elided in this
 * excerpt — confirm limits and field layout against the full source.
 */
#define Z_EROFS_PCLUSTER_MAX_PAGES …
#define Z_EROFS_INLINE_BVECS …
/* NOTE(review): typedef body elided; appears to be the link type used to
 * chain pclusters (see Z_EROFS_PCLUSTER_TAIL/NIL sentinels below). */
z_erofs_next_pcluster_t;
/* One buffer vector entry used while gathering pages for a pcluster. */
struct z_erofs_bvec { … };
/* Declares a bvec-set container type; `total` parameterizes an inline
 * entry count — expansion elided, TODO confirm exact shape. */
#define __Z_EROFS_BVSET(name, total) …
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
/* Runtime descriptor of one physical cluster (pcluster); layout elided. */
struct z_erofs_pcluster { … };
/* Sentinel values terminating a z_erofs_next_pcluster_t chain. */
#define Z_EROFS_PCLUSTER_TAIL …
#define Z_EROFS_PCLUSTER_NIL …
/* Per-submission decompression queue; layout elided. */
struct z_erofs_decompressqueue { … };
/* NOTE(review): body elided — name suggests it reports whether @pcl keeps
 * its compressed data inline rather than in separate pages; confirm. */
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{ … }
/* NOTE(review): body elided — presumably the number of physical pages
 * backing @pcl (0 or special-cased for inline pclusters?); confirm. */
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{ … }
/* Accessor for the superblock's managed-cache mapping; expansion elided. */
#define MNGD_MAPPING(sbi) …
/* NOTE(review): body elided — name and parameters suggest it tests whether
 * @fo belongs to @sbi's managed cache (MNGD_MAPPING); confirm. */
static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
{ … }
/* Error flag for the "online folio" tracking below; value elided. */
#define Z_EROFS_FOLIO_EIO …
/*
 * NOTE(review): bodies elided. The init/split/end trio appears to track
 * per-folio completion across multiple pclusters, with Z_EROFS_FOLIO_EIO
 * recording a failure (@err in _end) — verify against full source.
 */
static void z_erofs_onlinefolio_init(struct folio *folio)
{ … }
static void z_erofs_onlinefolio_split(struct folio *folio)
{ … }
static void z_erofs_onlinefolio_end(struct folio *folio, int err)
{ … }
/* Number of on-stack page pointers usable per request; value elided. */
#define Z_EROFS_ONSTACK_PAGES …
/* One size class of the pcluster allocator pool; layout elided. */
struct z_erofs_pcluster_slab { … };
#define _PCLP(n) …
/* Size-class table consumed by z_erofs_{create,destroy}_pcluster_pool()
 * and z_erofs_{alloc,free}_pcluster() below; initializer elided. */
static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = …;
/* Cursor state for walking a chain of bvec sets; layout elided. */
struct z_erofs_bvec_iter { … };
/* NOTE(review): body elided — returns a page from the iterator, likely the
 * page backing the set just finished; confirm ownership (who frees it?). */
static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{ … }
/* NOTE(review): body elided — "flip" suggests advancing the iterator to the
 * next bvset page and returning the previous one; confirm. */
static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{ … }
/* Initialize @iter over @bvset; @bootstrap_nr/@cur semantics elided —
 * presumably inline capacity and starting index (TODO confirm). */
static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvset_inline *bvset,
unsigned int bootstrap_nr,
unsigned int cur)
{ … }
/* NOTE(review): body elided — appends @bvec via @iter; @candidate_bvpage
 * and @pagepool suggest it may consume a spare page for a new bvset.
 * Returns 0 or a negative errno by kernel convention — confirm. */
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec,
struct page **candidate_bvpage,
struct page **pagepool)
{ … }
/* NOTE(review): body elided — pops the next entry into @bvec; @old_bvpage
 * likely receives a bvset page to recycle; confirm. */
static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec,
struct page **old_bvpage)
{ … }
/* Tear down the pcluster_pool[] caches; body elided. */
static void z_erofs_destroy_pcluster_pool(void)
{ … }
/* Create the pcluster_pool[] caches; 0 on success, negative errno on
 * failure by kernel convention — body elided, confirm. */
static int z_erofs_create_pcluster_pool(void)
{ … }
/* NOTE(review): body elided — allocate a pcluster sized for @size
 * (units unclear from this view: pages? bytes? TODO confirm). */
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
{ … }
/* Return @pcl to its pool; body elided. */
static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{ … }
/* Shared workqueue for decompression work items. */
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
/* Per-CPU kthread workers, published via RCU (see the hotplug section). */
static struct kthread_worker __rcu **z_erofs_pcpu_workers;
/* Stop and free all per-CPU workers; body elided. */
static void erofs_destroy_percpu_workers(void)
{ … }
/* Spawn the worker for @cpu; body elided — likely returns ERR_PTR on
 * failure (kthread API convention), TODO confirm. */
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{ … }
/* Allocate z_erofs_pcpu_workers and start one worker per CPU; body elided. */
static int erofs_init_percpu_workers(void)
{ … }
#else
/* No-op: per-CPU kthread workers are compiled out (!CONFIG_EROFS_FS_PCPU_KTHREAD). */
static inline void erofs_destroy_percpu_workers(void)
{
}
/* Nothing to set up without CONFIG_EROFS_FS_PCPU_KTHREAD; always succeeds. */
static inline int erofs_init_percpu_workers(void)
{
	return 0;
}
#endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
/* Serializes updates to the per-CPU worker table across hotplug events. */
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
/* Dynamic cpuhp state returned at registration time; used for teardown. */
static enum cpuhp_state erofs_cpuhp_state;
/* cpuhp online callback: bring up the worker for @cpu; body elided. */
static int erofs_cpu_online(unsigned int cpu)
{ … }
/* cpuhp offline callback: retire @cpu's worker; body elided. */
static int erofs_cpu_offline(unsigned int cpu)
{ … }
/* Register the online/offline callbacks with the cpuhp core; body elided. */
static int erofs_cpu_hotplug_init(void)
{ … }
/* Unregister the cpuhp state registered above; body elided. */
static void erofs_cpu_hotplug_destroy(void)
{ … }
#else
/* Hotplug support compiled out; report success so init can proceed. */
static inline int erofs_cpu_hotplug_init(void)
{
	return 0;
}
/* No-op: nothing was registered when hotplug support is compiled out. */
static inline void erofs_cpu_hotplug_destroy(void)
{
}
#endif
/* Module-exit teardown for the z_erofs machinery declared above
 * (workers, hotplug state, pcluster pools); body elided. */
void z_erofs_exit_subsystem(void)
{ … }
/* Module-init counterpart; 0 on success, negative errno on failure by
 * kernel convention — body elided, confirm cleanup on partial failure. */
int __init z_erofs_init_subsystem(void)
{ … }
/* Per-pcluster caching/claiming strategy for the read frontend;
 * enumerators elided. */
enum z_erofs_pclustermode { … };
/* Per-read-request state threaded through the submission path below;
 * layout elided. */
struct z_erofs_decompress_frontend { … };
#define DECOMPRESS_FRONTEND_INIT(__i) …
/* NOTE(review): body elided — policy predicate deciding whether to
 * allocate cache pages for the current pcluster; confirm criteria. */
static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{ … }
/* NOTE(review): body elided — appears to attach managed-cache folios to
 * the frontend's pcluster; confirm. */
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{ … }
/* Shrinker-side hook: try to drop every cached folio of @grp; body elided.
 * NOTE(review): return-value convention unclear from this view — confirm. */
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp)
{ … }
/* ->release_folio for the managed cache; body elided. */
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{ … }
/* ->invalidate_folio for the managed cache; body elided. */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
size_t offset, size_t length)
{ … }
/* aops table wiring the two callbacks above; initializer elided. */
static const struct address_space_operations z_erofs_cache_aops = …;
/* Set up the per-sb managed-cache inode/mapping; body elided. */
int erofs_init_managed_cache(struct super_block *sb)
{ … }
/* NOTE(review): body elided — attach @bvec's page to the current pcluster;
 * @exclusive presumably marks sole ownership of the page; confirm. */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
struct z_erofs_bvec *bvec, bool exclusive)
{ … }
/* Try to take ownership of the current pcluster chain for @f; body elided. */
static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{ … }
/* Allocate and insert a new pcluster for the frontend's current extent;
 * body elided — confirm error/duplicate-insert handling. */
static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{ … }
/* Look up or register the pcluster for the current map; pairs with
 * z_erofs_pcluster_end() below. Body elided. */
static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{ … }
/* RCU callback performing the deferred free; body elided. */
static void z_erofs_rcu_callback(struct rcu_head *head)
{ … }
/* Free @grp after a grace period via z_erofs_rcu_callback(); body elided. */
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{ … }
/* Release frontend references taken by z_erofs_pcluster_begin(); body elided. */
static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{ … }
/* NOTE(review): body elided — copy a packed-inode fragment covering
 * [@cur, @end) of @folio from offset @pos; confirm range semantics. */
static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
unsigned int cur, unsigned int end, erofs_off_t pos)
{ … }
/* Main per-folio frontend loop: map extents and attach pages; @ra flags
 * the readahead path. Body elided. */
static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
struct folio *folio, bool ra)
{ … }
/* NOTE(review): body elided — heuristic choosing sync vs async
 * decompression; #include <linux/psi.h> above hints pressure may be a
 * factor, but confirm. */
static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
unsigned int readahead_pages)
{ … }
/* True if @page's mapping went away under us; body elided. */
static bool z_erofs_page_is_invalidated(struct page *page)
{ … }
/* Scratch state for decompressing one pcluster; layout elided. */
struct z_erofs_decompress_backend { … };
/* Work-list item pairing a bvec with bookkeeping; layout elided. */
struct z_erofs_bvec_item { … };
/* Route one decompressed @bvec to its output slot; body elided. */
static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
struct z_erofs_bvec *bvec)
{ … }
/* NOTE(review): body elided — copy out bvecs that couldn't be decompressed
 * in place, propagating @err; confirm. */
static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
int err)
{ … }
/* Walk the pcluster's output bvecs into the backend's page array; body elided. */
static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{ … }
/* Collect input (compressed) pages; *@overlapped reports in-place overlap
 * with output pages — body elided, confirm. */
static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
bool *overlapped)
{ … }
/* Decompress one pcluster end-to-end and complete its folios; @err carries
 * any prior I/O error in. Body elided. */
static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
int err)
{ … }
/* Drain @io, decompressing each queued pcluster; @pagepool recycles
 * temporary pages. Body elided. */
static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct page **pagepool)
{ … }
/* Workqueue entry point wrapping z_erofs_decompress_queue(); body elided. */
static void z_erofs_decompressqueue_work(struct work_struct *work)
{ … }
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
/* kthread_worker entry point (per-CPU worker variant); body elided. */
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{ … }
#endif
/* Called as bios complete: when the last of @bios finishes, kick
 * decompression (worker or inline — NOTE(review): confirm policy). */
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
int bios)
{ … }
/* NOTE(review): body elided — pick the page for compressed block @nr of
 * @pcl (possibly from managed cache @mc) into @bvec; confirm. */
static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
struct z_erofs_decompress_frontend *f,
struct z_erofs_pcluster *pcl,
unsigned int nr,
struct address_space *mc)
{ … }
/* Set up a decompress queue; @fgq/@fg select the foreground (on-stack,
 * synchronous) variant — body elided, confirm. */
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
struct z_erofs_decompressqueue *fgq, bool *fg)
{ … }
/* Jobqueue index constants; enumerators elided. */
enum { … };
/* Move @pcl to the bypass (no-I/O) jobqueue, relinking @qtail around
 * @owned_head; body elided. */
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
z_erofs_next_pcluster_t qtail[],
z_erofs_next_pcluster_t owned_head)
{ … }
/* bio completion handler for submitted compressed reads; body elided. */
static void z_erofs_endio(struct bio *bio)
{ … }
/* Build and submit bios for every owned pcluster; *@force_fg selects
 * synchronous completion — body elided. */
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
struct z_erofs_decompressqueue *fgq,
bool *force_fg, bool readahead)
{ … }
/* Top-level: submit I/O and, when foreground, decompress inline;
 * body elided. */
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
bool force_fg, bool ra)
{ … }
/* NOTE(review): body elided — expand the read window so whole pclusters
 * are covered; @backmost likely distinguishes leading vs trailing
 * expansion; confirm. */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
struct readahead_control *rac, bool backmost)
{ … }
/* ->read_folio entry point for compressed inodes; body elided. */
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{ … }
/* ->readahead entry point; body elided. */
static void z_erofs_readahead(struct readahead_control *rac)
{ … }
/* Public aops for z_erofs inodes, wiring the two entry points above;
 * initializer elided. */
const struct address_space_operations z_erofs_aops = …;