/* linux/mm/z3fold.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <[email protected]>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is an special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e. g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of object per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

/* Prefix all pr_*() messages with the module name for attributable logs. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which shows the max number of free chunks in z3fold page, also there will
 * be 63, or 62, respectively, freelists per pool.
 */
#define NCHUNKS_ORDER	6

/* Chunk granularity: PAGE_SIZE / 2^NCHUNKS_ORDER bytes per chunk. */
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
/* Header is rounded up to a whole number of chunks at the page start. */
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
/* Chunks usable for payload, i.e. total minus header chunks. */
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

/* Low handle bits encode the buddy number (2 bits for up to 4 buddies). */
#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
/* z3fold_buddy_slots allocations are aligned so low pointer bits are free. */
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;

/*
 * Position of a buddy within a z3fold page.  HEADLESS marks pages used as
 * a single large allocation with no z3fold header at all.
 * NOTE(review): restored from the upstream definition — an empty enum is
 * not valid C and the original enumerators were stripped from this copy.
 */
enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

/*
 * NOTE(review): the member list of this structure has been stripped from
 * this copy (upstream it holds a per-buddy handle slot array, a back link
 * to the pool and a lock) — restore before building; left untouched here
 * because the members need kernel-internal types.
 */
struct z3fold_buddy_slots {};
/* Low handle bits that carry z3fold_handle_flags rather than address bits. */
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
/*
 * NOTE(review): the member list has been stripped from this copy — the
 * kernel-doc above still names the expected fields (buddy, page_lock,
 * refcount, work, slots, pool, cpu, first/middle/last_chunks, first_num,
 * mapped_count).  The definition must be restored before this file builds;
 * left untouched here because the members require kernel-internal types.
 */
struct z3fold_header {};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
/*
 * NOTE(review): member list stripped from this copy; the kernel-doc above
 * enumerates the expected fields.  Restore before building — left untouched
 * here because the members require kernel-internal types.
 */
struct z3fold_pool {};

/*
 * Internal z3fold page flags
 * NOTE(review): restored from the upstream definition — an empty enum is
 * not valid C and the enumerators were stripped from this copy.  PAGE_CLAIMED
 * is referenced by the z3fold_free() documentation below.
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or migration */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 * NOTE(review): restored from the upstream definition — an empty enum is
 * not valid C and the enumerator was stripped from this copy.
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
/*
 * NOTE(review): body stripped in this copy.  Presumably rounds @size up to
 * a whole number of CHUNK_SIZE chunks, accounting for header overhead —
 * confirm against the full implementation; as written, this non-void
 * function falls off the end, which is UB if the result is used.
 */
static int size_to_chunks(size_t size)
{}

/* Iterate @_iter over the unbuddied free-list indices [_begin, NCHUNKS). */
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/*
 * NOTE(review): function bodies throughout this file have been stripped;
 * only signatures remain.  Comments below describe contracts as far as
 * they can be inferred from the declarations — confirm against full source.
 */
/* Allocate a buddy-slots structure for a page of @pool using @gfp flags. */
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{}

/* Map a slots structure back to its owning pool — presumably via a back link. */
static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{}

/* Recover the slots structure a handle lives in (handle encodes its address). */
static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{}

/* Lock a z3fold page (the per-page lock documented on z3fold_header). */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{}

/* Try to lock a z3fold page; non-blocking variant of the above.
 * NOTE(review): body stripped — return convention (nonzero on success?)
 * must be confirmed against the full implementation. */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{}

/* Release the reference/lock taken by get_z3fold_header().
 * NOTE(review): body stripped — confirm pairing semantics. */
static inline void put_z3fold_header(struct z3fold_header *zhdr)
{}

/* Release the slot backing @handle within @zhdr's buddy-slots structure. */
static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{}

/* Initializes the z3fold header of a newly allocated z3fold page */
/*
 * NOTE(review): body stripped.  For @headless pages no header is set up
 * (see struct z3fold_header doc); otherwise the header occupying the first
 * chunks is initialized for @pool with @gfp used for auxiliary allocation.
 */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{}

/* Helper function to build the index */
/* NOTE(review): body stripped — presumably combines first_num and @bud. */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Zhdr->page_lock should be held as this function accesses first_num
 * if bud != HEADLESS.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{}

/* Convenience wrapper around __encode_handle() using zhdr's own slots. */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 *  but that doesn't matter. because the masking will result in the
 *  correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{}

/* Return the pool owning @zhdr. */
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{}

/*
 * NOTE(review): bodies stripped; contracts below are inferred from names
 * and the _locked suffixes — confirm against the full implementation.
 */
/* Tear down a page whose last reference dropped; @locked = page_lock held. */
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{}

/* kref release callback, invoked with the page lock already held. */
static void release_z3fold_page_locked(struct kref *ref)
{}

/* kref release callback that additionally unlinks the page from pool lists. */
static void release_z3fold_page_locked_list(struct kref *ref)
{}

/* Drop a reference on a locked page, releasing via the callbacks above. */
static inline int put_z3fold_locked(struct z3fold_header *zhdr)
{}

static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
{}

/* Workqueue callback — presumably frees pages queued on the stale list. */
static void free_pages_work(struct work_struct *w)
{}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{}

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{}

/* Pick which buddy slot can hold an allocation of @chunks chunks. */
static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{}

/* Move the middle buddy's data so that it starts at @dst_chunk.
 * NOTE(review): body stripped — confirm return value semantics. */
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{}

/* True when only one buddy of the page is in use. */
static inline bool buddy_single(struct z3fold_header *zhdr)
{}

/* Compact a page holding a single buddy — possibly relocating its object.
 * NOTE(review): body stripped — confirm what header is returned. */
static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{}

/* Minimum chunk gap worth moving a buddy for during compaction. */
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
/* NOTE(review): bodies stripped from this copy; contracts inferred. */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{}

/* Compact a page; @locked says whether the caller already holds page_lock. */
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{}

/* Workqueue callback driving background page-layout optimization. */
static void compact_page_work(struct work_struct *w)
{}

/* returns _locked_ z3fold page header or NULL */
/* Searches the per-CPU unbuddied lists for a page with room for @size. */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
/* NOTE(review): body stripped from this copy of the file. */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
/* NOTE(review): body stripped from this copy of the file. */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
/* NOTE(review): body stripped from this copy of the file. */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{}

/**
 * z3fold_get_pool_pages() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_pages(struct z3fold_pool *pool)
{}

/*
 * Page-migration callbacks (wired into z3fold_mops below).
 * NOTE(review): bodies stripped from this copy of the file.
 */
/* Isolate a movable z3fold page from normal pool operation for migration. */
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{}

/* Migrate a z3fold page's contents and metadata from @page to @newpage. */
static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{}

/* Return a previously isolated page to the pool after failed migration. */
static void z3fold_page_putback(struct page *page)
{}

static const struct movable_operations z3fold_mops =;

/*****************
 * zpool
 ****************/

/*
 * Thin zpool-API wrappers around the z3fold_* functions above.
 * NOTE(review): bodies stripped from this copy of the file.
 */
static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{}

static void z3fold_zpool_destroy(void *pool)
{}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{}

static u64 z3fold_zpool_total_pages(void *pool)
{}

static struct zpool_driver z3fold_zpool_driver =;

MODULE_ALIAS();

static int __init init_z3fold(void)
{}

static void __exit exit_z3fold(void)
{}

module_init();
module_exit(exit_z3fold);

MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_DESCRIPTION();