/* linux/drivers/md/dm-kcopyd.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm-core.h"

#define SPLIT_COUNT
#define MIN_JOBS

#define DEFAULT_SUB_JOB_SIZE_KB
#define MAX_SUB_JOB_SIZE_KB

static unsigned int kcopyd_subjob_size_kb =;

module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC();

static unsigned int dm_get_kcopyd_subjob_size(void)
{}

/*
 *----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------
 */
/*
 * Per-client kcopyd state.
 *
 * NOTE(review): the member list has been stripped from this copy of the
 * file.  Per the block comment above, upstream keeps the client's
 * preallocated page pool here, along with its job mempool, dm-io client,
 * workqueue and job lists — restore from upstream before building.
 */
struct dm_kcopyd_client {};

/* Single shared page list of zeroes — presumably backs dm_kcopyd_zero(). */
static struct page_list zero_page_list;

/* Protects the io/idle throttle accounting shared by all clients. */
static DEFINE_SPINLOCK(throttle_spinlock);

/*
 * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
 * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
 * by 2.
 */
#define ACCOUNT_INTERVAL_SHIFT	SHIFT_HZ

/*
 * Sleep this number of microseconds.
 *
 * The value was decided experimentally.
 * Smaller values seem to cause an increased copy rate above the limit.
 * The reason for this is unknown but possibly due to jiffies rounding errors
 * or read/write cache inside the disk.
 */
#define SLEEP_USEC	(100 * 1000)

/*
 * Maximum number of sleep events. There is a theoretical livelock if more
 * kcopyd clients do work simultaneously which this limit avoids.
 */
#define MAX_SLEEPS	10

/*
 * Account the start of one throttled io on @t.  NOTE(review): body stripped
 * in this copy — upstream sleeps here (up to MAX_SLEEPS * SLEEP_USEC) while
 * the client is over its throttle; restore from upstream before use.
 */
static void io_job_start(struct dm_kcopyd_throttle *t)
{}

/*
 * Account the completion of one throttled io on @t — pairs with
 * io_job_start().  NOTE(review): body stripped in this copy; restore from
 * upstream before use.
 */
static void io_job_finish(struct dm_kcopyd_throttle *t)
{}


/*
 * Kick the client's worker so queued jobs get processed.  NOTE(review):
 * body stripped in this copy — upstream queues the client's work item on
 * its workqueue; restore from upstream before use.
 */
static void wake(struct dm_kcopyd_client *kc)
{}

/*
 * Obtain one page for the use of kcopyd.
 */
/*
 * Allocate one page_list node plus its backing page with @gfp.
 * NOTE(review): body stripped in this copy — as written it returns no value
 * from a non-void function; restore from upstream before use.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{}

/*
 * Release one page_list node and its page — inverse of alloc_pl().
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void free_pl(struct page_list *pl)
{}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
/*
 * Return @pl to @kc's free list, freeing anything above the client's
 * reserved_pages limit (see block comment above).  NOTE(review): body
 * stripped in this copy; restore from upstream before use.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{}

/*
 * Take @nr pages from @kc's pool into *@pages; returns 0 on success.
 * NOTE(review): body stripped in this copy — as written it returns no value
 * from a non-void function; restore from upstream before use.
 */
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{}

/*
 * These three functions resize the page pool.
 */
/*
 * Free an entire page_list chain back to the system.  NOTE(review): body
 * stripped in this copy; restore from upstream before use.
 */
static void drop_pages(struct page_list *pl)
{}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
/*
 * Preallocate @nr_pages for @kc's exclusive use; returns 0 or -ENOMEM.
 * NOTE(review): body stripped in this copy — as written it returns no value
 * from a non-void function; restore from upstream before use.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{}

/*
 * Release every page reserved by client_reserve_pages().  NOTE(review):
 * body stripped in this copy; restore from upstream before use.
 */
static void client_free_pages(struct dm_kcopyd_client *kc)
{}

/*
 *---------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------
 */
/*
 * One copy job (or sub-job of a split copy) — allocated from the client's
 * mempool per the block comment above.  NOTE(review): the member list has
 * been stripped from this copy; restore from upstream before building.
 */
struct kcopyd_job {};

/* Slab cache for struct kcopyd_job — presumably backs each client's mempool. */
static struct kmem_cache *_job_cache;

/*
 * Module init: create _job_cache and the zero_page_list; returns 0 or a
 * negative errno.  NOTE(review): body stripped in this copy — as written it
 * returns no value from a non-void function; restore from upstream.
 */
int __init dm_kcopyd_init(void)
{}

/*
 * Module teardown — inverse of dm_kcopyd_init().  NOTE(review): body
 * stripped in this copy; restore from upstream before use.
 */
void dm_kcopyd_exit(void)
{}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
/*
 * Pop the next runnable io job from @jobs for @kc, or NULL.  NOTE(review):
 * body stripped in this copy — as written it returns no value from a
 * non-void function; restore from upstream before use.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{}

/*
 * Pop one job from @jobs under the client's lock, or NULL if empty.
 * NOTE(review): body stripped in this copy — as written it returns no value
 * from a non-void function; restore from upstream before use.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{}

/*
 * Append @job to @jobs and wake the client's worker (see comment above).
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void push(struct list_head *jobs, struct kcopyd_job *job)
{}


/*
 * Re-queue @job at the head of @jobs — used for jobs that cannot be
 * processed yet.  NOTE(review): body stripped in this copy; restore from
 * upstream before use.
 */
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
/*
 * Process one finished job: release its pages, invoke the client's notify
 * fn, free the job (see </>0/<0 contract above).  NOTE(review): body
 * stripped in this copy — non-void function returns no value; restore
 * from upstream before use.
 */
static int run_complete_job(struct kcopyd_job *job)
{}

/*
 * dm-io completion callback: records @error on the job (@context) and moves
 * it to the next queue.  NOTE(review): body stripped in this copy; restore
 * from upstream before use.
 */
static void complete_io(unsigned long error, void *context)
{}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
/*
 * Issue the read or write dm-io for @job with complete_io() as callback
 * (contract: <0 error, 0 ok, >0 retry later).  NOTE(review): body stripped
 * in this copy — non-void function returns no value; restore from upstream.
 */
static int run_io_job(struct kcopyd_job *job)
{}

/*
 * Acquire the pages @job needs from the client pool (contract: <0 error,
 * 0 ok, >0 pages not yet available).  NOTE(review): body stripped in this
 * copy — non-void function returns no value; restore from upstream.
 */
static int run_pages_job(struct kcopyd_job *job)
{}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
/*
 * Apply @fn to jobs popped from @jobs until one fails or defers; returns
 * the count of successful jobs (see comment above).  NOTE(review): body
 * stripped in this copy — non-void function returns no value; restore
 * from upstream before use.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn)(struct kcopyd_job *))
{}

/*
 * kcopyd does this every time it's woken up.
 */
/*
 * Workqueue handler: drains the client's job lists via process_jobs()
 * each time the worker is woken.  NOTE(review): body stripped in this
 * copy; restore from upstream before use.
 */
static void do_work(struct work_struct *work)
{}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
/*
 * Queue @job directly if small, otherwise hand it to split_job() (see
 * comment above).  NOTE(review): body stripped in this copy; restore from
 * upstream before use.
 */
static void dispatch_job(struct kcopyd_job *job)
{}

/*
 * Completion callback for one sub-job of a split copy: accumulates
 * read/write errors into the master job (@context) and dispatches the next
 * segment.  NOTE(review): body stripped in this copy; restore from upstream.
 */
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{}

/*
 * Create some sub jobs to share the work between them.
 */
/*
 * Break @master_job into SPLIT_COUNT sub-jobs completed via
 * segment_complete().  NOTE(review): body stripped in this copy; restore
 * from upstream before use.
 */
static void split_job(struct kcopyd_job *master_job)
{}

/*
 * Public API: asynchronously copy @from to @num_dests regions in @dests,
 * calling @fn(@context) on completion.  NOTE(review): body stripped in this
 * copy — upstream allocates a kcopyd_job and hands it to dispatch_job();
 * restore from upstream before use.  Fix applied: EXPORT_SYMBOL() had no
 * symbol argument, which does not compile.
 */
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{}
EXPORT_SYMBOL(dm_kcopyd_copy);

/*
 * Public API: asynchronously zero @num_dests regions in @dests, calling
 * @fn(@context) on completion.  NOTE(review): body stripped in this copy;
 * restore from upstream before use.  Fix applied: EXPORT_SYMBOL() had no
 * symbol argument, which does not compile.
 */
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{}
EXPORT_SYMBOL(dm_kcopyd_zero);

/*
 * Public API: allocate a job whose only purpose is to invoke @fn via
 * dm_kcopyd_do_callback(); returns an opaque handle.  NOTE(review): body
 * stripped in this copy — as written the non-void function returns no
 * value; restore from upstream before use.  Fix applied: EXPORT_SYMBOL()
 * had no symbol argument, which does not compile.
 */
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

/*
 * Public API: complete the handle @j from dm_kcopyd_prepare_callback()
 * with the given error results.  NOTE(review): body stripped in this copy;
 * restore from upstream before use.  Fix applied: EXPORT_SYMBOL() had no
 * symbol argument, which does not compile.
 */
void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{}
EXPORT_SYMBOL(dm_kcopyd_do_callback);

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */

/*
 *---------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------
 */
/*
 * Public API: create a kcopyd client (workqueue, mempool, page reserve,
 * dm-io client) with optional @throttle; returns the client or ERR_PTR.
 * NOTE(review): body stripped in this copy — as written the non-void
 * function returns no value; restore from upstream before use.  Fix
 * applied: EXPORT_SYMBOL() had no symbol argument, which does not compile.
 */
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{}
EXPORT_SYMBOL(dm_kcopyd_client_create);

/*
 * Public API: wait for outstanding jobs, then tear down everything
 * dm_kcopyd_client_create() set up.  NOTE(review): body stripped in this
 * copy; restore from upstream before use.  Fix applied: EXPORT_SYMBOL()
 * had no symbol argument, which does not compile.
 */
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);

/*
 * Public API: flush the client's workqueue so queued jobs are drained.
 * NOTE(review): body stripped in this copy; restore from upstream before
 * use.  Fix applied: EXPORT_SYMBOL() had no symbol argument, which does
 * not compile.
 */
void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc)
{}
EXPORT_SYMBOL(dm_kcopyd_client_flush);