// linux/drivers/md/dm-log-userspace-base.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.3.0"

#define FLUSH_ENTRY_POOL_SIZE 16

struct dm_dirty_log_flush_entry {};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32
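
/*
 * A minimal sketch of the grouping this limit implies: send at most
 * MAX_FLUSH_GROUP_COUNT regions per message so the payload stays within what
 * dm_consult_userspace() can carry.  This is illustration only, not the
 * in-tree flush_by_group() logic: the helper name is hypothetical, the flush
 * entry layout (a region plus a list head) is assumed because the real struct
 * is elided above, and the transfer helper is assumed to keep its usual
 * (uuid, luid, request_type, data, data_size, rdata, rdata_size) signature.
 */
struct example_flush_entry {
        int type;               /* DM_ULOG_MARK_REGION or DM_ULOG_CLEAR_REGION */
        region_t region;
        struct list_head list;
};

static int example_send_in_groups(const char *uuid, uint64_t luid, int type,
                                  struct list_head *flush_list,
                                  struct list_head *done_list)
{
        int r = 0;
        unsigned int count;
        uint64_t group[MAX_FLUSH_GROUP_COUNT];
        struct example_flush_entry *fe, *tmp_fe;

        while (!list_empty(flush_list)) {
                count = 0;
                list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
                        group[count++] = fe->region;
                        /* Hand the entry back to the caller for freeing. */
                        list_move(&fe->list, done_list);
                        if (count >= MAX_FLUSH_GROUP_COUNT)
                                break;
                }

                /* One message per group of at most MAX_FLUSH_GROUP_COUNT regions. */
                r = dm_consult_userspace(uuid, luid, type, (char *)group,
                                         count * sizeof(uint64_t), NULL, NULL);
                if (r)
                        break;
        }

        return r;
}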

struct log_c {};

static struct kmem_cache *_flush_entry_cache;

static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{}

static int build_constructor_string(struct dm_target *ti,
				    unsigned int argc, char **argv,
				    char **ctr_str)
{}

static void do_flush(struct work_struct *work)
{}

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> [integrated_flush] <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.
 *
 * Example:
 *	<UUID> [integrated_flush] clustered-disk <arg count> <log dev>
 *	<region_size> [[no]sync]
 *
 * This module strips off the <UUID> and uses it for identification
 * purposes when communicating with userspace about a log.
 *
 * If integrated_flush is defined, the kernel combines flush
 * and mark requests.
 *
 * The rest of the line, beginning with 'clustered-disk', is passed
 * to the userspace ctr function.
 */
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned int argc, char **argv)
{}
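
/*
 * A sketch of the string assembly the comment above implies, with a
 * hypothetical helper name: once <UUID> (and the optional "integrated_flush")
 * have been consumed, the remaining arguments are joined into one
 * space-separated string that build_constructor_string() would hand to the
 * userspace ctr via a DM_ULOG_CTR request.  Illustration only; the exact
 * in-tree formatting and error handling are omitted.
 */
static __maybe_unused char *example_join_ctr_args(unsigned int argc, char **argv)
{
        unsigned int i;
        size_t len = 1;                 /* room for the terminating NUL */
        char *str, *p;

        for (i = 0; i < argc; i++)
                len += strlen(argv[i]) + 1;     /* argument plus a space */

        str = kzalloc(len, GFP_KERNEL);
        if (!str)
                return NULL;

        p = str;
        for (i = 0; i < argc; i++)
                p += sprintf(p, "%s ", argv[i]);

        return str;
}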

static void userspace_dtr(struct dm_dirty_log *log)
{}

static int userspace_presuspend(struct dm_dirty_log *log)
{}

static int userspace_postsuspend(struct dm_dirty_log *log)
{}

static int userspace_resume(struct dm_dirty_log *log)
{}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{}
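
/*
 * A sketch of the query pattern described above, with a hypothetical helper
 * name.  The uuid/luid pair is passed in explicitly because the log_c fields
 * are elided above, and the reply is assumed to be a single 64-bit flag.
 */
static __maybe_unused int example_query_is_clean(const char *uuid, uint64_t luid,
                                                 region_t region)
{
        int r;
        uint64_t region64 = region;
        int64_t is_clean;
        size_t rdata_size = sizeof(is_clean);

        r = dm_consult_userspace(uuid, luid, DM_ULOG_IS_CLEAN,
                                 (char *)&region64, sizeof(region64),
                                 (char *)&is_clean, &rdata_size);

        /* Any failure to reach the server counts as "not clean". */
        return r ? 0 : (int)is_clean;
}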

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, do not wait on the server; return
 * -EWOULDBLOCK if the answer is not already known.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{}
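
/*
 * A sketch of the -EWOULDBLOCK contract above: when the caller cannot block,
 * the server round trip (which can sleep) is skipped entirely and the
 * decision is deferred.  Same assumptions and hypothetical naming as the
 * is_clean sketch.
 */
static __maybe_unused int example_query_in_sync(const char *uuid, uint64_t luid,
                                                region_t region, int can_block)
{
        int r;
        uint64_t region64 = region;
        int64_t in_sync;
        size_t rdata_size = sizeof(in_sync);

        if (!can_block)
                return -EWOULDBLOCK;

        r = dm_consult_userspace(uuid, luid, DM_ULOG_IN_SYNC,
                                 (char *)&region64, sizeof(region64),
                                 (char *)&in_sync, &rdata_size);

        /* Treat any transfer failure as "not in sync". */
        return r ? 0 : (int)in_sync;
}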

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
			  int flush_with_payload)
{}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server, reducing the
 * load on flush.  The flush would then have fewer requests
 * left on the list and would only be responsible for the
 * finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{}
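
/*
 * A sketch of the two-stage flush described above, built on the earlier
 * grouping sketch and hypothetical helper names: stage one pushes the queued
 * mark/clear regions to the server, stage two issues a single DM_ULOG_FLUSH
 * so the server can commit everything at once.  The in-tree logic (payload
 * flushes, error handling, mempool bookkeeping) lives in userspace_flush().
 */
static __maybe_unused int example_two_stage_flush(const char *uuid, uint64_t luid,
                                                  struct list_head *mark_list,
                                                  struct list_head *clear_list,
                                                  struct list_head *done_list)
{
        int r;

        /* Stage 1: transfer the queued state changes in bounded groups. */
        r = example_send_in_groups(uuid, luid, DM_ULOG_MARK_REGION,
                                   mark_list, done_list);
        if (r)
                return r;
        r = example_send_in_groups(uuid, luid, DM_ULOG_CLEAR_REGION,
                                   clear_list, done_list);
        if (r)
                return r;

        /* Stage 2: one commit request covering everything sent above. */
        return dm_consult_userspace(uuid, luid, DM_ULOG_FLUSH, NULL, 0,
                                    NULL, NULL);
}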

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Blocking in a memory allocation is acceptable.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{}
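
/*
 * A sketch of queueing a mark request under the rule above: the allocation
 * may block (GFP_NOIO keeps it from recursing into the I/O path), but nothing
 * is sent to the server here; the entry just waits on the mark list for the
 * next flush.  The pool, lock and list are parameters because struct log_c is
 * elided above; the helper name is hypothetical.
 */
static __maybe_unused void example_queue_mark(mempool_t *pool, spinlock_t *lock,
                                              struct list_head *mark_list,
                                              region_t region)
{
        unsigned long flags;
        struct example_flush_entry *fe;

        /* A waiting allocation from a mempool does not fail. */
        fe = mempool_alloc(pool, GFP_NOIO);

        fe->type = DM_ULOG_MARK_REGION;
        fe->region = region;

        spin_lock_irqsave(lock, flags);
        list_add(&fe->list, mark_list);
        spin_unlock_irqrestore(lock, flags);
}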

/*
 * userspace_clear_region
 *
 * This function must not block, so the allocation cannot block either.
 * In the worst case it is acceptable for the allocation to fail; that
 * simply means the region cannot be cleared now.  A failure does not
 * affect the current sync context, but it does mean the region will be
 * re-synced on a reload of the mirror even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{}
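
/*
 * The non-blocking counterpart of the mark sketch above: here the allocation
 * must not sleep, so GFP_ATOMIC is used and a failed allocation is simply
 * tolerated, exactly as the comment describes; the region then just gets
 * re-synced after a mirror reload.  Hypothetical helper name, same parameter
 * assumptions as the mark sketch.
 */
static __maybe_unused void example_queue_clear(mempool_t *pool, spinlock_t *lock,
                                               struct list_head *clear_list,
                                               region_t region)
{
        unsigned long flags;
        struct example_flush_entry *fe;

        /* No sleeping allowed, so the allocation is allowed to fail. */
        fe = mempool_alloc(pool, GFP_ATOMIC);
        if (!fe)
                return;         /* the clear is simply skipped */

        fe->type = DM_ULOG_CLEAR_REGION;
        fe->region = region;

        spin_lock_irqsave(lock, flags);
        list_add(&fe->list, clear_list);
        spin_unlock_irqrestore(lock, flags);
}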

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid for this
 * function to return an error.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
				      region_t region, int in_sync)
{}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
			    char *result, unsigned int maxlen)
{}
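
/*
 * A sketch of how a dirty-log status method fills its result buffer with the
 * DMEMIT() helper from device-mapper.h.  The strings below are placeholders:
 * the in-tree userspace_status() forwards a DM_ULOG_STATUS_INFO request to
 * the server for the INFO case and rebuilds the table line (log type name,
 * argument count, UUID, userspace ctr arguments) for the TABLE case.
 */
static __maybe_unused int example_status(status_type_t status_type,
                                         const char *uuid, unsigned int usr_argc,
                                         const char *usr_argv_str,
                                         char *result, unsigned int maxlen)
{
        unsigned int sz = 0;

        switch (status_type) {
        case STATUSTYPE_INFO:
                /* Runtime state; the real code asks the server for this. */
                DMEMIT("1 userspace");
                break;
        case STATUSTYPE_TABLE:
                /* The table line must be able to reproduce the constructor. */
                DMEMIT("%s %u %s %s", "userspace", usr_argc + 1, uuid,
                       usr_argv_str);
                break;
        default:
                break;
        }

        return (int)sz;
}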

/*
 * userspace_is_remote_recovering
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{}

static struct dm_dirty_log_type _userspace_type = {};
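
/*
 * A sketch of how the ops table above (left empty here) is typically
 * populated: every callback of struct dm_dirty_log_type (see
 * <linux/dm-dirty-log.h>) points at the corresponding userspace_* function in
 * this file, and the type name "userspace" is what a mirror table references
 * to select this log.  The variable name is hypothetical so it does not clash
 * with the real one; userspace_dirty_log_init() would then register the table
 * with dm_register_dirty_log_type().
 */
static struct dm_dirty_log_type example_userspace_type __maybe_unused = {
        .name = "userspace",
        .module = THIS_MODULE,
        .ctr = userspace_ctr,
        .dtr = userspace_dtr,
        .presuspend = userspace_presuspend,
        .postsuspend = userspace_postsuspend,
        .resume = userspace_resume,
        .get_region_size = userspace_get_region_size,
        .is_clean = userspace_is_clean,
        .in_sync = userspace_in_sync,
        .flush = userspace_flush,
        .mark_region = userspace_mark_region,
        .clear_region = userspace_clear_region,
        .get_resync_work = userspace_get_resync_work,
        .set_region_sync = userspace_set_region_sync,
        .get_sync_count = userspace_get_sync_count,
        .status = userspace_status,
        .is_remote_recovering = userspace_is_remote_recovering,
};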

static int __init userspace_dirty_log_init(void)
{}

static void __exit userspace_dirty_log_exit(void)
{}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");