// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
   Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.


 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct gendisk *disk, blk_mode_t mode);
static void drbd_release(struct gendisk *gd);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <[email protected]>, "
	      "Lars Ellenberg <[email protected]>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not-module),
 * these become boot parameters (e.g., drbd.minor_count) */
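
/* Example (illustrative): loaded as a module, "modprobe drbd minor_count=64";
 * built into the kernel, append "drbd.minor_count=64" to the boot command
 * line instead. */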

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list; the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
DEFINE_SPINLOCK(drbd_pp_lock);
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
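
/* Sketch of the intended list handling (illustrative helpers, not the real
 * pool allocator): the private member of struct page acts as the "next"
 * pointer, so push/pop under drbd_pp_lock look roughly like this:
 *
 *	static void pp_push(struct page **pool, struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)*pool);
 *		*pool = page;
 *	}
 *
 *	static struct page *pp_pop(struct page **pool)
 *	{
 *		struct page *page = *pool;
 *		if (page)
 *			*pool = (struct page *)page_private(page);
 *		return page;
 *	}
 */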

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= drbd_submit_bio,
	.open		= drbd_open,
	.release	= drbd_release,
};

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif
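
/* Typical call pattern for the wrapper around the function above (sketch;
 * get_ldev_if_state() and put_ldev() are the drbd_int.h helpers that pair
 * with the reference taken here):
 *
 *	if (get_ldev_if_state(device, D_INCONSISTENT)) {
 *		... access device->ldev ...
 *		put_ldev(device);
 *	}
 */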

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:       The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{}

static int drbd_thread_setup(void *arg)
{}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{}

int drbd_thread_start(struct drbd_thread *thi)
{}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{}

int conn_lowest_minor(struct drbd_connection *connection)
{}

#ifdef CONFIG_SMP
/*
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/*
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{}
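
/* For illustration: sizeof(struct p_header80) and sizeof(struct p_header95)
 * are 8, sizeof(struct p_header100) is 16 -- all multiples of 8, as the
 * comment above requires. */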

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{}

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{}

int drbd_send_ping(struct drbd_connection *connection)
{}

int drbd_send_ping_ack(struct drbd_connection *connection)
{}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{}

int drbd_send_protocol(struct drbd_connection *connection)
{}

static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:      DRBD peer device.
 * @state:     the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{}

static int fill_bitmap_rle_bits(struct drbd_device *device,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{}

/*
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_peer_device *peer_device, struct bm_xfer_ctx *c)
{}
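
/* Caller loop sketch, following the return convention above:
 *
 *	do {
 *		err = send_bitmap_rle_or_plain(peer_device, &c);
 *	} while (err > 0);
 */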

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device,
			    struct drbd_peer_device *peer_device)
{}

int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
{}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:		Packet command code.
 * @sector:		sector, needs to be in big endian byte order
 * @blksize:		size in byte, needs to be in big endian byte order
 * @block_id:		Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{}
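
/* Per the byte order contract above, callers convert before passing in,
 * e.g. (sketch):
 *
 *	_drbd_send_ack(peer_device, cmd,
 *		       cpu_to_be64(sector), cpu_to_be32(blksize), block_id);
 */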

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{}

/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:		packet command code
 * @peer_req:		peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{}

int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{}

int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{}

int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{}

int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{}

static void drbd_update_congested(struct drbd_connection *connection)
{}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{}
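
/* The workaround described above boils down to a check like this before
 * taking the zero-copy path (the same idea the kernel's sendpage_ok()
 * expresses; sketch):
 *
 *	if (!drbd_disable_sendpage && page_count(page) >= 1 && !PageSlab(page))
 *		... zero-copy send ...
 *	else
 *		... fall back to _drbd_no_send_page() ...
 */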

static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
		    int offset, size_t size, unsigned msg_flags)
{}

static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{}

static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{}

static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{}

static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{}

/* Used to send write or TRIM aka REQ_OP_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

/* answer packet, used to send data back for read requests:
 *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{}

int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{}

/*
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{}

static int drbd_open(struct gendisk *disk, blk_mode_t mode)
{}

static void drbd_release(struct gendisk *gd)
{}

/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{}

static void drbd_set_defaults(struct drbd_device *device)
{}

void drbd_init_set_defaults(struct drbd_device *device)
{}

void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{}

void drbd_device_cleanup(struct drbd_device *device)
{}


static void drbd_destroy_mempools(void)
{}

static int drbd_create_mempools(void)
{}

static void drbd_release_all_peer_reqs(struct drbd_device *device)
{}

/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{}

/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {} retry;

static void do_retry(struct work_struct *ws)
{}

/* called via drbd_req_put_completion_ref(),
 * holds resource->req_lock */
void drbd_restart_request(struct drbd_request *req)
{}

void drbd_destroy_resource(struct kref *kref)
{}

void drbd_free_resource(struct drbd_resource *resource)
{}

static void drbd_cleanup(void)
{}

static void drbd_init_workqueue(struct drbd_work_queue* wq)
{}

struct completion_work {};

static int w_complete(struct drbd_work *w, int cancel)
{}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{}

struct drbd_resource *drbd_find_resource(const char *name)
{}

struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
				     void *peer_addr, int peer_addr_len)
{}

static int drbd_alloc_socket(struct drbd_socket *socket)
{}

static void drbd_free_socket(struct drbd_socket *socket)
{}

void conn_free_crypto(struct drbd_connection *connection)
{}

int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{}

struct drbd_resource *drbd_create_resource(const char *name)
{}

/* caller must be under adm_mutex */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{}

void drbd_destroy_connection(struct kref *kref)
{}

static int init_submitter(struct drbd_device *device)
{}

enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{}

void drbd_delete_device(struct drbd_device *device)
{}

static int __init drbd_init(void)
{}

static void drbd_free_one_sock(struct drbd_socket *ds)
{}

void drbd_free_sock(struct drbd_connection *connection)
{}

/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{}

/* aligned 4kByte */
struct meta_data_on_disk {} __packed;

void drbd_md_write(struct drbd_device *device, void *b)
{}

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{}

static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{}

static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{}


/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{}
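
/* Caller sketch, per the contract above (attach path, while still
 * D_DISKLESS; "nbc" is an illustrative name for the not-yet-attached
 * backing device):
 *
 *	retcode = drbd_md_read(device, nbc);
 *	if (retcode != NO_ERROR)
 *		goto fail;
 */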

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
void drbd_md_mark_dirty(struct drbd_device *device)
{}
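
/* Typical pattern (sketch): update the in-core meta data, mark it dirty,
 * and sync before the five second timer would fire:
 *
 *	device->ldev->md.uuid[idx] = val;
 *	drbd_md_mark_dirty(device);
 *	...
 *	drbd_md_sync(device);
 */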

void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{}

void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{}

void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{}
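
/* Rotation sketch (UI_* indices from drbd.h): the old current UUID moves
 * into the bitmap slot, then a freshly generated value becomes current:
 *
 *	__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
 *	__drbd_uuid_set(device, UI_CURRENT, new_random_uuid);
 */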

void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 * @peer_device: Peer DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device,
			  struct drbd_peer_device *peer_device) __must_hold(local)
{}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 * @peer_device: Peer DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device,
			  struct drbd_peer_device *peer_device) __must_hold(local)
{}

static int w_bitmap_io(struct drbd_work *w, int unused)
{}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap flags
 * @peer_device: Peer DRBD device.
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 *
 * Its worker function encloses the call of io_fn() by get_ldev() and
 * put_ldev().
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags,
			  struct drbd_peer_device *peer_device)
{}
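
/* Usage sketch (illustrative callback names; must run in worker context):
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, &my_done_fn,
 *			     "set_n_write from attaching", BM_LOCKED_MASK,
 *			     peer_device);
 */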

/**
 * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap flags
 * @peer_device: Peer DRBD device.
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
		char *why, enum bm_flag flags,
		struct drbd_peer_device *peer_device)
{}

void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{}

static void md_sync_timer_fn(struct timer_list *t)
{}

const char *cmdname(enum drbd_packet cmd)
{}

/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{}

void lock_all_resources(void)
{}

void unlock_all_resources(void)
{}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD	479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{}
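
/* The LCG step amounts to (sketch, using the constants above):
 *
 *	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
 *
 * with rsp->state reseeded via get_random_bytes() every
 * FAULT_RANDOM_REFRESH calls.
 */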

static char *
_drbd_fault_str(unsigned int type)
{}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{}
#endif

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);