// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
   Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.

 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES

struct packet_info {};

enum finish_epoch {};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
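
/*
 * Illustrative sketch, not part of DRBD itself: counting the pages in
 * such a chain.  It only assumes what the comment above states: the
 * "next" pointer lives in page->private (read via the standard
 * page_private() accessor) and the chain is NULL-terminated.
 */
static inline int page_chain_count_example(struct page *page)
{
	int n = 0;

	for (; page; page = (struct page *)page_private(page))
		n++;
	return n;
}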

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{}

static int page_chain_free(struct page *page)
{}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{}

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD peer device.
 * @number:		number of pages requested
 * @retry:		whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{}
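
/*
 * Self-contained sketch (not the DRBD implementation) of the behaviour the
 * kernel-doc above describes: allocate a number of pages as a page->private
 * linked chain, and throttle with a short sleep instead of failing hard when
 * memory is tight, giving up only when signalled.
 */
static struct page *alloc_page_chain_example(unsigned int number)
{
	struct page *chain = NULL, *page;
	unsigned int i;

	for (i = 0; i < number; i++) {
		while (!(page = alloc_page(GFP_NOIO | __GFP_NOWARN))) {
			if (signal_pending(current))
				goto fail;
			/* give the system some room to breathe */
			schedule_timeout_interruptible(HZ / 10);
		}
		set_page_private(page, (unsigned long)chain);
		chain = page;
	}
	return chain;
fail:
	while ((page = chain) != NULL) {
		chain = (struct page *)page_private(page);
		__free_page(page);
	}
	return NULL;
}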

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
		       int is_net)
{}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{}
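
/*
 * Hedged sketch of one common way to pin kernel-socket buffer sizes (the
 * open-coded equivalent of forced SO_SNDBUF/SO_RCVBUF); shown for
 * illustration, not claimed to be drbd_setbufsize()'s exact body.
 */
static void setbufsize_example(struct socket *sock, unsigned int snd,
			       unsigned int rcv)
{
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}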

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{}

struct accept_wait_data {};

static void drbd_incoming_connection(struct sock *sk)
{}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_connection *connection)
{}

static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{}

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{}

/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {};
struct one_flush_context {};

static void one_flush_endio(struct bio *bio)
{}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{}

static void drbd_flush(struct drbd_connection *connection)
{}
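
/*
 * Sketch of the "blkdev_issue_flush, but asynchronous" pattern described
 * above: one empty flush bio per component volume, a shared in-flight
 * counter, and a completion to wait on.  The *_example names are
 * hypothetical stand-ins for the DRBD internals.
 */
struct flush_ctx_example {
	atomic_t pending;
	struct completion done;
};

static void flush_endio_example(struct bio *bio)
{
	struct flush_ctx_example *ctx = bio->bi_private;

	bio_put(bio);
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush_example(struct block_device *bdev,
				     struct flush_ctx_example *ctx)
{
	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
				    GFP_NOIO);

	bio->bi_private = ctx;
	bio->bi_end_io = flush_endio_example;
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}
/* The caller would start with pending == 1, submit to all volumes, drop
 * its own reference with the same dec-and-test, then wait on ctx->done. */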

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{}

/*
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo)
{}

/*
 * Mapping "discard" to ZEROOUT with UNMAP does not work for us:
 * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
 * will directly go to fallback mode, submitting normal writes, and
 * never even try to UNMAP.
 *
 * And dm-thin does not do this (yet), mostly because in general it has
 * to assume that "skip_block_zeroing" is set.  See also:
 * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
 * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
 *
 * We *may* ignore the discard-zeroes-data setting, if so configured.
 *
 * Assumption is that this "discard_zeroes_data=0" is only because the backend
 * may ignore partial unaligned discards.
 *
 * LVM/DM thin as of at least
 *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
 *   Library version: 1.02.93-RHEL7 (2015-01-28)
 *   Driver version:  4.29.0
 * still behaves this way.
 *
 * For unaligned (wrt. alignment and granularity) or too small discards,
 * we zero-out the initial and/or trailing unaligned partial chunks,
 * but discard all the aligned full chunks.
 *
 * At least for LVM/DM thin, with skip_block_zeroing=false,
 * the result is effectively "discard_zeroes_data=1".
 */
/* flags: EE_TRIM|EE_ZEROOUT */
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
{}
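
/*
 * The alignment arithmetic described above, as a stand-alone sketch
 * (hypothetical helper, not the DRBD code): split [start, start + nr_sectors)
 * into an unaligned head, a granularity-aligned body to be discarded, and an
 * unaligned tail; head and tail get zeroed out instead.  Assumes
 * granularity_sectors is a power of two.
 */
static void split_by_granularity_example(sector_t start, unsigned int nr_sectors,
					 unsigned int granularity_sectors,
					 unsigned int *head, unsigned int *body,
					 unsigned int *tail)
{
	unsigned int mask = granularity_sectors - 1;

	*head = (granularity_sectors - (start & mask)) & mask;
	if (*head > nr_sectors)
		*head = nr_sectors;
	nr_sectors -= *head;
	*tail = nr_sectors & mask;
	*body = nr_sectors - *tail;
}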

static bool can_do_reliable_discards(struct drbd_device *device)
{}

static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
{}

static int peer_request_fault_type(struct drbd_peer_request *peer_req)
{}

/**
 * drbd_submit_peer_request() - submit @peer_req to the local backing device
 * @peer_req:	peer request
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_peer_request *peer_req)
{}
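
/*
 * Sketch of the "spread pages over multiple bios" pattern the kernel-doc
 * above refers to: whenever bio_add_page() refuses another page, submit the
 * current bio and continue with a fresh one.  Hypothetical helper using only
 * generic block-layer calls; real error handling and completion accounting
 * are elided.
 */
static void chain_write_endio_example(struct bio *bio)
{
	bio_put(bio);
}

static int submit_page_chain_example(struct block_device *bdev, sector_t sector,
				     struct page *chain, unsigned int len)
{
	struct bio *bio = NULL;

	for (; chain && len; chain = (struct page *)page_private(chain)) {
		unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
retry:
		if (!bio) {
			bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
			bio->bi_iter.bi_sector = sector;
			bio->bi_end_io = chain_write_endio_example;
		}
		if (!bio_add_page(bio, chain, l, 0)) {
			if (!bio->bi_iter.bi_size) {
				/* could not add a single page to an empty bio */
				bio_put(bio);
				return -ENOSPC;
			}
			/* bio full: submit it and start a new one */
			submit_bio(bio);
			bio = NULL;
			goto retry;
		}
		sector += l >> 9;
		len -= l;
	}
	if (bio)
		submit_bio(bio);
	return 0;
}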

static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
					     struct drbd_peer_request *peer_req)
{}

static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{}

static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{}

/* quick wrapper in case payload_size != request_size (write same) */
static void drbd_csum_ee_size(struct crypto_shash *h,
			      struct drbd_peer_request *r, void *d,
			      unsigned int payload_size)
{}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data.
 * data_size: actual payload ("data in")
 * 	for normal writes that is bi_size.
 * 	for discards, that is zero.
 * 	for write same, it is logical_block_size.
 * both trim and write same have the bi_size ("data len to be affected")
 * as extra argument in the packet header.
 */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
	      struct packet_info *pi) __must_hold(local)
{}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{}

static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
			   sector_t sector, int data_size)
{}

/*
 * e_end_resync_block() is called in ack_sender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
{}

static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
			    struct packet_info *pi) __releases(local)
{}

static struct drbd_request *
find_request(struct drbd_device *device, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{}

static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static void restart_conflicting_writes(struct drbd_device *device,
				       sector_t sector, int size)
{}

/*
 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
{}

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{}

static int e_send_superseded(struct drbd_work *w, int unused)
{}

static int e_send_retry_write(struct drbd_work *w, int unused)
{}

static bool seq_greater(u32 a, u32 b)
{}
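
/*
 * The usual wrap-around safe comparison for such sequence numbers is plain
 * serial-number arithmetic: look at the sign of the (signed) difference.
 * Shown as an illustration; not claimed to be the exact body above.
 */
static inline bool seq_greater_example(u32 a, u32 b)
{
	return (s32)a - (s32)b > 0;
}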

static u32 seq_max(u32 a, u32 b)
{}

static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{}
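
/*
 * Sketch of the interval test (lengths in bytes, hence the >>9 to convert
 * to sectors): two ranges overlap unless one ends at or before the start of
 * the other.  Illustration only; the byte-length convention is an assumption
 * about the callers.
 */
static inline int overlaps_example(sector_t s1, int l1, sector_t s2, int l2)
{
	return !(s1 + (l1 >> 9) <= s2 || s2 + (l2 >> 9) <= s1);
}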

/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than device->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update device->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
{}

static enum req_op wire_flags_to_bio_op(u32 dpf)
{}

/* see also bio_flags_to_wire() */
static blk_opf_t wire_flags_to_bio(struct drbd_connection *connection, u32 dpf)
{}

static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
				    unsigned int size)
{}

static int handle_write_conflicts(struct drbd_device *device,
				  struct drbd_peer_request *peer_req)
{}

/* mirrored write */
static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
		bool throttle_if_app_is_waiting)
{}
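
/*
 * Sketch of the is_mddev_idle()-style busy check described above: read the
 * device-wide sector counter via the real part_stat_read_accum() accessor,
 * and call the device busy if it advanced by "significantly" (more than 64
 * sectors) more than our own resync activity.  The bookkeeping parameters
 * are hypothetical.
 */
static bool backing_dev_busy_example(struct gendisk *disk,
				     unsigned long own_rs_sectors,
				     unsigned long *sectors_seen)
{
	unsigned long all = part_stat_read_accum(disk->part0, sectors);
	unsigned long delta = all - *sectors_seen;

	*sectors_seen = all;
	return delta > own_rs_sectors + 64;
}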

bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
{}

static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{}

/*
 * drbd_asb_recover_0p  -  Recover after split-brain with no remaining primaries
 */
static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{}

/*
 * drbd_asb_recover_1p  -  Recover after split-brain with one remaining primary
 */
static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{}

/*
 * drbd_asb_recover_2p  -  Recover after split-brain with two remaining primaries
 */
static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{}

static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091   requires proto 91
-1096   requires proto 96
 */

static int drbd_uuid_compare(struct drbd_peer_device *const peer_device,
		enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
{}

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
					   enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{}

static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{}

static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
{}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
static struct crypto_shash *drbd_crypto_alloc_digest_safe(
		const struct drbd_device *device,
		const char *alg, const char *name)
{}

static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
{}

/*
 * config_unknown_volume  -  device configuration command for unknown volume
 *
 * When a device is added to an existing connection, the node on which the
 * device is added first will send configuration commands to its peer but the
 * peer will not know about the device yet.  It will warn and ignore these
 * commands.  Once the device is added on the second node, the second node will
 * send the same device configuration commands, but in the other direction.
 *
 * (We can also end up here if drbd is misconfigured.)
 */
static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
{}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_device *device,
	const char *s, sector_t a, sector_t b)
{}
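
/*
 * "More than 12.5%" is just a shift by three: a minimal sketch of the
 * comparison (the actual warning text is elided).
 */
static bool differ_considerably_example(sector_t a, sector_t b)
{
	sector_t d = a > b ? a - b : b - a;

	return d > (a >> 3) || d > (b >> 3);
}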

static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
{}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{}

static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
{}

/*
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
		     unsigned long *p, struct bm_xfer_ctx *c)
{}

static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{}

static int dcbp_get_start(struct p_compressed_bm *p)
{}

static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{}

/*
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_peer_device *peer_device,
		struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c,
		 unsigned int len)
{}

/*
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_peer_device *peer_device,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{}

void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
		const char *direction, struct bm_xfer_ctx *c)
{}

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter if we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we used big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
{}
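
/*
 * Tiny illustration of the endianness argument above: consuming the little
 * endian byte stream in 64 bit chunks gives the same bitmap as consuming it
 * in 32 bit chunks would.  Hypothetical helper, not part of the bitmap code.
 */
static void bm_merge_le_words_example(unsigned long *bm, const __le64 *in,
				      size_t nr_u64)
{
	size_t i;

	for (i = 0; i < nr_u64; i++) {
		u64 w = le64_to_cpu(in[i]);
#if BITS_PER_LONG == 64
		bm[i] |= w;
#else
		bm[2 * i] |= (u32)w;
		bm[2 * i + 1] |= (u32)(w >> 32);
#endif
	}
}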

static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
{}

static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
{}

struct data_cmd {};

static struct data_cmd drbd_cmd_handler[] =;

static void drbdd(struct drbd_connection *connection)
{}

static void conn_disconnect(struct drbd_connection *connection)
{}

static int drbd_disconnected(struct drbd_peer_device *peer_device)
{}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_connection *connection)
{}
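
/*
 * The version agreement itself boils down to an interval intersection; a
 * sketch with hypothetical parameter names (each side advertises its
 * [min, max] range, and we settle on the highest version inside both):
 */
static int agree_version_example(int my_min, int my_max,
				 int peer_min, int peer_max)
{
	int agreed = min(my_max, peer_max);

	if (agreed < max(my_min, peer_min))
		return -1;	/* peer talks a different language */
	return agreed;
}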

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_connection *connection)
{}

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_connection *connection)
{
	drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
	drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_connection *connection)
{}
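
/*
 * The challenge-response above is built on a keyed hash.  Sketch of the
 * one-shot crypto_shash HMAC computation involved (tfm allocated and keyed
 * elsewhere, e.g. crypto_alloc_shash("hmac(sha256)", 0, 0) plus
 * crypto_shash_setkey()); purely illustrative, not DRBD's exact code.
 */
static int hmac_response_example(struct crypto_shash *tfm,
				 const void *challenge, unsigned int len,
				 u8 *response)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, challenge, len, response);
	shash_desc_zero(desc);
	return err;
}
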
#endif

int drbd_receiver(struct drbd_thread *thi)
{}

/* ********* acknowledge sender ******** */

static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{}

static int
validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{}

static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{}

static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{}

struct meta_sock_cmd {};

static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
{}

static void set_ping_timeout(struct drbd_connection *connection)
{}

static void set_idle_timeout(struct drbd_connection *connection)
{}

static struct meta_sock_cmd ack_receiver_tbl[] =;

int drbd_ack_receiver(struct drbd_thread *thi)
{}

void drbd_send_acks_wf(struct work_struct *ws)
{}