/* linux/drivers/block/drbd/drbd_protocol.h */

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __DRBD_PROTOCOL_H
#define __DRBD_PROTOCOL_H

enum drbd_packet {};

#ifndef __packed
#define __packed	/* keep these declarations parsable where __packed is not already defined */
#endif

/* This is the layout for a packet on the wire.
 * The byte order is network byte order
 *     (except for the block_id and barrier fields;
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them back as received).
 *
 * NOTE that the payload starts at a long-aligned offset,
 * regardless of 32- or 64-bit arch!
 */
struct p_header80 {} __packed;
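
/*
 * Minimal sketch, not part of the original header: how a sender might fill
 * such a header before putting it on the wire, assuming the (elided) struct
 * carries 32-bit magic, 16-bit command and 16-bit length fields as in the
 * full kernel header.  Multi-byte fields leave the host in network byte
 * order via the cpu_to_be*() helpers from <asm/byteorder.h>; DRBD_MAGIC
 * comes from <linux/drbd.h>.
 */
static inline void example_prepare_header80(struct p_header80 *h,
					    enum drbd_packet cmd, u16 size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);	/* bytes of payload after this header */
}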

/* Header for big packets, used for data packets exceeding 64 KiB */
struct p_header95 {} __packed;
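
/*
 * Illustrative sketch only (helper name is not from DRBD): a sender could
 * pick the header variant from the payload length, assuming p_header80
 * carries a 16-bit length and p_header95 a 32-bit one as in the full
 * header.  USHRT_MAX comes from <linux/limits.h>.
 */
static inline bool example_needs_big_header(u32 payload_size)
{
	/* anything that does not fit the (assumed) 16-bit length field of
	 * p_header80 needs the "big packet" header with its 32-bit length */
	return payload_size > USHRT_MAX;
}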

struct p_header100 {} __packed;

/* These defines must not be changed without changing the protocol version.
 * New defines may only be introduced together with a protocol version bump or
 * new protocol feature flags.
 */
#define DP_HARDBARRIER
#define DP_RW_SYNC
#define DP_MAY_SET_IN_SYNC
#define DP_UNPLUG
#define DP_FUA
#define DP_FLUSH
#define DP_DISCARD
#define DP_SEND_RECEIVE_ACK
#define DP_SEND_WRITE_ACK
#define DP_WSAME
#define DP_ZEROES

/* possible combinations:
 * REQ_OP_WRITE_ZEROES:  DP_DISCARD | DP_ZEROES
 * REQ_OP_WRITE_ZEROES + REQ_NOUNMAP: DP_ZEROES
 */
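
/*
 * Hedged sketch of the sender-side mapping listed above (the helper and its
 * bio argument are illustrative, not defined in this header; requires
 * <linux/bio.h>): a plain REQ_OP_WRITE_ZEROES may unmap and therefore
 * carries both flags, while REQ_NOUNMAP drops DP_DISCARD and asks the peer
 * for a pure zero-out.
 */
static inline u32 example_zeroes_to_wire_flags(struct bio *bio)
{
	if (bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return 0;
	if (bio->bi_opf & REQ_NOUNMAP)
		return DP_ZEROES;		/* must zero, may not unmap */
	return DP_ZEROES | DP_DISCARD;		/* zero, unmapping is fine */
}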

struct p_data {} __packed;

struct p_trim {} __packed;

struct p_wsame {} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_SUPERSEDED (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {} __packed;

struct p_block_req {} __packed;
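
/*
 * Sketch only: a receive path can decode all of the p_block_ack commands
 * listed above into the same (elided) layout, assuming enum drbd_packet
 * provides the P_RECV_ACK/P_WRITE_ACK/P_SUPERSEDED values of the full header.
 */
static inline bool example_uses_p_block_ack(enum drbd_packet cmd)
{
	switch (cmd) {
	case P_RECV_ACK:	/* protocol B acknowledgement */
	case P_WRITE_ACK:	/* protocol C acknowledgement */
	case P_SUPERSEDED:	/* two-primaries conflict detection */
		return true;
	default:
		return false;
	}
}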

/*
 * commands with their own struct for additional fields:
 *   P_CONNECTION_FEATURES
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

/* supports TRIM/DISCARD on the "wire" protocol */
#define DRBD_FF_TRIM

/* Detect all-zero blocks during resync, and TRIM/UNMAP/DISCARD those blocks
 * instead of fully allocating a supposedly thin volume on the initial resync;
 * see the sketch below the define. */
#define DRBD_FF_THIN_RESYNC
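
/*
 * Sketch, not from the original header: a resync receiver could check
 * whether an incoming block is all zeroes and, if the backend supports it,
 * discard instead of writing, keeping a thin volume thin.  memchr_inv()
 * (<linux/string.h>) returns NULL when every byte matches the given value.
 */
static inline bool example_block_is_all_zeroes(const void *data, size_t len)
{
	return memchr_inv(data, 0, len) == NULL;
}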

/* supports REQ_WRITE_SAME on the "wire" protocol.
 * Note: this flag is overloaded;
 * its presence also
 *   - indicates support for 128 MiB "batch bios",
 *     i.e. a max discard size of 128 MiB
 *     instead of the 4 MiB before that.
 *   - indicates that we exchange additional settings in p_sizes,
 *     see drbd_send_sizes()/receive_sizes().
 * A feature-gating sketch follows the define below.
 */
#define DRBD_FF_WSAME
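
/*
 * Feature-gating sketch (helper name and the 4/128 MiB figures repeat the
 * comment above; nothing here is defined in this header): the overloaded
 * implications of DRBD_FF_WSAME apply only after both sides agreed on it
 * during the feature handshake.
 */
static inline unsigned int example_max_discard_bytes(u32 agreed_features)
{
	return (agreed_features & DRBD_FF_WSAME) ? 128 << 20	/* 128 MiB */
						 : 4 << 20;	/* 4 MiB */
}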

/* supports REQ_OP_WRITE_ZEROES on the "wire" protocol.
 *
 * We used to map that to "discard" on the sending side, and if we cannot
 * guarantee that discard zeroes data, the receiving side would map discard
 * back to zero-out.
 *
 * With the introduction of REQ_OP_WRITE_ZEROES,
 * we started to use that for both WRITE_ZEROES and discards,
 * hoping that WRITE_ZEROES would "do what we want":
 * UNMAP if possible, and zero-out the rest.
 *
 * The example scenario is some LVM "thin" backend.
 *
 * While an un-allocated block on dm-thin reads as zeroes, on a dm-thin
 * with "skip_block_zeroing=true", once a partial block write has allocated
 * that block, that same block may well map "undefined old garbage" from
 * the backend for LBAs that have not yet been written to.
 *
 * If we cannot distinguish between zero-out and discard on the receiving
 * side, then to keep "undefined old garbage" from popping up randomly at
 * later times on supposedly zero-initialized blocks, we would need to map
 * all discards to zero-out on the receiving side.  But that would
 * potentially do a full alloc on thinly provisioned backends, even when
 * the expectation was to unmap/trim/discard/de-allocate.
 *
 * We need to distinguish at the protocol level whether we need to guarantee
 * zeroes (and thus use zero-out, potentially doing the mentioned full-alloc),
 * or whether we want to put the emphasis on discard, and only do a "best
 * effort zeroing" (by "discarding" blocks aligned to the discard granularity,
 * and zeroing only the potentially unaligned head and tail clippings), to at
 * least *try* to avoid "false positives" in a later online-verify, hoping
 * that someone set skip_block_zeroing=false.
 * A receiver-side sketch follows the define below.
 */
#define DRBD_FF_WZEROES
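
/*
 * Receiver-side sketch of the decision described above (helper name and
 * flag plumbing are illustrative; the REQ_OP_ and REQ_NOUNMAP values come
 * from <linux/blk_types.h>): DP_ZEROES means the result must read back as
 * zeroes; DP_DISCARD alongside it allows unmapping, while DP_DISCARD alone
 * is a best-effort discard that may leave garbage.
 */
static inline unsigned int example_peer_flags_to_op(u32 dp_flags, bool *nounmap)
{
	*nounmap = false;
	if (dp_flags & DP_ZEROES) {
		/* zero-out required; only unmap if the peer also allowed discard */
		*nounmap = !(dp_flags & DP_DISCARD);
		return REQ_OP_WRITE_ZEROES;
	}
	if (dp_flags & DP_DISCARD)
		return REQ_OP_DISCARD;
	return REQ_OP_WRITE;
}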


struct p_connection_features {} __packed;

struct p_barrier {} __packed;

struct p_barrier_ack {} __packed;

struct p_rs_param {} __packed;

struct p_rs_param_89 {} __packed;

struct p_rs_param_95 {} __packed;

enum drbd_conn_flags {};

struct p_protocol {} __packed;

struct p_uuids {} __packed;

struct p_rs_uuid {} __packed;

/* optional queue_limits, only exchanged if (agreed_features & DRBD_FF_WSAME);
 * see also struct queue_limits, as of late 2015 */
struct o_qlim {} __packed;

struct p_sizes {} __packed;

struct p_state {} __packed;

struct p_req_state {} __packed;

struct p_req_state_reply {} __packed;

struct p_drbd06_param {} __packed;

struct p_block_desc {} __packed;

/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {};

struct p_compressed_bm {} __packed;

struct p_delay_probe93 {} __packed;

/*
 * Bitmap packets need to fit within a single page on the sender and receiver,
 * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
 */
#define DRBD_SOCKET_BUFFER_SIZE
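
/*
 * Sketch of how the limit is typically consumed (the actual value is elided
 * here, and the word count assumes the p_header100 layout of the full
 * header): the number of 64-bit bitmap words per packet follows from the
 * buffer size minus the on-the-wire header.
 */
#define EXAMPLE_BM_WORDS_PER_PACKET \
	((DRBD_SOCKET_BUFFER_SIZE - sizeof(struct p_header100)) / sizeof(u64))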

#endif  /* __DRBD_PROTOCOL_H */