linux/drivers/block/drbd/drbd_req.c

// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <[email protected]>.
   Copyright (C) 2002-2008, Lars Ellenberg <[email protected]>.


 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{}

void drbd_req_destroy(struct kref *kref)
{}

static void wake_all_senders(struct drbd_connection *connection)
{}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{}


/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{}
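
/*
 * Illustrative sketch (not the in-tree code) of how a caller is expected to
 * consume the bio_and_error contract described above: modify the request
 * under the req_lock, then, outside the lock, complete the master bio only
 * if __req_mod() decided it is fit to be completed.  Only names declared in
 * drbd_int.h / drbd_req.h are used; the helper itself is hypothetical.
 */
static void __maybe_unused drbd_req_mod_and_complete_sketch(struct drbd_request *req,
		enum drbd_req_event what, struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = req->device;
	struct bio_and_error m = { .bio = NULL, .error = 0 };

	spin_lock_irq(&device->resource->req_lock);
	__req_mod(req, what, peer_device, &m);
	spin_unlock_irq(&device->resource->req_lock);

	/* m.bio was only set if the master bio may be completed now */
	if (m.bio)
		complete_master_bio(device, &m);
}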

/* still holds resource->req_lock */
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{}

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{}
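
/*
 * Illustrative sketch of the reference counting idea behind mod_rq_state();
 * heavily simplified (only two of the RQ_*_PENDING bits are shown) and not
 * the in-tree body.  Every rq_state bit that stands for still-outstanding
 * work holds one completion_ref: setting such a bit takes a reference,
 * clearing it queues a "put" which is handed to drbd_req_put_completion_ref()
 * last, still under resource->req_lock.
 */
static void __maybe_unused mod_rq_state_sketch(struct drbd_request *req,
		struct bio_and_error *m, int clear, int set)
{
	const unsigned int s = req->rq_state;
	int c_put = 0;

	/* apply the requested state transition */
	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* intent: take a completion reference for work that becomes pending */
	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);
	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING))
		atomic_inc(&req->completion_ref);

	/* progress: queue a "put" for work that has just finished */
	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING))
		c_put++;
	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING))
		c_put++;

	/* may complete the master bio and drop req->kref */
	drbd_req_put_completion_ref(req, m, c_put);
}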

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{}
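
/*
 * Illustrative sketch of the check described above (the in-tree body is
 * omitted in this listing).  Assumes only the RQ_* state flags from
 * drbd_req.h: a protocol A write expects neither a write ack nor a receive
 * ack, and "PENDING" means RQ_NET_PENDING is still set.
 */
static inline bool __maybe_unused is_pending_write_protocol_A_sketch(struct drbd_request *req)
{
	return (req->rq_state &
		(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}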

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  keeps it all in this one place, where it is easier to audit,
 *  makes it obvious that whatever "event" "happens" to a request must
 *  happen "atomically" within the req_lock,
 *  and forces us to think in a very structured manner
 *  about the "events" that may happen to a request during its lifetime ...
 *
 *
 * peer_device == NULL means local disk
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct drbd_peer_device *peer_device,
		struct bio_and_error *m)
{}
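
/*
 * Illustrative excerpt only, not the full in-tree switch: it shows the shape
 * of the state machine, where each drbd_req_event maps to one mod_rq_state()
 * call with flags to clear and flags to set.  The peer_device argument and
 * most events are omitted for brevity; event and flag names are assumed from
 * drbd_req.h.
 */
static void __maybe_unused __req_mod_excerpt_sketch(struct drbd_request *req,
		enum drbd_req_event what, struct bio_and_error *m)
{
	switch (what) {
	case TO_BE_SENT: /* via network */
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally, to the backing disk */
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case HANDED_OVER_TO_NETWORK:
		if (is_pending_write_protocol_A(req))
			/* protocol A: "done" as soon as it is on the wire */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
					RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		break;

	default:
		break;
	}
}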

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{}
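
/*
 * Illustrative sketch of the check described above; not necessarily the
 * in-tree body.  Assumes the bitmap helpers BM_SECT_TO_BIT() and
 * drbd_bm_count_bits() declared in drbd_int.h.
 */
static bool __maybe_unused drbd_may_do_local_read_sketch(struct drbd_device *device,
		sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector = sector + (size >> 9) - 1;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;

	/* size may span more than one BM_BLOCK_SIZE sized bitmap bit:
	 * the whole range must already be in sync */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);
	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}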

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{}

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{}
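
/*
 * Illustrative sketch of the wait loop described above; not the in-tree body.
 * It assumes the interval tree helpers from drbd_interval.h and the
 * misc_wait wait queue from drbd_int.h, and that the caller holds
 * resource->req_lock (which is dropped while sleeping).
 */
static void __maybe_unused complete_conflicting_writes_sketch(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		i = drbd_find_overlap(&device->write_requests, sector, size);
		if (!i)
			break;	/* only way out: no conflicting interval left */

		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		/* ask to be woken up on progress for this interval */
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}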

/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{}
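
/*
 * Illustrative sketch of how a caller is expected to honour the contract
 * described above; the helper name is hypothetical, it mirrors but is not
 * the in-tree read path.
 */
static bool __maybe_unused drbd_read_has_data_source_sketch(struct drbd_request *req)
{
	if (do_remote_read(req))
		return true;		/* will be read from the peer */
	if (req->private_bio)
		return true;		/* will be submitted to the local backing disk */
	return false;			/* no access to good data: complete with -EIO */
}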

bool drbd_should_do_remote(union drbd_dev_state s)
{}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{}
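
/*
 * Illustrative sketch of the distinction made above; not the in-tree body.
 * drbd 8.4 has exactly one peer, so the count is either 0 or 1; the helper
 * name is hypothetical.
 */
static int __maybe_unused drbd_count_write_targets_sketch(struct drbd_device *device)
{
	/* A peer that should receive the data itself counts as a write target;
	 * a peer we are (L_)AHEAD for only gets an "out of sync" notification
	 * and is therefore not counted. */
	return drbd_should_do_remote(device->state) ? 1 : 0;
}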

static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
{}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{}
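
/*
 * Illustrative sketch of how a caller is expected to handle the three
 * possible outcomes described above; the helper name is hypothetical and
 * this mirrors, but is not, the in-tree submit path.
 */
static __maybe_unused struct drbd_request *drbd_request_prepare_checked_sketch(
		struct drbd_device *device, struct bio *bio)
{
	struct drbd_request *req = drbd_request_prepare(device, bio);

	if (!req)
		return NULL;	/* queued on the submitter thread, nothing left to do */
	if (IS_ERR(req))
		return NULL;	/* -ENOMEM: the bio has already been completed with an error */
	/* the caller should now drbd_send_and_submit() this request itself */
	return req;
}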

/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not be able to read back what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 *
 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
 * and queues for retry later.
 */
static bool may_do_writes(struct drbd_device *device)
{}
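
/*
 * Illustrative sketch of the condition described above; not necessarily the
 * in-tree body.
 */
static bool __maybe_unused may_do_writes_sketch(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;

	/* at least one path to current data: our own disk, or the peer's */
	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}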

struct drbd_plug_cb {};

static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{}

static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
{}

static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
{}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{}

void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{}

static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{}

void do_submit(struct work_struct *ws)
{}

void drbd_submit_bio(struct bio *bio)
{}

static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{}

/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   (or from the local disk, respectively),
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (or the disk was attached, respectively)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective. Good enough.
 */

void request_timer_fn(struct timer_list *t)
{}
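
/*
 * Illustrative sketch of the timeout test described in the comment above
 * request_timer_fn(); not the in-tree implementation.  "oldest_submit_jif"
 * (jiffies timestamp of the oldest pending request), "established_jif"
 * (when the connection was established or the disk was attached) and
 * "effective_timeout" (already scaled to jiffies) are hypothetical
 * parameters for this example.
 */
static bool __maybe_unused drbd_request_timed_out_sketch(unsigned long oldest_submit_jif,
		unsigned long established_jif, unsigned long effective_timeout)
{
	unsigned long now = jiffies;

	/* the oldest request must really be older than the timeout ... */
	if (!time_after(now, oldest_submit_jif + effective_timeout))
		return false;
	/* ... and the connection/disk must have been up for at least as long;
	 * time_in_range() also covers the 32bit jiffies wrap-around mentioned
	 * above. */
	if (time_in_range(now, established_jif, established_jif + effective_timeout))
		return false;
	return true;
}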