linux/drivers/md/dm-vdo/io-submitter.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "io-submitter.h"

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

#include "memory-alloc.h"
#include "permassert.h"

#include "data-vio.h"
#include "logger.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"

/*
 * Submission of bio operations to the underlying storage device will go through a separate work
 * queue thread (or more than one) to prevent blocking in other threads if the storage device has a
 * full queue. The plug structure allows that thread to do better batching of requests to make the
 * I/O more efficient.
 *
 * When multiple worker threads are used, a thread is chosen for an I/O operation submission based
 * on the PBN, so a given PBN will consistently wind up on the same thread. Flush operations are
 * assigned round-robin.
 *
 * The map (protected by the mutex) collects pending I/O operations so that the worker thread can
 * reorder them to try to encourage I/O request merging in the request queue underneath.
 */
struct bio_queue_data {};

struct io_submitter {};
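
/*
 * A rough sketch of the per-queue state the comment above implies: one work
 * queue per submission thread, a plug for batching, and a map of pending
 * I/Os protected by a mutex. The name and field layout here are illustrative
 * only; the actual bio_queue_data and io_submitter definitions are elided
 * above.
 */
struct example_bio_queue_state {
	struct vdo_work_queue *queue;	/* this submission thread's work queue */
	struct blk_plug plug;		/* batches requests issued by this thread */
	struct int_map *map;		/* pending I/Os, keyed by bio sector */
	struct mutex lock;		/* protects the map */
	unsigned int queue_number;	/* this queue's index among the threads */
};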

static void start_bio_queue(void *ptr)
{}

static void finish_bio_queue(void *ptr)
{}

static const struct vdo_work_queue_type bio_queue_type = {
	.start = start_bio_queue,
	.finish = finish_bio_queue,
};

/**
 * count_all_bios() - Determine which bio counter to use.
 * @vio: The vio associated with the bio.
 * @bio: The bio to count.
 */
static void count_all_bios(struct vio *vio, struct bio *bio)
{}

/**
 * assert_in_bio_zone() - Assert that a vio is in the correct bio zone and not in interrupt
 *                        context.
 * @vio: The vio to check.
 */
static void assert_in_bio_zone(struct vio *vio)
{}

/**
 * send_bio_to_device() - Update stats and tracing info, then submit the supplied bio to the OS for
 *                        processing.
 * @vio: The vio associated with the bio.
 * @bio: The bio to submit to the OS.
 */
static void send_bio_to_device(struct vio *vio, struct bio *bio)
{}
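
/*
 * A hedged sketch of the submission step described above: count the bio, aim
 * it at the backing device, and hand it to the block layer. The backing
 * device is passed in explicitly here because the way the real code locates
 * it is elided above, and the stat and tracing updates beyond the counter
 * are omitted. Illustrative only.
 */
static __maybe_unused void example_send_bio(struct vio *vio, struct bio *bio,
					    struct block_device *backing_bdev)
{
	count_all_bios(vio, bio);	/* bump the appropriate bio counter */
	bio_set_dev(bio, backing_bdev);	/* redirect the bio at the storage below */
	submit_bio_noacct(bio);		/* no extra accounting for a remapped bio */
}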

/**
 * vdo_submit_vio() - Submit a vio's bio to the underlying block device. May block if the device
 *		      is busy. This callback should be used by vios which did not attempt to merge.
 */
void vdo_submit_vio(struct vdo_completion *completion)
{}

/**
 * get_bio_list() - Extract the list of bios to submit from a vio.
 * @vio: The vio submitting I/O.
 *
 * The list will always contain at least one entry (the bio for the vio on which it is called), but
 * other bios may have been merged with it as well.
 *
 * Return: The head of the bio list to submit.
 */
static struct bio *get_bio_list(struct vio *vio)
{}

/**
 * submit_data_vio() - Submit a data_vio's bio to the storage below along with
 *		       any bios that have been merged with it.
 *
 * Context: This call may block and so should only be called from a bio thread.
 */
static void submit_data_vio(struct vdo_completion *completion)
{}
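
/*
 * A sketch of draining a merged chain such as get_bio_list() returns: merged
 * bios are linked through bi_next, so submission walks the chain and hands
 * each bio to the block layer in turn. The per-vio statistics and plugging
 * done by the real code are omitted, and the backing device parameter stands
 * in for the lookup the elided code performs. Illustrative only.
 */
static __maybe_unused void example_submit_bio_chain(struct bio *head,
						    struct block_device *backing_bdev)
{
	struct bio *bio, *next;

	for (bio = head; bio != NULL; bio = next) {
		next = bio->bi_next;	/* save the link before the block layer reuses it */
		bio->bi_next = NULL;	/* submit each bio as a standalone request */
		bio_set_dev(bio, backing_bdev);
		submit_bio_noacct(bio);
	}
}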

/**
 * get_mergeable_locked() - Attempt to find an already queued bio that the current bio can be
 *                          merged with.
 * @map: The bio map to use for merging.
 * @vio: The vio we want to merge.
 * @back_merge: Set to true for a back merge, false for a front merge.
 *
 * There are two types of merging possible, forward and backward, which are distinguished by a flag
 * that uses kernel elevator terminology.
 *
 * Return: The vio to merge with, or NULL if no merging is possible.
 */
static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
					bool back_merge)
{}
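
/*
 * A sketch of the sector arithmetic behind the two merge directions described
 * above, assuming one 4K block (eight 512-byte sectors) per bio and assuming
 * the map is keyed by starting sector with vdo_int_map_get() as the lookup.
 * A back merge looks for a queued neighbor that ends where this bio begins;
 * a front merge looks for one that begins where this bio ends. The
 * compatibility checks the real code applies to the candidate are omitted.
 * Illustrative only.
 */
#define EXAMPLE_SECTORS_PER_BLOCK 8

static __maybe_unused struct vio *example_find_merge_candidate(struct int_map *map,
								struct bio *bio,
								bool back_merge)
{
	sector_t key = bio->bi_iter.bi_sector;

	key = back_merge ? key - EXAMPLE_SECTORS_PER_BLOCK : key + EXAMPLE_SECTORS_PER_BLOCK;
	return vdo_int_map_get(map, key);
}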

static int map_merged_vio(struct int_map *bio_map, struct vio *vio)
{}

static int merge_to_prev_tail(struct int_map *bio_map, struct vio *vio,
			      struct vio *prev_vio)
{}

static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
			      struct vio *next_vio)
{}

/**
 * try_bio_map_merge() - Attempt to merge a vio's bio with other pending I/Os.
 * @vio: The vio to merge.
 *
 * Currently this is only used for data_vios, but is broken out for future use with metadata vios.
 *
 * Return: whether or not the vio was merged.
 */
static bool try_bio_map_merge(struct vio *vio)
{}

/**
 * vdo_submit_data_vio() - Submit I/O for a data_vio.
 * @data_vio: the data_vio for which to issue I/O.
 *
 * If possible, this I/O will be merged with other pending I/Os. Otherwise, the data_vio will be
 * sent to the appropriate bio zone directly.
 */
void vdo_submit_data_vio(struct data_vio *data_vio)
{}
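
/*
 * A sketch of the submit path described above and in the header comment: try
 * to merge, and otherwise route the I/O to a bio zone chosen from the PBN.
 * The helper below shows only one plausible reading of the zone selection
 * using the rotation interval documented for vdo_make_io_submitter(); the
 * exact mapping, the merge attempt, and the enqueue are all elided above.
 * Illustrative only.
 */
static __maybe_unused unsigned int example_choose_bio_queue(physical_block_number_t pbn,
							    unsigned int rotation_interval,
							    unsigned int thread_count)
{
	/* Same PBN, same thread: nearby PBNs share a queue for one rotation interval. */
	return (pbn / rotation_interval) % thread_count;
}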

/**
 * __submit_metadata_vio() - Submit I/O for a metadata vio.
 * @vio: the vio for which to issue I/O
 * @physical: the physical block number to read or write
 * @callback: the bio endio function which will be called after the I/O completes
 * @error_handler: the handler for submission or I/O errors (may be NULL)
 * @operation: the type of I/O to perform
 * @data: the buffer to read or write (may be NULL)
 *
 * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
 * other vdo threads.
 *
 * The error handler is guaranteed to run on the correct thread only as long as the thread calling
 * this function is the same as the thread set in the endio callback, and no error can occur on the
 * bio queue itself. Currently this is true for all callers, but additional care will be needed if
 * this ever changes.
 */
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
			   bio_end_io_t callback, vdo_action_fn error_handler,
			   blk_opf_t operation, char *data)
{}
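
/*
 * A sketch of the bio setup implied by the parameters above, assuming 4K VDO
 * blocks on 512-byte sectors. Attaching the data buffer, selecting the error
 * handler's thread, and enqueuing the vio on a bio queue are all elided
 * above, and the REQ_META tagging is illustrative rather than a claim about
 * what vdo actually sets.
 */
static __maybe_unused void example_prepare_metadata_bio(struct bio *bio,
							physical_block_number_t physical,
							bio_end_io_t callback,
							blk_opf_t operation)
{
	bio->bi_end_io = callback;		/* run this when the I/O completes */
	bio->bi_opf = operation | REQ_META;	/* read or write, tagged as metadata */
	bio->bi_iter.bi_sector = physical * 8;	/* eight 512-byte sectors per block */
}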

/**
 * vdo_make_io_submitter() - Create an io_submitter structure.
 * @thread_count: Number of bio-submission threads to set up.
 * @rotation_interval: Interval to use when rotating between bio-submission threads when enqueuing
 *                     completions.
 * @max_requests_active: Number of bios for merge tracking.
 * @vdo: The vdo which will use this submitter.
 * @io_submitter_ptr: A pointer to hold the new io_submitter.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
			  unsigned int max_requests_active, struct vdo *vdo,
			  struct io_submitter **io_submitter_ptr)
{}

/**
 * vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer.
 * @io_submitter: The I/O submitter data to tear down (may be NULL).
 */
void vdo_cleanup_io_submitter(struct io_submitter *io_submitter)
{}

/**
 * vdo_free_io_submitter() - Free the io_submitter fields and structure as needed.
 * @io_submitter: The I/O submitter data to destroy.
 *
 * This must be called after vdo_cleanup_io_submitter(). It is used to release resources late in
 * the shutdown process to avoid or reduce the chance of race conditions.
 */
void vdo_free_io_submitter(struct io_submitter *io_submitter)
{}