linux/drivers/md/dm-vdo/funnel-workqueue.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "funnel-workqueue.h"

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/percpu.h>

#include "funnel-queue.h"
#include "logger.h"
#include "memory-alloc.h"
#include "numeric.h"
#include "permassert.h"
#include "string-utils.h"

#include "completion.h"
#include "status-codes.h"

static DEFINE_PER_CPU(unsigned int, service_queue_rotor);

/**
 * DOC: Work queue definition.
 *
 * There are two types of work queues: simple, with one worker thread, and round-robin, which uses
 * a group of the former to do the work, and assigns work to them in round-robin fashion (roughly).
 * Externally, both are represented via the same common sub-structure, though there's actually not
 * a great deal of overlap between the two types internally.
 */
struct vdo_work_queue {};

struct simple_work_queue {};

struct round_robin_work_queue {};
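
/*
 * Purely illustrative sketch of the layout the DOC comment above implies: a simple queue embeds
 * the common sub-structure and owns one worker thread plus one funnel queue per priority, while a
 * round-robin queue embeds the same sub-structure and fans work out to an array of simple queues.
 * All names and fields below are hypothetical, not the driver's actual structures.
 */
#define EXAMPLE_NUM_PRIORITIES 4	/* stand-in for the real priority limit */

struct example_simple_work_queue {
	struct vdo_work_queue common;		/* the externally visible part */
	struct funnel_queue *priority_lists[EXAMPLE_NUM_PRIORITIES];
	wait_queue_head_t waiting_threads;	/* where the worker sleeps when idle */
	struct task_struct *thread;		/* the single worker thread */
	void *private;				/* per-thread private data */
};

struct example_round_robin_work_queue {
	struct vdo_work_queue common;		/* same externally visible part */
	struct example_simple_work_queue **service_queues;
	unsigned int num_service_threads;
};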

static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue)
{}

static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue)
{}
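
/*
 * Sketch of the downcast the two helpers above perform, written against the hypothetical example
 * structures. Assuming the common sub-structure is embedded as a member named "common" (an
 * assumption, not confirmed by this file), container_of() recovers the enclosing queue type.
 */
static inline struct example_simple_work_queue *
example_as_simple_work_queue(struct vdo_work_queue *queue)
{
	return (queue == NULL) ?
		NULL : container_of(queue, struct example_simple_work_queue, common);
}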

/* Processing normal completions. */

/*
 * Dequeue and return the next waiting completion, if any.
 *
 * We scan the funnel queues from highest priority to lowest, once; there is therefore a race
 * condition where a high-priority completion can be enqueued followed by a lower-priority one, and
 * we'll grab the latter (but we'll catch the high-priority item on the next call). If strict
 * enforcement of priorities becomes necessary, this function will need fixing.
 */
static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue)
{}
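
/*
 * Sketch of the single-pass priority scan described above, against the hypothetical
 * example_simple_work_queue. It assumes vdo_funnel_queue_poll() from funnel-queue.h returns the
 * oldest queued entry or NULL, and that each vdo_completion embeds its queue link in a member
 * assumed here to be named work_queue_entry_link.
 */
static struct vdo_completion *
example_poll_for_completion(struct example_simple_work_queue *queue)
{
	int i;

	/* Highest priority first; one pass only, hence the benign race noted above. */
	for (i = EXAMPLE_NUM_PRIORITIES - 1; i >= 0; i--) {
		struct funnel_queue_entry *link =
			vdo_funnel_queue_poll(queue->priority_lists[i]);

		if (link != NULL)
			return container_of(link, struct vdo_completion,
					    work_queue_entry_link);
	}

	return NULL;
}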

static void enqueue_work_queue_completion(struct simple_work_queue *queue,
					  struct vdo_completion *completion)
{}
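
/*
 * Sketch of the submission step under the same assumptions as the poll sketch above: put the
 * completion's link on the funnel queue for the requested priority, then wake the worker in case
 * it is sleeping. Funnel queue producers do not take a lock, so this is safe from any thread;
 * vdo_funnel_queue_put() is from funnel-queue.h, and the priority is taken as a parameter rather
 * than read from the completion.
 */
static __maybe_unused void example_enqueue(struct example_simple_work_queue *queue,
					   struct vdo_completion *completion,
					   unsigned int priority)
{
	vdo_funnel_queue_put(queue->priority_lists[priority],
			     &completion->work_queue_entry_link);
	wake_up(&queue->waiting_threads);
}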

static void run_start_hook(struct simple_work_queue *queue)
{}

static void run_finish_hook(struct simple_work_queue *queue)
{}

/*
 * Wait for the next completion to process, or until kthread_should_stop indicates that it's time
 * for us to shut down.
 *
 * If kthread_should_stop says it's time to stop but we have pending completions, return a
 * completion.
 *
 * Also update statistics relating to scheduler interactions.
 */
static struct vdo_completion *wait_for_next_completion(struct simple_work_queue *queue)
{}
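
/*
 * Sketch of the wait described above, using the standard prepare_to_wait()/schedule() idiom and
 * the hypothetical wait queue head from the layout sketch. Polling again after registering on the
 * wait queue closes the race with a concurrent enqueue, and if kthread_should_stop() has fired
 * while work is still pending, the pending completion is returned first. The statistics mentioned
 * above are omitted.
 */
static struct vdo_completion *
example_wait_for_next_completion(struct example_simple_work_queue *queue)
{
	struct vdo_completion *completion;
	DEFINE_WAIT(wait);

	while (true) {
		prepare_to_wait(&queue->waiting_threads, &wait, TASK_INTERRUPTIBLE);
		/* Re-check after registering so an enqueue between poll and sleep is not missed. */
		completion = example_poll_for_completion(queue);
		if ((completion != NULL) || kthread_should_stop())
			break;

		schedule();
	}

	finish_wait(&queue->waiting_threads, &wait);
	return completion;
}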

static void process_completion(struct simple_work_queue *queue,
			       struct vdo_completion *completion)
{}

static void service_work_queue(struct simple_work_queue *queue)
{}

static int work_queue_runner(void *ptr)
{}
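
/*
 * Sketch of how a kthread body like the one above plausibly ties the pieces together: poll, sleep
 * when idle, and dispatch each completion until kthread_should_stop() is set and nothing remains.
 * Calling the completion's callback member directly is a simplification of process_completion(),
 * and the start/finish hooks and statistics are only noted in comments.
 */
static int example_work_queue_runner(void *ptr)
{
	struct example_simple_work_queue *queue = ptr;

	/* run_start_hook() equivalent would run here. */

	while (true) {
		struct vdo_completion *completion = example_poll_for_completion(queue);

		if (completion == NULL)
			completion = example_wait_for_next_completion(queue);

		if (completion == NULL) {
			/* Told to stop, and nothing left to drain. */
			break;
		}

		completion->callback(completion);
	}

	/* run_finish_hook() equivalent would run here. */
	return 0;
}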

/* Creation & teardown */

static void free_simple_work_queue(struct simple_work_queue *queue)
{}

static void free_round_robin_work_queue(struct round_robin_work_queue *queue)
{}

void vdo_free_work_queue(struct vdo_work_queue *queue)
{}

static int make_simple_work_queue(const char *thread_name_prefix, const char *name,
				  struct vdo_thread *owner, void *private,
				  const struct vdo_work_queue_type *type,
				  struct simple_work_queue **queue_ptr)
{}
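
/*
 * Sketch of the thread-creation step such a constructor typically performs: spawn the worker with
 * kthread_run(), deriving the kernel thread name from the prefix and queue name passed in. Queue
 * and funnel-queue allocation are not shown, and the "%s:%s" name format is an assumption rather
 * than this driver's actual convention.
 */
static __maybe_unused int example_start_worker(struct example_simple_work_queue *queue,
					       const char *thread_name_prefix, const char *name)
{
	struct task_struct *thread = kthread_run(example_work_queue_runner, queue, "%s:%s",
						 thread_name_prefix, name);

	if (IS_ERR(thread))
		return PTR_ERR(thread);

	queue->thread = thread;
	return VDO_SUCCESS;
}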

/**
 * vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will
 *                         be distributed to them in round-robin fashion.
 *
 * Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless
 * of the actual number of queues and threads allocated here, code outside of the queue
 * implementation will treat this as a single zone.
 */
int vdo_make_work_queue(const char *thread_name_prefix, const char *name,
			struct vdo_thread *owner, const struct vdo_work_queue_type *type,
			unsigned int thread_count, void *thread_privates[],
			struct vdo_work_queue **queue_ptr)
{}
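
/*
 * Usage sketch for the constructor above: a hypothetical caller asking for four threads, so the
 * result is internally a round-robin queue wrapping four simple queues. The names, the queue type,
 * and passing NULL for thread_privates are placeholder assumptions; real callers live elsewhere in
 * the vdo code.
 */
static __maybe_unused int example_create_cpu_queue(struct vdo_thread *owner,
						   const struct vdo_work_queue_type *queue_type,
						   struct vdo_work_queue **queue_ptr)
{
	return vdo_make_work_queue("vdo", "cpu", owner, queue_type, 4, NULL, queue_ptr);
}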

static void finish_simple_work_queue(struct simple_work_queue *queue)
{}

static void finish_round_robin_work_queue(struct round_robin_work_queue *queue)
{}

/* No enqueueing of completions should be done once this function is called. */
void vdo_finish_work_queue(struct vdo_work_queue *queue)
{}
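
/*
 * Sketch of the shutdown handshake a simple queue's finish step typically uses: kthread_stop()
 * wakes the worker if it is sleeping, makes kthread_should_stop() return true, and waits for the
 * thread function to return, so anything already enqueued is drained first (see the runner sketch
 * above). Ordering details and statistics are elided.
 */
static __maybe_unused void
example_finish_simple_work_queue(struct example_simple_work_queue *queue)
{
	if (queue->thread == NULL)
		return;

	kthread_stop(queue->thread);
	queue->thread = NULL;
}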

/* Debugging dumps */

static void dump_simple_work_queue(struct simple_work_queue *queue)
{}

void vdo_dump_work_queue(struct vdo_work_queue *queue)
{}

static void get_function_name(void *pointer, char *buffer, size_t buffer_length)
{}

/*
 * Write to the buffer some info about the completion, for logging. Since the common use case is
 * dumping info about a lot of completions to syslog all at once, the format favors brevity over
 * readability.
 */
void vdo_dump_completion_to_buffer(struct vdo_completion *completion, char *buffer,
				   size_t length)
{}
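
/*
 * Sketch of the kind of compact formatting the comment above describes, assuming the completion's
 * callback and result members are the most useful things to show. The kernel's "%ps" specifier
 * resolves a function pointer to its symbol name, which keeps one-line-per-completion dumps short;
 * the fields and format actually used by the driver differ.
 */
static __maybe_unused void example_dump_completion(struct vdo_completion *completion,
						   char *buffer, size_t length)
{
	snprintf(buffer, length, "cb=%ps result=%d", completion->callback, completion->result);
}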

/* Completion submission */
/*
 * Add a completion to a work queue for processing. If the queue was created with multiple
 * threads, the completion is handed off to one of the underlying simple queues in (roughly)
 * round-robin fashion.
 */
void vdo_enqueue_work_queue(struct vdo_work_queue *queue,
			    struct vdo_completion *completion)
{}
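
/*
 * Sketch of how the per-cpu service_queue_rotor declared near the top of the file supports the
 * "(roughly) round-robin" distribution the DOC comment describes: each CPU advances its own
 * counter and the submission goes to the corresponding simple sub-queue. Because every CPU has an
 * independent rotor the spread is only approximately round-robin, but no cross-CPU atomic is
 * needed on the submission path. The sub-queue array is the hypothetical one from the layout
 * sketch above.
 */
static __maybe_unused struct example_simple_work_queue *
example_select_service_queue(struct example_round_robin_work_queue *queue)
{
	unsigned int rotor = this_cpu_inc_return(service_queue_rotor);

	return queue->service_queues[rotor % queue->num_service_threads];
}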

/* Misc */

/*
 * Return the work queue pointer recorded at initialization time in the work-queue stack handle
 * initialized on the stack of the current thread, if any.
 */
static struct simple_work_queue *get_current_thread_work_queue(void)
{}

struct vdo_work_queue *vdo_get_current_work_queue(void)
{}

struct vdo_thread *vdo_get_work_queue_owner(struct vdo_work_queue *queue)
{}

/**
 * vdo_get_work_queue_private_data() - Returns the private data for the current thread's work
 *                                     queue, or NULL if none or if the current thread is not a
 *                                     work queue thread.
 */
void *vdo_get_work_queue_private_data(void)
{}

bool vdo_work_queue_type_is(struct vdo_work_queue *queue,
			    const struct vdo_work_queue_type *type)
{}