linux/io_uring/io-wq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
#define WORKER_INIT_LIMIT	3

enum {};

enum {};

enum {};

/*
 * One for each thread in a wq pool
 */
struct io_worker {};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wq_acct {};

enum {};

/*
 * Per io_wq state
 */
struct io_wq {};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{}

static void io_worker_release(struct io_worker *worker)
{}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{}

static void io_worker_ref_put(struct io_wq *wq)
{}

bool io_wq_worker_stopped(void)
{}

static void io_worker_cancel_cb(struct io_worker *worker)
{}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{}

static void io_worker_exit(struct io_worker *worker)
{}

static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
{}

/*
 * If there's work to do, returns true with acct->lock acquired. If not,
 * returns false with no lock held.
 */
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
	__acquires(&acct->lock)
{}
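
/*
 * Illustrative sketch of the check-then-lock pattern the comment above
 * describes; the name is hypothetical and the real (elided) body may differ.
 * Assumes io_wq_acct carries a raw_spinlock_t `lock` guarding its pending
 * work, as the __acquires/__must_hold annotations in this file imply.
 */
static inline bool io_acct_run_queue_sketch(struct io_wq_acct *acct)
	__acquires(&acct->lock)
{
	/* cheap unlocked peek first */
	if (!__io_acct_run_queue(acct))
		return false;

	raw_spin_lock(&acct->lock);
	/* re-check under the lock; only return true with the lock held */
	if (__io_acct_run_queue(acct))
		return true;

	raw_spin_unlock(&acct->lock);
	return false;
}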

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{}
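
/*
 * Sketch of the free-list scan described above (hypothetical name, real body
 * elided). Assumes the fields usually found here but trimmed from the struct
 * definitions in this listing: wq->free_list as an RCU nulls list,
 * worker->nulls_node and worker->task.
 */
static bool io_wq_activate_free_worker_sketch(struct io_wq *wq,
					      struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		/* only wake a worker accounted to the right bound/unbound class */
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}
	return false;
}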

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{}
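
/*
 * Sketch of the "create one if below the max" half of the comment above
 * (hypothetical name, real body elided). Assumes acct->nr_workers,
 * acct->max_workers, acct->index and wq->lock, none of which are shown in the
 * trimmed struct definitions; upstream also pins a worker reference on the wq
 * before creating, which is omitted here.
 */
static bool io_wq_create_worker_sketch(struct io_wq *wq, struct io_wq_acct *acct)
{
	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		/* at the cap: don't create, existing workers will catch up */
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);

	/* index selects the bound/unbound accounting class */
	return create_io_worker(wq, acct->index);
}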

static void io_wq_inc_running(struct io_worker *worker)
{}

static void create_worker_cb(struct callback_head *cb)
{}

static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{}

static void io_wq_dec_running(struct io_worker *worker)
{}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{}
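
/*
 * Sketch of the busy transition described above (hypothetical name, real body
 * elided). Assumes worker->flags with an IO_WORKER_F_FREE flag from the elided
 * enum, plus worker->nulls_node and wq->lock.
 */
static void __io_worker_busy_sketch(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		/* drop off the free list; we're about to run work */
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}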

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{}
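
/*
 * Sketch of the idle transition (hypothetical name, real body elided); the
 * caller already holds wq->lock per the annotation above. Same assumed
 * fields and flag as the busy sketch.
 */
static void __io_worker_idle_sketch(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		/* park on the free list so the activate-free-worker path finds us */
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}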

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{}

static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{}

static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{}

/*
 * Called with acct->lock held, drops it before returning
 */
static void io_worker_handle_work(struct io_wq_acct *acct,
				  struct io_worker *worker)
	__releases(&acct->lock)
{}
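
/*
 * Heavily simplified sketch of the work loop (hypothetical name, real body
 * elided): it omits hashed-work serialization, cancellation and signal
 * handling. Assumes the worker->wq back-pointer and the wq->do_work() /
 * wq->free_work() callbacks installed at io_wq_create() time, with
 * free_work() returning any linked work.
 */
static void io_worker_handle_work_sketch(struct io_wq_acct *acct,
					 struct io_worker *worker)
	__releases(&acct->lock)
{
	struct io_wq *wq = worker->wq;

	do {
		struct io_wq_work *work;

		/* acct->lock is held here, on entry and after each re-check */
		work = io_get_next_work(acct, worker);
		if (work)
			__io_worker_busy(wq, worker);
		raw_spin_unlock(&acct->lock);
		if (!work)
			break;

		/* run this work item and anything linked behind it */
		do {
			io_assign_current_work(worker, work);
			wq->do_work(work);
			io_assign_current_work(worker, NULL);
			work = wq->free_work(work);
		} while (work);
	} while (io_acct_run_queue(acct));	/* true means lock re-held */
}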

static int io_wq_worker(void *data)
{}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{}
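
/*
 * Sketch of the scheduled-in hook (hypothetical name, real body elided).
 * Assumes the worker is reachable via tsk->worker_private and that
 * worker->flags carries the IO_WORKER_F_UP / IO_WORKER_F_RUNNING flags from
 * the elided enum.
 */
static void io_wq_worker_running_sketch(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	/* account one more running worker for this bound/unbound class */
	io_wq_inc_running(worker);
}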

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{}
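
/*
 * Sketch of the matching going-to-sleep hook (hypothetical name, real body
 * elided), under the same assumptions as the running sketch above. The
 * interesting part, waking or creating a replacement when pending work would
 * otherwise stall, lives in io_wq_dec_running().
 */
static void io_wq_worker_sleeping_sketch(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}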

static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{}

static inline bool io_should_retry_thread(struct io_worker *worker, long err)
{}

static void create_worker_cont(struct callback_head *cb)
{}

static void io_workqueue_create(struct work_struct *work)
{}

static bool create_io_worker(struct io_wq *wq, int index)
{}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{}
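
/*
 * Sketch of the RCU iteration described above (hypothetical name, real body
 * elided). Assumes wq->all_list / worker->all_list linkage and that a worker
 * without a task (e.g. one that never finished starting) is skipped; callers
 * are expected to hold rcu_read_lock().
 */
static bool io_wq_for_each_worker_sketch(struct io_wq *wq,
					 bool (*func)(struct io_worker *, void *),
					 void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (!io_worker_get(worker))
			continue;
		if (worker->task)
			ret = func(worker, data);
		io_worker_release(worker);
		if (ret)
			break;
	}
	return ret;
}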

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{}

static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{}

static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{}
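
/*
 * Sketch of the hashing helper (hypothetical name, real body elided): derive a
 * bucket from the key (typically the inode) and tag the work with it, so the
 * queueing side can serialize items sharing a bucket. IO_WQ_WORK_HASHED and
 * IO_WQ_HASH_SHIFT come from io-wq.h; whether work->flags is updated with a
 * plain OR or atomically varies by kernel version.
 */
static void io_wq_hash_work_sketch(struct io_wq_work *work, void *val)
{
	unsigned int bit = hash_ptr(val, IO_WQ_HASH_ORDER);

	work->flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);
}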

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{}

static inline void io_wq_remove_pending(struct io_wq *wq,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{}

static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{}

static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{}

static void io_wq_cancel_running_work(struct io_wq *wq,
				       struct io_cb_cancel_data *match)
{}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{}

static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{}

static bool io_task_work_match(struct callback_head *cb, void *data)
{}

void io_wq_exit_start(struct io_wq *wq)
{}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{}

static void io_wq_exit_workers(struct io_wq *wq)
{}

static void io_wq_destroy(struct io_wq *wq)
{}

void io_wq_put_and_exit(struct io_wq *wq)
{}

struct online_data {};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{}

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{}

/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{}
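
/*
 * Simplified sketch of the knob described above (hypothetical name, real body
 * elided): it skips the RLIMIT_NPROC clamping the real code applies. Assumes
 * wq->acct[] indexed by the elided IO_WQ_ACCT_* enum, each entry carrying a
 * max_workers field; the old limits are written back into new_count so the
 * caller can report them.
 */
static int io_wq_max_workers_sketch(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	int i;

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		prev[i] = acct->max_workers;
		if (new_count[i])	/* 0 means "just read the old value" */
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];
	return 0;
}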

static __init int io_wq_init(void)
{}
subsys_initcall(io_wq_init);