/* linux/block/blk-ioc.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * Slab cache for struct io_context allocations; created once at boot by
 * blk_ioc_init() and used by alloc_io_context()/put_io_context().
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{}

static void icq_free_icq_rcu(struct rcu_head *head)
{}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{}

static void ioc_exit_icqs(struct io_context *ioc)
{}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{}

/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock.  Do it asynchronously from a workqueue.
 */
static bool ioc_delay_free(struct io_context *ioc)
{}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{}
#else /* CONFIG_BLK_ICQ */
/* Without CONFIG_BLK_ICQ there are no icqs to exit; this is a no-op. */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
/*
 * Without CONFIG_BLK_ICQ there is never anything to unlink, so freeing
 * never needs to be deferred; always tell the caller to free inline.
 */
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{}
EXPORT_SYMBOL_GPL();

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{}

/*
 * Allocate and initialize a new io_context.  Returns NULL on allocation
 * failure; the caller owns the single reference on success.
 */
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	ioc->ioprio = IOPRIO_DEFAULT;

	return ioc;
}

int set_task_ioprio(struct task_struct *task, int ioprio)
{}
EXPORT_SYMBOL_GPL();

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{}

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{}
EXPORT_SYMBOL();

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{}

struct io_cq *ioc_find_get_icq(struct request_queue *q)
{}
EXPORT_SYMBOL_GPL();
#endif /* CONFIG_BLK_ICQ */

/* Create the io_context slab cache at boot; SLAB_PANIC on failure. */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);