/* linux/drivers/dma/dmaengine.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs it is protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel().  Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered, it's just setup by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */

/*
 * Prefix all pr_*() output from this file. The stripped form
 * "#define pr_fmt(fmt)" expanded to nothing and silently discarded the
 * format string of every pr_err()/pr_warn() call.
 */
#define pr_fmt(fmt) "dmaengine: " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{}

/* seq_file show callback for the debugfs "summary" file. */
static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	/* NOTE(review): real body stripped by extraction — restore from
	 * upstream. Returning 0 avoids the UB of a value-less return from
	 * a non-void function. */
	return 0;
}
/*
 * DEFINE_SHOW_ATTRIBUTE(name) generates file_operations around
 * name##_show(), so the argument must be "dmaengine_summary" to match the
 * _show function defined immediately above; the stripped invocation had
 * an empty argument list, which cannot compile.
 */
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

/* Create the debugfs root and summary file; body elided by extraction. */
static void __init dmaengine_debugfs_init(void)
{}
#else
/* !CONFIG_DEBUG_FS stubs: debugfs support compiles away to no-ops. */
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif	/* DEBUG_FS */

/* --- sysfs implementation --- */

/* Generic channel name used when a client requests "the" slave channel;
 * the stripped define had no replacement text. */
#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev:	device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{}

/*
 * NOTE(review): the three sysfs _show bodies below were stripped by the
 * extraction — presumably each emits a per-channel statistic
 * (memcpy_count, bytes_transferred, client usage) into @buf; restore from
 * upstream. As written, the empty non-void bodies cannot compile cleanly.
 */
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] =;
ATTRIBUTE_GROUPS();

/* Device-release callback for a channel's struct device; body elided by
 * extraction — presumably frees the containing dma_chan_dev. */
static void chan_dev_release(struct device *dev)
{}

static struct class dma_devclass =;

/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan:	associated channel for this entry
 */
struct dma_chan_tbl_ent {
	/* The kerneldoc above documents @chan; the stripped source left the
	 * struct body empty. */
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

/* Allocate the per-cpu channel_table entries at boot; body elided by
 * extraction — restore from upstream. */
static int __init dma_channel_table_init(void)
{}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan:	DMA channel to test
 * @cpu:	CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{}

/*
 * NOTE(review): the three channel-selection helper bodies below were
 * stripped by the extraction — the empty braces are placeholders, not
 * real implementations. Restore from upstream dmaengine.c.
 */
/**
 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 * @cap:	capability to match
 * @cpu:	CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{}

/* Presumably tests @want against @device's cap_mask — TODO confirm against
 * upstream; body elided by extraction. */
static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{}

/*
 * NOTE(review): the reference-counting helper bodies below were stripped
 * by the extraction — empty braces are placeholders. Restore from
 * upstream dmaengine.c.
 */
/**
 * balance_ref_count - catch up the channel reference count
 * @chan:	channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{}

/* kref release callback for a dma_device; body elided by extraction. */
static void dma_device_release(struct kref *ref)
{}

/* Drop a reference taken on @device; body elided by extraction. */
static void dma_device_put(struct dma_device *device)
{}

/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan:	channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{}

/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan:	channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type:	transaction type
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_issue_pending_all(void)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_issue_pending_all);

/* NOTE(review): body elided by extraction — restore from upstream. */
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

/* Pick a free channel on @dev matching @mask and the optional filter;
 * body elided by extraction — restore from upstream. */
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{}

/* Wrapper around private_candidate() — body elided by extraction;
 * presumably also takes the module/channel references. */
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan:	target channel
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask:	capabilities that the channel must satisfy
 * @fn:		optional callback to disposition available channels
 * @fn_param:	opaque parameter to pass to dma_filter_fn()
 * @np:		device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(__dma_request_channel);

/* Look up the dma_slave_map entry matching @dev/@name on @device;
 * body elided by extraction — restore from upstream. */
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask:	capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_release_channel(struct dma_chan *chan)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
void dmaengine_get(void)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
void dmaengine_put(void)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dmaengine_put);

/* Presumably checks whether @device covers every async_tx capability —
 * TODO confirm; body elided by extraction. */
static bool device_has_all_tx_types(struct dma_device *device)
{}

/* Allocate a unique device id from dma_ida; body elided by extraction. */
static int get_dma_id(struct dma_device *device)
{}

/* Internal half of channel registration; body elided by extraction. */
static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan,
					       const char *name)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan,
				      const char *name)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

/* Internal half of channel unregistration; body elided by extraction. */
static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device:	pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
int dma_async_device_register(struct dma_device *device)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device:	pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_async_device_unregister(struct dma_device *device)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_async_device_unregister);

/* devm action callback wrapping dma_async_device_unregister();
 * body elided by extraction. */
static void dmaenginem_async_device_unregister(void *device)
{}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device:	pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
int dmaenginem_async_device_register(struct dma_device *device)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {};

#define __UNMAP_POOL(x)
static struct dmaengine_unmap_pool unmap_pool[] =;

/* Select the smallest unmap_pool entry holding at least @nr mappings;
 * body elided by extraction. */
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{}

/* kref release callback: unmap all entries and return the object to its
 * mempool; body elided by extraction. */
static void dmaengine_unmap(struct kref *kref)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

/* Tear down the unmap mempools/caches; body elided by extraction. */
static void dmaengine_destroy_unmap_pool(void)
{}

/* Create the unmap mempools/caches at init; body elided by extraction. */
static int __init dmaengine_init_unmap_pool(void)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dmaengine_get_unmap_data);

/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{}
/* EXPORT_SYMBOL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* Validate/latch the descriptor metadata mode (client vs engine);
 * body elided by extraction. */
static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{}

/* NOTE(review): body elided by extraction — restore from upstream. */
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

/* NOTE(review): body elided by extraction — restore from upstream. */
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

/* NOTE(review): body elided by extraction — restore from upstream. */
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx:		in-flight transaction to wait on
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx:		transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
/* NOTE(review): body elided by extraction — restore from upstream. */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{}
/* EXPORT_SYMBOL_GPL() had an empty argument; it exports the function above. */
EXPORT_SYMBOL_GPL(dma_run_dependencies);

/* Subsystem boot-time init (class registration, unmap pools, debugfs);
 * body elided by extraction — restore from upstream. */
static int __init dma_bus_init(void)
{}
arch_initcall(dma_bus_init);