// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * The DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c, which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <[email protected]>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <[email protected]>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_dma.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL
#define M2P_CONTROL_STALLINT
#define M2P_CONTROL_NFBINT
#define M2P_CONTROL_CH_ERROR_INT
#define M2P_CONTROL_ENABLE
#define M2P_CONTROL_ICE

#define M2P_INTERRUPT
#define M2P_INTERRUPT_STALL
#define M2P_INTERRUPT_NFB
#define M2P_INTERRUPT_ERROR

#define M2P_PPALLOC
#define M2P_STATUS

#define M2P_MAXCNT0
#define M2P_BASE0
#define M2P_MAXCNT1
#define M2P_BASE1

#define M2P_STATE_IDLE
#define M2P_STATE_STALL
#define M2P_STATE_ON
#define M2P_STATE_NEXT

/* M2M registers */
#define M2M_CONTROL
#define M2M_CONTROL_DONEINT
#define M2M_CONTROL_ENABLE
#define M2M_CONTROL_START
#define M2M_CONTROL_DAH
#define M2M_CONTROL_SAH
#define M2M_CONTROL_PW_SHIFT
#define M2M_CONTROL_PW_8
#define M2M_CONTROL_PW_16
#define M2M_CONTROL_PW_32
#define M2M_CONTROL_PW_MASK
#define M2M_CONTROL_TM_SHIFT
#define M2M_CONTROL_TM_TX
#define M2M_CONTROL_TM_RX
#define M2M_CONTROL_NFBINT
#define M2M_CONTROL_RSS_SHIFT
#define M2M_CONTROL_RSS_SSPRX
#define M2M_CONTROL_RSS_SSPTX
#define M2M_CONTROL_RSS_IDE
#define M2M_CONTROL_NO_HDSK
#define M2M_CONTROL_PWSC_SHIFT

#define M2M_INTERRUPT
#define M2M_INTERRUPT_MASK

#define M2M_STATUS
#define M2M_STATUS_CTL_SHIFT
#define M2M_STATUS_CTL_IDLE
#define M2M_STATUS_CTL_STALL
#define M2M_STATUS_CTL_MEMRD
#define M2M_STATUS_CTL_MEMWR
#define M2M_STATUS_CTL_BWCWAIT
#define M2M_STATUS_CTL_MASK
#define M2M_STATUS_BUF_SHIFT
#define M2M_STATUS_BUF_NO
#define M2M_STATUS_BUF_ON
#define M2M_STATUS_BUF_NEXT
#define M2M_STATUS_BUF_MASK
#define M2M_STATUS_DONE

#define M2M_BCR0
#define M2M_BCR1
#define M2M_SAR_BASE0
#define M2M_SAR_BASE1
#define M2M_DAR_BASE0
#define M2M_DAR_BASE1

#define DMA_MAX_CHAN_BYTES
#define DMA_MAX_CHAN_DESCRIPTORS

/*
 * M2P channels.
 *
 * Note that these values are also directly used for setting the PPALLOC
 * register.
 */
#define EP93XX_DMA_I2S1
#define EP93XX_DMA_I2S2
#define EP93XX_DMA_AAC1
#define EP93XX_DMA_AAC2
#define EP93XX_DMA_AAC3
#define EP93XX_DMA_I2S3
#define EP93XX_DMA_UART1
#define EP93XX_DMA_UART2
#define EP93XX_DMA_UART3
#define EP93XX_DMA_IRDA
/* M2M channels */
#define EP93XX_DMA_SSP
#define EP93XX_DMA_IDE

enum ep93xx_dma_type {};

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: set when this descriptor has completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {};

struct ep93xx_dma_chan_cfg {};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @dma_cfg: channel number, direction
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the following fields
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 */
struct ep93xx_dma_chan {};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {};

struct ep93xx_edma_data {};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{}

static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
{}

/*
 * ep93xx_dma_chan_direction - returns the direction the channel can be used in
 *
 * This function can be used in filter functions to find out whether the
 * channel supports a given DMA direction. Only M2P channels have such a
 * limitation; for M2M channels the direction is configurable.
 */
static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{}
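
/*
 * Illustrative sketch, not part of the driver: a slave filter along these
 * lines could use ep93xx_dma_chan_direction() to pick only M2P channels that
 * move data towards a device. The function name is made up for illustration.
 */
static bool __maybe_unused
ep93xx_dma_example_tx_filter(struct dma_chan *chan, void *filter_param)
{
	/* Only M2P channels have a fixed direction worth checking. */
	if (!ep93xx_dma_chan_is_m2p(chan))
		return false;

	return ep93xx_dma_chan_direction(chan) == DMA_MEM_TO_DEV;
}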

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{}
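
/*
 * Illustrative sketch, not part of the driver: one way the flattening
 * described above could look. The helper name is made up; the descriptor
 * field names (@node, @tx_list, @txd) follow the ep93xx_dma_desc kernel-doc.
 * Like ep93xx_dma_set_active(), it would be called with @edmac->lock held.
 */
static void __maybe_unused
ep93xx_dma_example_flatten(struct ep93xx_dma_chan *edmac,
			   struct ep93xx_dma_desc *desc)
{
	/* The head descriptor becomes the first entry of the active chain. */
	list_add_tail(&desc->node, &edmac->active);

	/* Move every chained descriptor after it, preserving the order. */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d;

		d = list_first_entry(&desc->tx_list, struct ep93xx_dma_desc,
				     node);

		/*
		 * Copy the callback from the head so that completion of any
		 * flattened entry can invoke it (handy for cyclic transfers).
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}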

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{}
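
/*
 * Illustrative sketch, not part of the driver, of the advance step described
 * above: rotate @edmac->active so that the next flattened descriptor becomes
 * the head, and report whether there is still work left. The helper name is
 * made up, and the cyclic test uses a made-up flag bit that stands in for
 * however @edmac->flags (assumed to be an unsigned long bitmask) tracks
 * cyclic mode.
 */
static bool __maybe_unused
ep93xx_dma_example_advance(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	/* Make the next descriptor in the flattened chain the head. */
	list_rotate_left(&edmac->active);

	/* A cyclic transfer never runs out of descriptors. */
	if (test_bit(0 /* hypothetical "is cyclic" bit */, &edmac->flags))
		return true;

	desc = list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
	if (!desc)
		return false;

	/*
	 * Only the descriptor that was actually submitted carries a cookie,
	 * so once rotation brings it back to the head the chain is done.
	 */
	return !desc->txd.cookie;
}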

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts DONE when the DMA
 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop a currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM states to check the current state of the DMA
 * channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{}
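
/*
 * Illustrative sketch, not part of the driver: the workaround described above
 * boils down to trusting DONE only when the Buffer and Control FSM fields of
 * M2M_STATUS also show the channel has nothing left to do. The helper name is
 * made up, and the M2M_STATUS_* macros are the (elided) register definitions
 * earlier in this file.
 */
static bool __maybe_unused
m2m_example_transfer_done(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);

	/*
	 * DONE alone is not reliable: the channel may still be fetching or
	 * draining a buffer, so require both FSMs to be quiescent as well.
	 */
	return (status & M2M_STATUS_DONE) &&
	       (status & M2M_STATUS_BUF_MASK) == M2M_STATUS_BUF_NO &&
	       (status & M2M_STATUS_CTL_MASK) == M2M_STATUS_CTL_IDLE;
}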

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{}
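
/*
 * Illustrative sketch, not part of the driver, of the idle-advance described
 * above: if nothing is active, pull the next pending chain from @edmac->queue,
 * flatten it into @edmac->active and hand it to the hardware. The helper name
 * is made up; it reuses the example flatten helper above and the hw_submit
 * method documented in struct ep93xx_dma_engine.
 */
static void __maybe_unused
ep93xx_dma_example_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *next;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		/* Either still busy or nothing pending: nothing to do. */
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next pending chain and make it the active one. */
	next = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&next->node);
	ep93xx_dma_example_flatten(edmac, next);

	/* Push the new active chain to the hardware. */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}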

static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Executes the given descriptor on the hardware or, if the hardware is busy,
 * queues the descriptor to be executed later. Returns a cookie which can be
 * used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{}
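
/*
 * Illustrative sketch, not part of the driver, of the submit path described
 * above: assign a cookie, then either start the chain right away or park it
 * on @edmac->queue. The helper name is made up; dma_cookie_assign() comes
 * from the shared helpers in "dmaengine.h" and container_of() relies on the
 * @txd member documented in the ep93xx_dma_desc kernel-doc.
 */
static dma_cookie_t __maybe_unused
ep93xx_dma_example_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc = container_of(tx, struct ep93xx_dma_desc,
						    txd);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&edmac->active)) {
		/* Hardware is idle: start this chain immediately. */
		ep93xx_dma_example_flatten(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		/* Hardware is busy: queue the chain to be picked up later. */
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}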

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources for
 *
 * Allocates the necessary resources for the given DMA channel and returns
 * the number of allocated descriptors for the channel. A negative errno is
 * returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for a cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once each period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{}
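
/*
 * Illustrative sketch, not part of the driver: the cyclic preparation
 * described above amounts to carving the buffer into @period_len sized
 * pieces and building one descriptor per piece, chained off the first one.
 * The helper name is made up; field names follow the ep93xx_dma_desc and
 * ep93xx_dma_chan kernel-doc, and error unwinding is omitted for brevity.
 */
static __maybe_unused struct ep93xx_dma_desc *
ep93xx_dma_example_build_cyclic(struct ep93xx_dma_chan *edmac,
				dma_addr_t dma_addr, size_t buf_len,
				size_t period_len,
				enum dma_transfer_direction dir)
{
	struct ep93xx_dma_desc *first = NULL;
	size_t offset;

	/* Assumes @buf_len is a multiple of @period_len. */
	for (offset = 0; offset < buf_len; offset += period_len) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_desc_get(edmac);

		if (!desc)
			return NULL;

		/* Each period moves between the buffer and the device FIFO. */
		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}
		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	return first;
}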

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{}

static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
{}

static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
{}

static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{}

static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
{}

static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{}

static int ep93xx_dma_probe(struct platform_device *pdev)
{}

static const struct ep93xx_edma_data edma_m2p =;

static const struct ep93xx_edma_data edma_m2m =;

static const struct of_device_id ep93xx_dma_of_ids[] =;
MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);

static const struct platform_device_id ep93xx_dma_driver_ids[] =;

static struct platform_driver ep93xx_dma_driver =;

module_platform_driver();

MODULE_AUTHOR();
MODULE_DESCRIPTION();