// SPDX-License-Identifier: GPL-2.0 /* * Xilinx ZynqMP DPDMA Engine driver * * Copyright (C) 2015 - 2020 Xilinx, Inc. * * Author: Hyun Woo Kwon <[email protected]> */ #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dma/xilinx_dpdma.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <dt-bindings/dma/xlnx-zynqmp-dpdma.h> #include "../dmaengine.h" #include "../virt-dma.h" /* DPDMA registers */ #define XILINX_DPDMA_ERR_CTRL … #define XILINX_DPDMA_ISR … #define XILINX_DPDMA_IMR … #define XILINX_DPDMA_IEN … #define XILINX_DPDMA_IDS … #define XILINX_DPDMA_INTR_DESC_DONE(n) … #define XILINX_DPDMA_INTR_DESC_DONE_MASK … #define XILINX_DPDMA_INTR_NO_OSTAND(n) … #define XILINX_DPDMA_INTR_NO_OSTAND_MASK … #define XILINX_DPDMA_INTR_AXI_ERR(n) … #define XILINX_DPDMA_INTR_AXI_ERR_MASK … #define XILINX_DPDMA_INTR_DESC_ERR(n) … #define XILINX_DPDMA_INTR_DESC_ERR_MASK … #define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL … #define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL … #define XILINX_DPDMA_INTR_AXI_4K_CROSS … #define XILINX_DPDMA_INTR_VSYNC … #define XILINX_DPDMA_INTR_CHAN_ERR_MASK … #define XILINX_DPDMA_INTR_CHAN_ERR … #define XILINX_DPDMA_INTR_GLOBAL_ERR … #define XILINX_DPDMA_INTR_ERR_ALL … #define XILINX_DPDMA_INTR_CHAN_MASK … #define XILINX_DPDMA_INTR_GLOBAL_MASK … #define XILINX_DPDMA_INTR_ALL … #define XILINX_DPDMA_EISR … #define XILINX_DPDMA_EIMR … #define XILINX_DPDMA_EIEN … #define XILINX_DPDMA_EIDS … #define XILINX_DPDMA_EINTR_INV_APB … #define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) … #define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK … #define XILINX_DPDMA_EINTR_PRE_ERR(n) … #define XILINX_DPDMA_EINTR_PRE_ERR_MASK … #define XILINX_DPDMA_EINTR_CRC_ERR(n) … #define XILINX_DPDMA_EINTR_CRC_ERR_MASK … #define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) … #define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK … #define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) … #define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK … #define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL … #define XILINX_DPDMA_EINTR_CHAN_ERR_MASK … #define XILINX_DPDMA_EINTR_CHAN_ERR … #define XILINX_DPDMA_EINTR_GLOBAL_ERR … #define XILINX_DPDMA_EINTR_ALL … #define XILINX_DPDMA_CNTL … #define XILINX_DPDMA_GBL … #define XILINX_DPDMA_GBL_TRIG_MASK(n) … #define XILINX_DPDMA_GBL_RETRIG_MASK(n) … #define XILINX_DPDMA_ALC0_CNTL … #define XILINX_DPDMA_ALC0_STATUS … #define XILINX_DPDMA_ALC0_MAX … #define XILINX_DPDMA_ALC0_MIN … #define XILINX_DPDMA_ALC0_ACC … #define XILINX_DPDMA_ALC0_ACC_TRAN … #define XILINX_DPDMA_ALC1_CNTL … #define XILINX_DPDMA_ALC1_STATUS … #define XILINX_DPDMA_ALC1_MAX … #define XILINX_DPDMA_ALC1_MIN … #define XILINX_DPDMA_ALC1_ACC … #define XILINX_DPDMA_ALC1_ACC_TRAN … /* Channel register */ #define XILINX_DPDMA_CH_BASE … #define XILINX_DPDMA_CH_OFFSET … #define XILINX_DPDMA_CH_DESC_START_ADDRE … #define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK … #define XILINX_DPDMA_CH_DESC_START_ADDR … #define XILINX_DPDMA_CH_DESC_NEXT_ADDRE … #define XILINX_DPDMA_CH_DESC_NEXT_ADDR … #define XILINX_DPDMA_CH_PYLD_CUR_ADDRE … #define XILINX_DPDMA_CH_PYLD_CUR_ADDR … #define XILINX_DPDMA_CH_CNTL … #define XILINX_DPDMA_CH_CNTL_ENABLE … #define XILINX_DPDMA_CH_CNTL_PAUSE … #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK … #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK … 
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK … #define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS … #define XILINX_DPDMA_CH_STATUS … #define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK … #define XILINX_DPDMA_CH_VDO … #define XILINX_DPDMA_CH_PYLD_SZ … #define XILINX_DPDMA_CH_DESC_ID … #define XILINX_DPDMA_CH_DESC_ID_MASK … /* DPDMA descriptor fields */ #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE … #define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR … #define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE … #define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE … #define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE … #define XILINX_DPDMA_DESC_CONTROL_LAST … #define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC … #define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME … #define XILINX_DPDMA_DESC_ID_MASK … #define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK … #define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK … #define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK … #define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK … #define XILINX_DPDMA_ALIGN_BYTES … #define XILINX_DPDMA_LINESIZE_ALIGN_BITS … #define XILINX_DPDMA_NUM_CHAN … struct xilinx_dpdma_chan; /** * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor * @control: control configuration field * @desc_id: descriptor ID * @xfer_size: transfer size * @hsize_stride: horizontal size and stride * @timestamp_lsb: LSB of time stamp * @timestamp_msb: MSB of time stamp * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr) * @next_desc: next descriptor 32 bit address * @src_addr: payload source address (1st page, 32 LSB) * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs) * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs) * @src_addr2: payload source address (2nd page, 32 LSB) * @src_addr3: payload source address (3rd page, 32 LSB) * @src_addr4: payload source address (4th page, 32 LSB) * @src_addr5: payload source address (5th page, 32 LSB) * @crc: descriptor CRC */ struct xilinx_dpdma_hw_desc { … } __aligned(…); /** * struct xilinx_dpdma_sw_desc - DPDMA software descriptor * @hw: DPDMA hardware descriptor * @node: list node for software descriptors * @dma_addr: DMA address of the software descriptor */ struct xilinx_dpdma_sw_desc { … }; /** * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor * @vdesc: virtual DMA descriptor * @chan: DMA channel * @descriptors: list of software descriptors * @error: an error has been detected with this descriptor */ struct xilinx_dpdma_tx_desc { … }; #define to_dpdma_tx_desc(_desc) … /** * struct xilinx_dpdma_chan - DPDMA channel * @vchan: virtual DMA channel * @reg: register base address * @id: channel ID * @wait_to_stop: queue to wait for outstanding transactions before stopping * @running: true if the channel is running * @first_frame: flag for the first frame of stream * @video_group: flag if multi-channel operation is needed for video channels * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before * @vchan.lock, if both are to be held. 
 * @desc_pool: descriptor allocation pool * @err_task: error IRQ bottom half handler * @desc: References to descriptors being processed * @desc.pending: Descriptor scheduled to the hardware, pending execution * @desc.active: Descriptor being executed by the hardware * @xdev: DPDMA device */ struct xilinx_dpdma_chan { … }; #define to_xilinx_chan(_chan) … /** * struct xilinx_dpdma_device - DPDMA device * @common: generic dma device structure * @reg: register base address * @dev: generic device structure * @irq: the interrupt number * @axi_clk: axi clock * @chan: DPDMA channels * @ext_addr: flag for 64 bit system (48 bit addressing) */ struct xilinx_dpdma_device { … }; /* ----------------------------------------------------------------------------- * DebugFS */ #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE … #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR … /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */ enum xilinx_dpdma_testcases { … }; struct xilinx_dpdma_debugfs { … }; static struct xilinx_dpdma_debugfs dpdma_debugfs; struct xilinx_dpdma_debugfs_request { … }; static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) { … } static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf) { … } static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args) { … } /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */ static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = …; static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { … } static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { … } static const struct file_operations fops_xilinx_dpdma_dbgfs = …; static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) { … } /* ----------------------------------------------------------------------------- * I/O Accessors */ static inline u32 dpdma_read(void __iomem *base, u32 offset) { … } static inline void dpdma_write(void __iomem *base, u32 offset, u32 val) { … } static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr) { … } static inline void dpdma_set(void __iomem *base, u32 offset, u32 set) { … } /* ----------------------------------------------------------------------------- * Descriptor Operations */ /** * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor * @xdev: DPDMA device * @sw_desc: The software descriptor in which to set DMA addresses * @prev: The previous descriptor * @dma_addr: array of dma addresses * @num_src_addr: number of addresses in @dma_addr * * Set all the DMA addresses in the hardware descriptor corresponding to @sw_desc * from @dma_addr. If a previous descriptor is specified in @prev, its next * descriptor DMA address is set to the DMA address of @sw_desc. @prev may be * identical to @sw_desc for cyclic transfers. */ static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev, struct xilinx_dpdma_sw_desc *sw_desc, struct xilinx_dpdma_sw_desc *prev, dma_addr_t dma_addr[], unsigned int num_src_addr) { … } /** * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor * @chan: DPDMA channel * * Allocate a software descriptor from the channel's descriptor pool. * * Return: a software descriptor or NULL. 
 */ static struct xilinx_dpdma_sw_desc * xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor * @chan: DPDMA channel * @sw_desc: software descriptor to free * * Free a software descriptor from the channel's descriptor pool. */ static void xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan, struct xilinx_dpdma_sw_desc *sw_desc) { … } /** * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor * @chan: DPDMA channel * @tx_desc: tx descriptor to dump * * Dump the contents of a tx descriptor. */ static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan, struct xilinx_dpdma_tx_desc *tx_desc) { … } /** * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor * @chan: DPDMA channel * * Allocate a tx descriptor. * * Return: a tx descriptor or NULL. */ static struct xilinx_dpdma_tx_desc * xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor * @vdesc: virtual DMA descriptor * * Free the virtual DMA descriptor @vdesc including its software descriptors. */ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc) { … } /** * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor * @chan: DPDMA channel * @buf_addr: buffer address * @buf_len: buffer length * @period_len: length of a single period * @flags: tx flags argument passed in to prepare function * * Prepare a tx descriptor including internal software/hardware descriptors * for the given cyclic transaction. * * Return: A dma async tx descriptor on success, or NULL. */ static struct dma_async_tx_descriptor * xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, unsigned long flags) { … } /** * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma * descriptor * @chan: DPDMA channel * @xt: dma interleaved template * * Prepare a tx descriptor including internal software/hardware descriptors * based on @xt. * * Return: A DPDMA TX descriptor on success, or NULL. */ static struct xilinx_dpdma_tx_desc * xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan, struct dma_interleaved_template *xt) { … } /* ----------------------------------------------------------------------------- * DPDMA Channel Operations */ /** * xilinx_dpdma_chan_enable - Enable the channel * @chan: DPDMA channel * * Enable the channel and its interrupts. Set the QoS values for video class. */ static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_disable - Disable the channel * @chan: DPDMA channel * * Disable the channel and its interrupts. */ static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_pause - Pause the channel * @chan: DPDMA channel * * Pause the channel. */ static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_unpause - Unpause the channel * @chan: DPDMA channel * * Unpause the channel. */ static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan) { … } static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_queue_transfer - Queue the next transfer * @chan: DPDMA channel * * Queue the next descriptor, if any, to the hardware. If the channel is * stopped, start it first. Otherwise retrigger it with the next descriptor. 
 */ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_ostand - Number of outstanding transactions * @chan: DPDMA channel * * Read and return the number of outstanding transactions from the status register. * * Return: Number of outstanding transactions from the status register. */ static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event * @chan: DPDMA channel * * Notify waiters of the 'no outstanding transaction' event, so they can stop * the channel safely. This function is supposed to be called when the 'no * outstanding' interrupt is generated. The 'no outstanding' interrupt is * disabled and should be re-enabled when this event is handled. If the channel * status register still shows some number of outstanding transactions, the * interrupt remains enabled. * * Return: 0 on success, or -EWOULDBLOCK if there are still outstanding * transactions. */ static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq * @chan: DPDMA channel * * Wait for the no outstanding transaction interrupt. This function can sleep * for up to 50ms. * * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code * from wait_event_interruptible_timeout(). */ static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status * @chan: DPDMA channel * * Poll the outstanding transaction status, and return when there's no * outstanding transaction. This function can be used in interrupt context * or where atomicity is required. The calling thread may wait for more than 50ms. * * Return: 0 on success, or -ETIMEDOUT. */ static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_stop - Stop the channel * @chan: DPDMA channel * * Stop a previously paused channel by first waiting for completion of all * outstanding transactions and then disabling the channel. * * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop. */ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion * @chan: DPDMA channel * * Handle completion of the currently active descriptor (@chan->desc.active). As * we currently support cyclic transfers only, this just invokes the cyclic * callback. The descriptor will be completed at the VSYNC interrupt when a new * descriptor replaces it. */ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling * @chan: DPDMA channel * * At VSYNC the active descriptor may have been replaced by the pending * descriptor. Detect this through the DESC_ID and perform appropriate * bookkeeping. */ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) { … } /** * xilinx_dpdma_chan_err - Detect any channel error * @chan: DPDMA channel * @isr: masked Interrupt Status Register * @eisr: Error Interrupt Status Register * * Return: true if any channel error occurs, or false otherwise. */ static bool xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr) { … } /** * xilinx_dpdma_chan_handle_err - DPDMA channel error handling * @chan: DPDMA channel * * This function is called when any channel error or any global error occurs. 
 * The function disables the channel that has been paused due to errors and * determines whether the currently active descriptor can be rescheduled, * depending on the descriptor status. */ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan) { … } /* ----------------------------------------------------------------------------- * DMA Engine Operations */ static struct dma_async_tx_descriptor * xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags) { … } static struct dma_async_tx_descriptor * xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan, struct dma_interleaved_template *xt, unsigned long flags) { … } /** * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel * @dchan: DMA channel * * Allocate a descriptor pool for the channel. * * Return: 0 on success, or -ENOMEM if failed to allocate a pool. */ static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan) { … } /** * xilinx_dpdma_free_chan_resources - Free all resources for the channel * @dchan: DMA channel * * Free resources associated with the virtual DMA channel, and destroy the * descriptor pool. */ static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan) { … } static void xilinx_dpdma_issue_pending(struct dma_chan *dchan) { … } static int xilinx_dpdma_config(struct dma_chan *dchan, struct dma_slave_config *config) { … } static int xilinx_dpdma_pause(struct dma_chan *dchan) { … } static int xilinx_dpdma_resume(struct dma_chan *dchan) { … } /** * xilinx_dpdma_terminate_all - Terminate the channel and descriptors * @dchan: DMA channel * * Pause the channel without waiting for ongoing transfers to complete. Waiting * for completion is performed by xilinx_dpdma_synchronize(), which will disable * the channel to complete the stop. * * All the descriptors associated with the channel that are guaranteed not to * be touched by the hardware are freed. The pending and active descriptors are * not touched; they will be freed either upon completion, or by * xilinx_dpdma_synchronize(). * * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop. */ static int xilinx_dpdma_terminate_all(struct dma_chan *dchan) { … } /** * xilinx_dpdma_synchronize - Synchronize callback execution * @dchan: DMA channel * * Synchronizing callback execution ensures that all previously issued * transfers have completed and all associated callbacks have been called and * have returned. * * This function waits for the DMA channel to stop. It assumes it has been * paused by a previous call to dmaengine_terminate_async(), and that no new * pending descriptors have been issued with dma_async_issue_pending(). The * behaviour is undefined otherwise. */ static void xilinx_dpdma_synchronize(struct dma_chan *dchan) { … } /* ----------------------------------------------------------------------------- * Interrupt and Tasklet Handling */ /** * xilinx_dpdma_err - Detect any global error * @isr: Interrupt Status Register * @eisr: Error Interrupt Status Register * * Return: True if any global error occurs, or false otherwise. */ static bool xilinx_dpdma_err(u32 isr, u32 eisr) { … } /** * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt * @xdev: DPDMA device * @isr: masked Interrupt Status Register * @eisr: Error Interrupt Status Register * * Handle any error reported in @isr and @eisr. This function disables the * corresponding error interrupts; they should be re-enabled once handling * is done. 
*/ static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev, u32 isr, u32 eisr) { … } /** * xilinx_dpdma_enable_irq - Enable interrupts * @xdev: DPDMA device * * Enable interrupts. */ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev) { … } /** * xilinx_dpdma_disable_irq - Disable interrupts * @xdev: DPDMA device * * Disable interrupts. */ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev) { … } /** * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling * @t: pointer to the tasklet associated with this handler * * Per channel error handling tasklet. This function waits for the outstanding * transaction to complete and triggers error handling. After error handling, * re-enable channel error interrupts, and restart the channel if needed. */ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t) { … } static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data) { … } /* ----------------------------------------------------------------------------- * Initialization & Cleanup */ static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev, unsigned int chan_id) { … } static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan) { … } static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { … } static void dpdma_hw_init(struct xilinx_dpdma_device *xdev) { … } static int xilinx_dpdma_probe(struct platform_device *pdev) { … } static void xilinx_dpdma_remove(struct platform_device *pdev) { … } static const struct of_device_id xilinx_dpdma_of_match[] = …; MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match); static struct platform_driver xilinx_dpdma_driver = …; module_platform_driver(…) …; MODULE_AUTHOR(…) …; MODULE_DESCRIPTION(…) …; MODULE_LICENSE(…) …;
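/* -----------------------------------------------------------------------------
 * Usage example (illustrative sketch, not part of the driver)
 */

/*
 * The sketch below illustrates how a client driver (e.g. a display controller)
 * might consume the interleaved DMA interface exposed above: request a channel
 * by name, describe one framebuffer plane as repeated per-line chunks, submit
 * the transfer and issue it. It is an assumption-laden example: the "vid0"
 * dma-names entry, the helper name and its parameters are hypothetical, and
 * the DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT flag combination reflects the usual
 * dmaengine pattern for repeated frame transfers rather than anything stated
 * by the elided function bodies in this file.
 */
static int __maybe_unused example_dpdma_start_frames(struct device *dev,
						     dma_addr_t fb_addr,
						     size_t line_bytes,
						     size_t stride,
						     unsigned int lines)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	struct dma_chan *chan;
	int ret = 0;

	/* "vid0" is a hypothetical dma-names entry for a video channel. */
	chan = dma_request_chan(dev, "vid0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* One data chunk per line, repeated for every line of the frame. */
	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto err_release;
	}

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = fb_addr;
	xt->src_sgl = true;
	xt->src_inc = true;
	xt->numf = lines;
	xt->frame_size = 1;
	xt->sgl[0].size = line_bytes;
	xt->sgl[0].icg = stride - line_bytes;

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_CTRL_ACK | DMA_PREP_REPEAT |
					    DMA_PREP_LOAD_EOT);
	kfree(xt);
	if (!tx) {
		ret = -ENOMEM;
		goto err_release;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;

err_release:
	dma_release_channel(chan);
	return ret;
}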