/* linux/drivers/dma/nbpfaxi.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <[email protected]>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

/*
 * Register map and bit-field definitions for the NBPF ("Type-AXI") DMAC.
 * NOTE(review): the numeric values of every macro below have been elided
 * from this copy of the source — consult the upstream driver for the real
 * offsets/masks before using any of them.
 */
#define NBPF_REG_CHAN_OFFSET
#define NBPF_REG_CHAN_SIZE

/* Channel Current Transaction Byte register */
#define NBPF_CHAN_CUR_TR_BYTE

/* Channel Status register */
#define NBPF_CHAN_STAT
#define NBPF_CHAN_STAT_EN
#define NBPF_CHAN_STAT_TACT
#define NBPF_CHAN_STAT_ERR
#define NBPF_CHAN_STAT_END
#define NBPF_CHAN_STAT_TC
#define NBPF_CHAN_STAT_DER

/* Channel Control register */
#define NBPF_CHAN_CTRL
#define NBPF_CHAN_CTRL_SETEN
#define NBPF_CHAN_CTRL_CLREN
#define NBPF_CHAN_CTRL_STG
#define NBPF_CHAN_CTRL_SWRST
#define NBPF_CHAN_CTRL_CLRRQ
#define NBPF_CHAN_CTRL_CLREND
#define NBPF_CHAN_CTRL_CLRTC
#define NBPF_CHAN_CTRL_SETSUS
#define NBPF_CHAN_CTRL_CLRSUS

/* Channel Configuration register */
#define NBPF_CHAN_CFG
#define NBPF_CHAN_CFG_SEL
#define NBPF_CHAN_CFG_REQD
#define NBPF_CHAN_CFG_LOEN
#define NBPF_CHAN_CFG_HIEN
#define NBPF_CHAN_CFG_LVL
#define NBPF_CHAN_CFG_AM
#define NBPF_CHAN_CFG_SDS
#define NBPF_CHAN_CFG_DDS
#define NBPF_CHAN_CFG_SAD
#define NBPF_CHAN_CFG_DAD
#define NBPF_CHAN_CFG_TM
#define NBPF_CHAN_CFG_DEM
#define NBPF_CHAN_CFG_TCM
#define NBPF_CHAN_CFG_SBE
#define NBPF_CHAN_CFG_RSEL
#define NBPF_CHAN_CFG_RSW
#define NBPF_CHAN_CFG_REN
#define NBPF_CHAN_CFG_DMS

/* Next / Current Link Address registers (per channel) */
#define NBPF_CHAN_NXLA
#define NBPF_CHAN_CRLA

/* Link Header field */
#define NBPF_HEADER_LV
#define NBPF_HEADER_LE
#define NBPF_HEADER_WBD
#define NBPF_HEADER_DIM

/* Global control / status registers */
#define NBPF_CTRL
#define NBPF_CTRL_PR
#define NBPF_CTRL_LVINT

#define NBPF_DSTAT_ER
#define NBPF_DSTAT_END

#define NBPF_DMA_BUSWIDTHS

/* Per-SoC DMAC configuration. NOTE(review): members elided in this copy. */
struct nbpf_config {};

/*
 * We've got 3 types of objects, used to describe DMA transfers:
 * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
 *	in it, used to communicate with the user
 * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer
 *	queuing, these must be DMAable, using either the streaming DMA API or
 *	allocated from coherent memory - one per SG segment
 * 3. one per SG segment descriptors, used to manage HW link descriptors from
 *	(2). They do not have to be DMAable. They can either be (a) allocated
 *	together with link descriptors as mixed (DMA / CPU) objects, or (b)
 *	separately. Even if allocated separately it would be best to link them
 *	to link descriptors once during channel resource allocation and always
 *	use them as a single object.
 * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
 * treated as a single SG segment descriptor.
 */

/*
 * Hardware link descriptor as fetched by the DMAC (__packed: exact in-memory
 * layout matters). NOTE(review): fields elided in this copy of the source.
 */
struct nbpf_link_reg {} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

/* CPU-side per-SG-segment bookkeeping (object type 3 in the comment above);
 * NOTE(review): fields elided in this copy. */
struct nbpf_link_desc {};

/**
 * struct nbpf_desc - DMA transfer descriptor
 * @async_tx:	dmaengine object
 * @user_wait:	waiting for a user ack
 * @length:	total transfer length
 * @chan:	associated DMAC channel
 * @sg:		list of hardware descriptors, represented by struct nbpf_link_desc
 * @node:	member in channel descriptor lists
 */
/* NOTE(review): struct members elided in this copy; the kernel-doc above
 * lists what the full driver declares here. */
struct nbpf_desc {};

/* Take a wild guess: allocate 4 segments per descriptor */
/* NOTE(review): macro values elided in this copy of the source. */
#define NBPF_SEGMENTS_PER_DESC
#define NBPF_DESCS_PER_PAGE
#define NBPF_SEGMENTS_PER_PAGE

/* One page worth of pre-allocated descriptors and link segments;
 * NOTE(review): members elided in this copy. */
struct nbpf_desc_page {};

/**
 * struct nbpf_channel - one DMAC channel
 * @dma_chan:	standard dmaengine channel object
 * @tasklet:	channel specific tasklet used for callbacks
 * @base:	register address base
 * @nbpf:	DMAC
 * @name:	IRQ name
 * @irq:	IRQ number
 * @slave_src_addr:	source address for slave DMA
 * @slave_src_width:	source slave data size in bytes
 * @slave_src_burst:	maximum source slave burst size in bytes
 * @slave_dst_addr:	destination address for slave DMA
 * @slave_dst_width:	destination slave data size in bytes
 * @slave_dst_burst:	maximum destination slave burst size in bytes
 * @terminal:	DMA terminal, assigned to this channel
 * @dmarq_cfg:	DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
 * @flags:	configuration flags from DT
 * @lock:	protect descriptor lists
 * @free_links:	list of free link descriptors
 * @free:	list of free descriptors
 * @queued:	list of queued descriptors
 * @active:	list of descriptors, scheduled for processing
 * @done:	list of completed descriptors, waiting post-processing
 * @desc_page:	list of additionally allocated descriptor pages - if any
 * @running:	linked descriptor of running transaction
 * @paused:	are translations on this channel paused?
 */
/* NOTE(review): members elided; the kernel-doc above lists them. */
struct nbpf_channel {};

/* Whole-controller state; NOTE(review): members elided in this copy. */
struct nbpf_device {};

/* Supported DMAC variants; NOTE(review): enumerators elided. */
enum nbpf_model {};

/* Per-model configuration table, indexed by enum nbpf_model.
 * NOTE(review): the initializer has been elided, leaving "=;", which is not
 * valid C — this copy of the file does not compile as-is. */
static struct nbpf_config nbpf_cfg[] =;

/* Convert a struct dma_chan * to the containing struct nbpf_channel *;
 * NOTE(review): macro body elided in this copy. */
#define nbpf_to_chan(d)

/*
 * dmaengine drivers seem to have a lot in common and instead of sharing more
 * code, they reimplement those common algorithms independently. In this driver
 * we try to separate the hardware-specific part from the (largely) generic
 * part. This improves code readability and makes it possible in the future to
 * reuse the generic code in form of a helper library. That generic code should
 * be suitable for various DMA controllers, using transfer descriptors in RAM
 * and pushing one SG list at a time to the DMA controller.
 */

/*		Hardware-specific part		*/

/*
 * MMIO accessors. In the full driver these presumably wrap ioread32() /
 * iowrite32() on the per-channel and global register bases — TODO confirm
 * against the upstream source; all four bodies are elided in this copy.
 */
static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{}

/* Stop the channel. NOTE(review): body elided in this copy of the source,
 * as are all function bodies below. */
static void nbpf_chan_halt(struct nbpf_channel *chan)
{}

/* Presumably polls NBPF_CHAN_STAT for completion/activity — TODO confirm. */
static bool nbpf_status_get(struct nbpf_channel *chan)
{}

/* Acknowledge (clear) channel end/completion status. */
static void nbpf_status_ack(struct nbpf_channel *chan)
{}

/* Read the controller-wide error status (cf. NBPF_DSTAT_ER). */
static u32 nbpf_error_get(struct nbpf_device *nbpf)
{}

/* Map an error status bitmask to the faulting channel. */
static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{}

/* Clear the error condition on @chan so it can be reused. */
static void nbpf_error_clear(struct nbpf_channel *chan)
{}

/* Kick off the hardware on @desc; returns 0 or a negative errno. */
static int nbpf_start(struct nbpf_desc *desc)
{}

/* Program channel configuration (NBPF_CHAN_CFG) for the current slave setup. */
static void nbpf_chan_prepare(struct nbpf_channel *chan)
{}

/* Like nbpf_chan_prepare(), but with defaults (e.g. for memcpy) — presumed. */
static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{}

/* Write the prepared configuration out to the channel registers. */
static void nbpf_chan_configure(struct nbpf_channel *chan)
{}

/* Compute the SDS/DDS data-size field for a transfer of @size bytes. */
static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{}

/* Derive the per-burst transfer size from slave bus width and burst length. */
static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{}

/*
 * We need a way to recognise slaves, whose data is sent "raw" over the bus,
 * i.e. it isn't known in advance how many bytes will be received. Therefore
 * the slave driver has to provide a "large enough" buffer and either read the
 * buffer, when it is full, or detect, that some data has arrived, then wait for
 * a timeout, if no more data arrives - receive what's already there. We want to
 * handle such slaves in a special way to allow an optimised mode for other
 * users, for whom the amount of data is known in advance. So far there's no way
 * to recognise such slaves. We use a data-width check to distinguish between
 * the SD host and the PL011 UART.
 */

/* Fill one hardware link descriptor for a single SG segment. */
static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{}

/* Residue calculation: bytes still outstanding on the running transaction. */
static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{}

/* One-time global controller configuration (cf. NBPF_CTRL). */
static void nbpf_configure(struct nbpf_device *nbpf)
{}

/*		Generic part			*/

/* DMA ENGINE functions */
/* NOTE(review): all bodies in this section are elided in this copy of the
 * source; the comments below state each callback's dmaengine contract. */

/* dmaengine .device_issue_pending: start queued descriptors. */
static void nbpf_issue_pending(struct dma_chan *dchan)
{}

/* dmaengine .device_tx_status: report cookie status and residue. */
static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{}

/* .tx_submit on the async_tx descriptor: assign a cookie, queue the desc. */
static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{}

/* Allocate one more page of descriptors/segments for @chan. */
static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{}

/* Return @desc (and its link segments) to the channel's free lists. */
static void nbpf_desc_put(struct nbpf_desc *desc)
{}

/* Recycle completed descriptors that the user has acked. */
static void nbpf_scan_acked(struct nbpf_channel *chan)
{}

/*
 * We have to allocate descriptors with the channel lock dropped. This means,
 * before we re-acquire the lock buffers can be taken already, so we have to
 * re-check after re-acquiring the lock and possibly retry, if buffers are gone
 * again.
 */
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{}

/* Abort all activity on @chan and move its descriptors off the live lists. */
static void nbpf_chan_idle(struct nbpf_channel *chan)
{}

/* dmaengine .device_pause: suspend the channel (cf. NBPF_CHAN_CTRL_SETSUS).
 * NOTE(review): all bodies in this section are elided in this copy. */
static int nbpf_pause(struct dma_chan *dchan)
{}

/* dmaengine .device_terminate_all: abort and flush every descriptor. */
static int nbpf_terminate_all(struct dma_chan *dchan)
{}

/* dmaengine .device_config: store the slave configuration for later preps. */
static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{}

/* Common SG preparation helper used by memcpy and slave_sg preps below. */
static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{}

/* dmaengine .device_prep_dma_memcpy. */
static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{}

/* dmaengine .device_prep_slave_sg. */
static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{}

/* dmaengine .device_alloc_chan_resources. */
static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{}

/* dmaengine .device_free_chan_resources. */
static void nbpf_free_chan_resources(struct dma_chan *dchan)
{}

/* OF translation: map a #dma-cells phandle spec to a dma_chan. */
static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{}

/* Per-channel tasklet: runs descriptor callbacks outside IRQ context.
 * NOTE(review): all bodies in this section are elided in this copy. */
static void nbpf_chan_tasklet(struct tasklet_struct *t)
{}

/* Per-channel interrupt handler. */
static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{}

/* Controller-wide error interrupt handler. */
static irqreturn_t nbpf_err_irq(int irq, void *dev)
{}

/* Initialise channel @n of @nbpf: registers, IRQ, lists, dmaengine chan. */
static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{}

/* OF match table; NOTE(review): initializer elided ("=;" is not valid C). */
static const struct of_device_id nbpf_match[] =;
MODULE_DEVICE_TABLE(of, nbpf_match);

/* Platform-device probe: map registers, request IRQs, register dmaengine.
 * NOTE(review): all bodies and initializers below are elided in this copy;
 * the empty "=;" and "()" forms are not valid C as-is. */
static int nbpf_probe(struct platform_device *pdev)
{}

/* Platform-device remove: unregister and tear down. */
static void nbpf_remove(struct platform_device *pdev)
{}

/* Non-OF platform device ID table. */
static const struct platform_device_id nbpf_ids[] =;
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
/* Runtime-PM hooks; presumably gate the functional clock — TODO confirm. */
static int nbpf_runtime_suspend(struct device *dev)
{}

static int nbpf_runtime_resume(struct device *dev)
{}
#endif

static const struct dev_pm_ops nbpf_pm_ops =;

static struct platform_driver nbpf_driver =;

module_platform_driver();

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();