// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{}

static void mv_chan_activate(struct mv_xor_chan *chan)
{}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{}
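
/*
 * Illustrative sketch, guarded out, of what "starting a new chain" amounts
 * to on this engine: hand the DMA address of the first hardware descriptor
 * to the channel and kick it, with mv_chan->lock already held by the
 * caller. The async_tx.phys field name follows the generic dmaengine
 * descriptor and is an assumption here, not taken from this file.
 */
#if 0
static void mv_chan_start_new_chain_sketch(struct mv_xor_chan *mv_chan,
					   struct mv_xor_desc_slot *sw_desc)
{
	/* Point the engine's next-descriptor register at the chain head. */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	/* (Re)start processing on this channel. */
	mv_chan_activate(mv_chan);
}
#endif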

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{}

static void mv_xor_tasklet(struct tasklet_struct *t)
{}
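
/*
 * Minimal sketch, guarded out, of how the tasklet typically honours the
 * locking rule stated above mv_chan_slot_cleanup(): take the channel
 * spinlock, reap completed descriptors, release it. The irq_tasklet and
 * lock member names are assumptions about struct mv_xor_chan.
 */
#if 0
static void mv_xor_tasklet_sketch(struct tasklet_struct *t)
{
	struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);

	spin_lock(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}
#endif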

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{}

/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM) and
 * add a new MBus window if necessary. Cache the windows that have already
 * been checked so that the MMIO-mapped registers don't have to be read on
 * every check, which speeds this path up.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{}
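
/*
 * Sketch, guarded out, of the caching idea described above: remember the
 * [start, end] range of every window that has already been programmed in a
 * plain array so the common case is an in-memory lookup instead of an MMIO
 * read. The xordev back-pointer, the win_start/win_end arrays and the
 * window count of 8 are assumptions; on a miss the real code would query
 * the MBus layer and program a new window before updating the cache.
 */
#if 0
static int mv_xor_add_io_win_sketch(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	int i;

	/* Fast path: the address already falls inside a cached window. */
	for (i = 0; i < 8; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i])
			return 0;
	}

	/*
	 * Slow path (omitted here): look the address up through the MBus
	 * layer, program a spare window and record its range in the cache.
	 */
	return -EINVAL;
}
#endif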

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{}
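
/*
 * Sketch, guarded out, of the usual dmaengine polling pattern implied by
 * the kerneldoc above: check the cookie first, and only if the transaction
 * is not yet complete take the channel lock, reap finished descriptors and
 * check again. dma_cookie_status() comes from the shared "dmaengine.h"
 * helpers included at the top of this file; the lock member name is an
 * assumption.
 */
#if 0
static enum dma_status mv_xor_status_sketch(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	/* Cheap check first: the cookie may already be marked complete. */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* Otherwise clean up completed slots and look again. */
	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}
#endif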

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{}

static void mv_xor_issue_pending(struct dma_chan *chan)
{}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{}
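
/*
 * Sketch, guarded out, of what such a verification transaction can look
 * like when driven through this driver's own dmaengine entry points: copy
 * one page through the engine and compare the result. Mapping/descriptor
 * error handling is trimmed to keep the outline short, and the dmachan
 * member name is an assumption about struct mv_xor_chan.
 */
#if 0
static int mv_chan_memcpy_self_test_sketch(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan = &mv_chan->dmachan;
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t src_dma, dest_dma;
	dma_cookie_t cookie;
	u8 *src, *dest;
	int i, err = 0;

	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!src || !dest) {
		err = -ENOMEM;
		goto out;
	}

	/* Fill the source with a recognisable pattern. */
	for (i = 0; i < PAGE_SIZE; i++)
		src[i] = (u8)i;

	src_dma = dma_map_single(dev, src, PAGE_SIZE, DMA_TO_DEVICE);
	dest_dma = dma_map_single(dev, dest, PAGE_SIZE, DMA_FROM_DEVICE);

	/* Build, submit and kick a single memcpy descriptor. */
	tx = mv_xor_prep_dma_memcpy(chan, dest_dma, src_dma, PAGE_SIZE,
				    DMA_CTRL_ACK);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(chan);
	msleep(1);

	if (mv_xor_status(chan, cookie, NULL) != DMA_COMPLETE)
		err = -ENODEV;

	dma_unmap_single(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	dma_unmap_single(dev, src_dma, PAGE_SIZE, DMA_TO_DEVICE);

	if (!err && memcmp(src, dest, PAGE_SIZE))
		err = -EIO;
out:
	kfree(src);
	kfree(dest);
	return err;
}
#endif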

#define MV_XOR_NUM_SRC_TEST 4 /* max test xor sources */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{}

static int mv_xor_resume(struct platform_device *dev)
{}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= mv_xor_dt_ids,
	},
};

builtin_platform_driver(mv_xor_driver);

/*
MODULE_AUTHOR("Saeed Bishara <[email protected]>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/