
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE

#define HIDMA_TRCA_CTRLSTS_REG
#define HIDMA_TRCA_RING_LOW_REG
#define HIDMA_TRCA_RING_HIGH_REG
#define HIDMA_TRCA_RING_LEN_REG
#define HIDMA_TRCA_DOORBELL_REG

#define HIDMA_EVCA_CTRLSTS_REG
#define HIDMA_EVCA_INTCTRL_REG
#define HIDMA_EVCA_RING_LOW_REG
#define HIDMA_EVCA_RING_HIGH_REG
#define HIDMA_EVCA_RING_LEN_REG
#define HIDMA_EVCA_WRITE_PTR_REG
#define HIDMA_EVCA_DOORBELL_REG

#define HIDMA_EVCA_IRQ_STAT_REG
#define HIDMA_EVCA_IRQ_CLR_REG
#define HIDMA_EVCA_IRQ_EN_REG

#define HIDMA_EVRE_CFG_IDX

#define HIDMA_EVRE_ERRINFO_BIT_POS
#define HIDMA_EVRE_CODE_BIT_POS

#define HIDMA_EVRE_ERRINFO_MASK
#define HIDMA_EVRE_CODE_MASK

#define HIDMA_CH_CONTROL_MASK
#define HIDMA_CH_STATE_MASK
#define HIDMA_CH_STATE_BIT_POS

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS

#define ENABLE_IRQS	(BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)		| \
			 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)		| \
			 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
			 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
			 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
			 BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

/* Advance a ring offset by size bytes, wrapping at the end of the ring. */
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {								\
	iter += size;						\
	if (iter >= ring_size)					\
		iter -= ring_size;				\
} while (0)

/* Extract the channel state field from a control/status register value. */
#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   | \
				 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
				 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	     | \
				 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    | \
				 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {};

enum ch_state {};

enum err_code {};

static int hidma_is_chan_enabled(int state)
{}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{}

int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(struct tasklet_struct *t)
{}
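
/*
 * Hedged sketch (not the in-tree implementation) of how the completion
 * tasklet above could drain work queued by the interrupt path.  The kfifo
 * name (handoff_fifo), the tasklet member name (task) and the hidma_tre
 * fields (callback, data) are assumptions made purely for illustration.
 */
static void __maybe_unused hidma_ll_tre_complete_sketch(struct tasklet_struct *t)
{
	struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
	struct hidma_tre *tre;

	/* Pop descriptors handed off by the interrupt handler... */
	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* ...and report each result back to the upper layer. */
		if (tre->callback)
			tre->callback(tre->data);
	}
}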

static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{}

/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TREs or EVREs were consumed on this run
 * or if there are still pending TREs or EVREs to process.
 * Return 0 if there was nothing to consume and nothing is pending.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{}
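
/*
 * Hedged sketch of one EVRE consumption pass as described above.  The
 * hidma_lldev fields used here (evca, evre_ring, evre_processed_off,
 * evre_ring_size) are assumptions made for illustration; the real layout
 * lives in hidma.h.
 */
static int __maybe_unused hidma_handle_tre_completion_sketch(struct hidma_lldev *lldev)
{
	u32 evre_iter = lldev->evre_processed_off;
	u32 num_completed = 0;
	u32 evre_write_off;

	/* The write pointer tells us how far the HW has filled the ring. */
	evre_write_off = readl(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);

	while (evre_iter != evre_write_off) {
		u32 *evre = (u32 *)((u8 *)lldev->evre_ring + evre_iter);
		u32 cfg = evre[HIDMA_EVRE_CFG_IDX];
		u8 err_info = (cfg >> HIDMA_EVRE_ERRINFO_BIT_POS) &
			       HIDMA_EVRE_ERRINFO_MASK;
		u8 err_code = (cfg >> HIDMA_EVRE_CODE_BIT_POS) &
			       HIDMA_EVRE_CODE_MASK;

		/* Retire the TRE that produced this EVRE. */
		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iter, HIDMA_EVRE_SIZE,
					 lldev->evre_ring_size);
		num_completed++;

		/* Pick up EVREs that arrived while we were working. */
		evre_write_off = readl(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	}

	return num_completed;
}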

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by the HW, their
 * EVREs appear in order in the event ring.
 *
 * This handler does one pass to consume EVREs. Other EVREs may be
 * delivered while we are working. It will try to consume incoming
 * EVREs one more time and return.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all
 * data transactions in flight have been delivered to their respective
 * places and are visible to the CPU.
 *
 * On-demand paging for the IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time
 * the interrupt happens, IOMMU lookups and data movement have already
 * taken place.
 *
 * While the first read in a typical PCI endpoint ISR traditionally
 * flushes all outstanding requests to the destination, that concept
 * does not apply to this hardware.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{}
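
/*
 * Hedged sketch of the dispatch described in the comment above: treat error
 * causes separately from completions, drain the event ring, acknowledge the
 * interrupt and re-read the status until it stays clear.  The lldev fields
 * (dev, evca) and the HIDMA_EVRE_STATUS_ERROR name are assumptions for
 * illustration; the real error codes are in the err_code enum above.
 */
static void __maybe_unused
hidma_ll_int_handler_internal_sketch(struct hidma_lldev *lldev, int cause)
{
	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error cause 0x%x, failing queue\n", cause);

		/* Acknowledge the error and fail everything still queued. */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
		hidma_cleanup_pending_tre(lldev, 0xFF, HIDMA_EVRE_STATUS_ERROR);
		return;
	}

	/*
	 * Consume completions, then acknowledge the cause bits.  Unprocessed
	 * EVREs will raise the interrupt again, so one extra pass is enough.
	 */
	while (cause) {
		hidma_handle_tre_completion(lldev);
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
		cause = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG) &
			ENABLE_IRQS;
	}
}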

irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{}

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{}

int hidma_ll_enable(struct hidma_lldev *lldev)
{}

void hidma_ll_start(struct hidma_lldev *lldev)
{}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{}

/*
 * Note that even though we stop this channel, a transaction already in
 * flight will still complete and invoke its callback. This request only
 * prevents further requests from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{}
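
/*
 * Hedged sketch of the suspend sequence this function implements, assuming
 * the channel command field sits at bit 16 of the control/status register.
 * HIDMA_CH_SUSPEND/HIDMA_CH_SUSPENDED stand in for members of the
 * ch_command/ch_state enums above, and lldev->trch_state is likewise an
 * assumed field; all are used here for illustration only.
 */
static int __maybe_unused hidma_ll_disable_sketch(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* Program the suspend command into the transfer channel. */
	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/* Wait up to one second for the hardware to report the new state. */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 1000000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	return 0;
}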

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{}
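
/*
 * Hedged sketch of the restore sequence described above: reset the channel,
 * reprogram both ring bases, re-enable the interrupt sources and bring the
 * channel back up.  The lldev fields (tre_dma, evre_dma, tre_ring_size,
 * evre_ring_size) are assumptions made for illustration.
 */
static int __maybe_unused hidma_ll_setup_sketch(struct hidma_lldev *lldev)
{
	int rc;

	/* Bring both channels back to a known state first. */
	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/* Mask interrupts while the rings are reprogrammed. */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* Point the transfer channel at its TRE ring. */
	writel(lower_32_bits(lldev->tre_dma),
	       lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(lldev->tre_dma),
	       lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	/* Point the event channel at its EVRE ring. */
	writel(lower_32_bits(lldev->evre_dma),
	       lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(lldev->evre_dma),
	       lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(lldev->evre_ring_size, lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* Unmask the interrupt sources and enable the channel. */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return hidma_ll_enable(lldev);
}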

void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{}

int hidma_ll_uninit(struct hidma_lldev *lldev)
{}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{}