// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for AMD Queue-based DMA Subsystem
 *
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>

#include "qdma.h"

/* Direction tag used in log messages */
#define CHAN_STR(q)		(((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
/* Register offset lookup through the per-device offset table (see qdma.h) */
#define QDMA_REG_OFF(d, r)	((d)->roffs[r].off)

/* MMIO regmap config for all QDMA registers */
static const struct regmap_config qdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/* chan and vdesc are embedded through the virt-dma helpers (see qdma.h) */
static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
{
	return container_of(chan, struct qdma_queue, vchan.chan);
}

static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
}

static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
{}

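/*
 * Register and field accessors. The QDMA register layout differs between
 * hardware variants, so offsets and bit fields are looked up through
 * per-device tables (see qdma.h) rather than hard-coded masks.
 */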
static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
			  enum qdma_reg_fields field)
{}

static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
			   enum qdma_reg_fields field, u64 value)
{}

static inline int qdma_reg_write(const struct qdma_device *qdev,
				 const u32 *data, enum qdma_regs reg)
{}

static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
				enum qdma_regs reg)
{}

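/*
 * Indirect context access: the context data is first staged in the context
 * data registers, then a command carrying the context type, the operation
 * and the queue/ring index is issued, and the command register's busy bit
 * is polled until hardware has consumed it.
 */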
static int qdma_context_cmd_execute(const struct qdma_device *qdev,
				    enum qdma_ctxt_type type,
				    enum qdma_ctxt_cmd cmd, u16 index)
{}

static int qdma_context_write_data(const struct qdma_device *qdev,
				   const u32 *data)
{}

static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
				      const struct qdma_ctxt_sw_desc *ctxt,
				      u32 *data)
{}

static void qdma_prep_intr_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_intr *ctxt,
				   u32 *data)
{}

static void qdma_prep_fmap_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_fmap *ctxt,
				   u32 *data)
{}

/*
 * Program the indirect context register space
 *
 * Once the queue is enabled, context is dynamically updated by hardware. Any
 * modification of the context through this API when the queue is enabled can
 * result in unexpected behavior. Reading the context when the queue is enabled
 * is not recommended as it can result in reduced performance.
 */
static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
			     enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
{}
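
/*
 * A minimal sketch of the flow above, not the in-tree body: WRITE commands
 * stage the context data first, READ commands copy the data registers back
 * once the command has completed, and the whole sequence is serialized so
 * concurrent callers cannot interleave the data and command phases. The
 * ctxt_lock name, the QDMA_CTXT_WRITE/QDMA_CTXT_READ command values and the
 * QDMA_REGO_CTXT_DATA register identifier are assumptions standing in for
 * the definitions in qdma.h.
 */
static int qdma_prog_context_sketch(struct qdma_device *qdev,
				    enum qdma_ctxt_type type,
				    enum qdma_ctxt_cmd cmd, u16 index,
				    u32 *ctxt)
{
	int ret;

	mutex_lock(&qdev->ctxt_lock);		/* assumed serialization lock */
	if (cmd == QDMA_CTXT_WRITE) {
		ret = qdma_context_write_data(qdev, ctxt);
		if (ret)
			goto out;
	}

	ret = qdma_context_cmd_execute(qdev, type, cmd, index);
	if (ret)
		goto out;

	if (cmd == QDMA_CTXT_READ)
		ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
out:
	mutex_unlock(&qdev->ctxt_lock);
	return ret;
}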

static int qdma_check_queue_status(struct qdma_device *qdev,
				   enum dma_transfer_direction dir, u16 qid)
{}

static int qdma_clear_queue_context(const struct qdma_queue *queue)
{}

static int qdma_setup_fmap_context(struct qdma_device *qdev)
{}

static int qdma_setup_queue_context(struct qdma_device *qdev,
				    const struct qdma_ctxt_sw_desc *sw_desc,
				    enum dma_transfer_direction dir, u16 qid)
{}

/*
 * Enable or disable memory-mapped DMA engines
 * 1: enable, 0: disable
 */
static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
{}
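
/*
 * A minimal sketch, not the in-tree body: the MM engines are switched on or
 * off by writing @ctrl to the H2C and C2H MM control registers. The
 * QDMA_REGO_MM_H2C_CTRL/QDMA_REGO_MM_C2H_CTRL identifiers and the regmap
 * field name are assumptions standing in for the definitions in qdma.h.
 */
static int qdma_sgdma_control_sketch(struct qdma_device *qdev, u32 ctrl)
{
	int ret;

	/* assumed register identifiers; offsets come from the device table */
	ret = regmap_write(qdev->regmap,
			   QDMA_REG_OFF(qdev, QDMA_REGO_MM_H2C_CTRL), ctrl);
	if (ret)
		return ret;

	return regmap_write(qdev->regmap,
			    QDMA_REG_OFF(qdev, QDMA_REGO_MM_C2H_CTRL), ctrl);
}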

static int qdma_get_hw_info(struct qdma_device *qdev)
{}

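/*
 * Doorbell helpers: qdma_update_pidx() publishes the queue's producer index
 * so hardware starts fetching the newly posted descriptors, while
 * qdma_update_cidx() writes the ring and consumer indices back to
 * acknowledge processed interrupt ring entries.
 */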
static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
{}

static inline int qdma_update_cidx(const struct qdma_queue *queue,
				   u16 ridx, u16 cidx)
{}

/**
 * qdma_free_vdesc() - Free the virtual DMA descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
{}

static int qdma_alloc_queues(struct qdma_device *qdev,
			     enum dma_transfer_direction dir)
{}

static int qdma_device_verify(struct qdma_device *qdev)
{}

static int qdma_device_setup(struct qdma_device *qdev)
{}

/**
 * qdma_free_queue_resources() - Free queue resources
 * @chan: DMA channel
 */
static void qdma_free_queue_resources(struct dma_chan *chan)
{}

/**
 * qdma_alloc_queue_resources() - Allocate queue resources
 * @chan: DMA channel
 */
static int qdma_alloc_queue_resources(struct dma_chan *chan)
{}

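/*
 * Channel filter for dma_request_channel(): picks a queue whose transfer
 * direction matches the one the client asked for. A client lookup might
 * look like the sketch below, assuming the qdma_queue_info layout from
 * <linux/platform_data/amd_qdma.h> carries the desired direction:
 *
 *	struct qdma_queue_info qinfo = { .dir = DMA_MEM_TO_DEV };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, qdma_filter_fn, &qinfo);
 */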
static bool qdma_filter_fn(struct dma_chan *chan, void *param)
{}

static int qdma_xfer_start(struct qdma_queue *queue)
{}

static void qdma_issue_pending(struct dma_chan *chan)
{}

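/*
 * Descriptor path: qdma_get_desc() hands out the next free slot in the
 * queue's descriptor ring, qdma_hw_enqueue() expands a virtual descriptor
 * into hardware MM descriptors, and qdma_fill_pending_vdesc() drains the
 * virt-dma channel's pending descriptors into the ring ahead of the
 * producer index update.
 */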
static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
{}

static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
{}

static void qdma_fill_pending_vdesc(struct qdma_queue *q)
{}

static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
{}

static struct dma_async_tx_descriptor *
qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{}

static int qdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{}

static int qdma_arm_err_intr(const struct qdma_device *qdev)
{}

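/*
 * Interrupt handling: one vector is dedicated to global error reporting
 * (qdma_error_isr()), while queue completions arrive through interrupt
 * aggregation rings; qdma_queue_isr() walks the ring entries, completes the
 * corresponding virtual descriptors and writes the updated consumer index
 * back to hardware.
 */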
static irqreturn_t qdma_error_isr(int irq, void *data)
{}

static irqreturn_t qdma_queue_isr(int irq, void *data)
{}

static int qdma_init_error_irq(struct qdma_device *qdev)
{}

static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{}

static int qdma_intr_init(struct qdma_device *qdev)
{}

static void amd_qdma_remove(struct platform_device *pdev)
{}

static int amd_qdma_probe(struct platform_device *pdev)
{}

static struct platform_driver amd_qdma_driver = {
	.driver = {
		.name = "amd-qdma",
	},
	.probe = amd_qdma_probe,
	.remove = amd_qdma_remove,
};

module_platform_driver(amd_qdma_driver);

MODULE_DESCRIPTION("AMD QDMA driver");
MODULE_AUTHOR();
MODULE_LICENSE("GPL");