linux/drivers/usb/mtu3/mtu3_qmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <[email protected]>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload the software
 * effort of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD), software links up data buffers and triggers the QMU to send data
 * to / receive data from the host in a single run.
 * Currently only GPDs are supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
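
/*
 * The *_OG/*_EL macro pairs below cover the two GPD layouts: the original
 * one with 16-bit length fields, and the extended-length one selected by
 * mtu->gen2cp, which widens the length fields to 20 bits and shifts the
 * extension bits accordingly (this reading of the OG/EL naming is an
 * assumption). The (mtu, x) wrappers pick the right variant at runtime.
 */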

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
	((mtu)->gen2cp ? GPD_RX_BUF_LEN_EL(x) : GPD_RX_BUF_LEN_OG(x))

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
	((mtu)->gen2cp ? GPD_DATA_LEN_EL(x) : GPD_DATA_LEN_OG(x))

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
	((mtu)->gen2cp ? GPD_EXT_NGP_EL(x) : GPD_EXT_NGP_OG(x))

#define GPD_EXT_BUF(mtu, x)	\
	((mtu)->gen2cp ? GPD_EXT_BUF_EL(x) : GPD_EXT_BUF_OG(x))

#define HILO_GEN64(hi, lo)	(((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64(le32_to_cpu(hi), le32_to_cpu(lo)))
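
/*
 * For example (a sketch; QMU_CUR_GPD_ADDR_HI and the TQCPR/TQHIAR register
 * accessors are assumed from mtu3_hw_regs.h), the current GPD address of a
 * TX queue is recombined from two 32-bit registers:
 *
 *	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
 *	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
 *	dma = HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
 */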

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{}

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{}

static void reset_gpd_list(struct mtu3_ep *mep)
{}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{}
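
/*
 * A sketch of the allocation above (the real function also zeroes and
 * initializes the ring bookkeeping): one dma_pool allocation backs the
 * whole GPD ring, and all GPDs start out software-owned (HWO clear):
 *
 *	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &dma);
 *	if (!gpd)
 *		return -ENOMEM;
 *	gpd_ring_init(ring, gpd);
 */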

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{}

/* @dequeue may be NULL if the ring is unallocated or has been freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{}

/* check if a ring is empty */
static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
{}
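
/*
 * A sketch of the emptiness check: one GPD is kept reserved to simplify
 * GPD preparation, so the ring counts as empty when the slot after
 * enqueue (with wrap-around) is the dequeue pointer:
 *
 *	next = (ring->enqueue < ring->end) ? ring->enqueue + 1 : ring->start;
 *	return next == ring->dequeue;
 */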

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{}

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{}

int mtu3_qmu_start(struct mtu3_ep *mep)
{}
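
/*
 * Starting a TX queue, roughly (USB_QMU_TQCSR and QMU_Q_START are assumed
 * from mtu3_hw_regs.h): program the ring's base address, then kick the
 * queue:
 *
 *	write_txq_start_addr(mbase, epnum, ring->dma);
 *	mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
 */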

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{}

/*
 * The QMU can't transfer a zero-length packet directly (a hardware
 * limitation on old SoCs), so when a ZLP needs to be sent, we intentionally
 * trigger a length error interrupt and send the ZLP by the BMU from within
 * the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{}
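
/*
 * The heart of the workaround, as a sketch (MU3D_EP_TXCR0 and TX_TXPKTRDY
 * are assumed from mtu3_hw_regs.h): once the length error irq fires for
 * the ZLP GPD, software pushes an empty packet through the BMU by setting
 * TXPKTRDY by hand:
 *
 *	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
 */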

/*
 * When an RX error happens (except zlperr), the QMU stops, and RQCPR holds
 * the GPD that encountered the error; a Done irq will arise after the QMU
 * is resumed again.
 */
static void qmu_error_rx(struct mtu3 *mtu, u8 epnum)
{}
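
/*
 * Roughly (a sketch; the real handler also gives the failed request back):
 * look up the GPD that RQCPR points at, then kick the queue again so the
 * Done irq can fire:
 *
 *	gpd_current = gpd_dma_to_virt(ring, read_rxq_cur_addr(mbase, epnum));
 *	...
 *	mtu3_qmu_resume(mep);
 */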

/*
 * NOTE: the request list may already be empty, as in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile, the second request has also been
 * transferred, so the tasklet processes both of them) --> qmu_interrupt for
 * the second one.
 * To avoid the case above, call qmu_done_tx directly in the ISR to process
 * completions.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{}
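
/*
 * A sketch of the TX completion walk (trace hooks and the ZLP special case
 * of the real handler omitted): complete every request whose GPD sits
 * before the current GPD and has been handed back by hardware (HWO clear):
 *
 *	gpd_current = gpd_dma_to_virt(ring, read_txq_cur_addr(mbase, epnum));
 *	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 *		mreq = next_request(mep);
 *		mtu3_req_complete(mep, &mreq->request, 0);
 *		gpd = advance_deq_gpd(ring);
 *	}
 */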

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{}
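
/*
 * The top-level flow, as a sketch (U3D_QISAR0/U3D_QIER0 are the QMU done
 * interrupt status/enable registers, assumed from mtu3_hw_regs.h): read
 * the enabled status bits, ack them (write-1-clear), then dispatch to the
 * handlers above:
 *
 *	done = mtu3_readl(mbase, U3D_QISAR0) & mtu3_readl(mbase, U3D_QIER0);
 *	mtu3_writel(mbase, U3D_QISAR0, done);
 *	if (done)
 *		qmu_done_isr(mtu, done);
 */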

int mtu3_qmu_init(struct mtu3 *mtu)
{}
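
/*
 * A sketch of the pool setup (QMU_GPD_SIZE and QMU_GPD_RING_SIZE are
 * assumed to come from mtu3.h): every endpoint's GPD ring is carved out
 * of one coherent DMA pool:
 *
 *	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
 *			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);
 *	if (!mtu->qmu_gpd_pool)
 *		return -ENOMEM;
 */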

void mtu3_qmu_exit(struct mtu3 *mtu)
{}