#ifndef __MT76_DMA_H
#define __MT76_DMA_H
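/*
 * DMA descriptor layout and DMA queue helpers shared by the mt76
 * drivers. The types used below (struct mt76_dev, struct mt76_queue,
 * ...) come from mt76.h, which is expected to be included first.
 */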
#define DMA_DUMMY_DATA …
#define MT_RING_SIZE …
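
/* Bit fields of the DMA descriptor control and info words */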
#define MT_DMA_CTL_SD_LEN1 …
#define MT_DMA_CTL_LAST_SEC1 …
#define MT_DMA_CTL_BURST …
#define MT_DMA_CTL_SD_LEN0 …
#define MT_DMA_CTL_LAST_SEC0 …
#define MT_DMA_CTL_DMA_DONE …
#define MT_DMA_CTL_TO_HOST …
#define MT_DMA_CTL_TO_HOST_A …
#define MT_DMA_CTL_DROP …
#define MT_DMA_CTL_TOKEN …
#define MT_DMA_CTL_SDP1_H …
#define MT_DMA_CTL_SDP0_H …
#define MT_DMA_CTL_WO_DROP …
#define MT_DMA_PPE_CPU_REASON …
#define MT_DMA_PPE_ENTRY …
#define MT_DMA_INFO_DMA_FRAG …
#define MT_DMA_INFO_PPE_VLD …
#define MT_DMA_CTL_PN_CHK_FAIL …
#define MT_DMA_CTL_VER_MASK …
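
/* WED RRO (RX reorder offload) related descriptor fields */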
#define MT_DMA_RRO_EN …
#define MT_DMA_WED_IND_CMD_CNT …
#define MT_DMA_WED_IND_REASON …
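
/* Lengths of the fixed per-frame headers carried in DMA buffers */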
#define MT_DMA_HDR_LEN …
#define MT_RX_INFO_LEN …
#define MT_FCE_INFO_LEN …
#define MT_RX_RXWI_LEN …
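
/* Hardware DMA descriptor: buffer pointers plus control/info words */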
struct mt76_desc { … } __packed __aligned(…);
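/* Descriptor format consumed by the WED RRO hardware ring */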
struct mt76_wed_rro_desc { … } __packed __aligned(…);
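
/* Queue selection, MCU event type and WED indication reason encodings */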
enum mt76_qsel { … };
enum mt76_mcu_evt_type { … };
enum mt76_dma_wed_ind_reason { … };
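
/*
 * Core DMA queue API. mt76_dma_attach() installs these as the device's
 * queue ops; RX processing runs from the mt76_dma_rx_poll() NAPI
 * callback.
 */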
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
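/*
 * Refill an RX queue with fresh buffers; allow_direct is passed down to
 * the page pool so buffers may be recycled directly when called from
 * the NAPI context that owns the pool.
 */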
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
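/*
 * Queue reset helpers; reset_idx selects whether the ring head/tail
 * indexes are cleared along with the descriptors.
 */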
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
bool reset_idx);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
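
/*
 * Typical RX setup (an illustrative sketch, not the driver's literal
 * call sequence): a bus backend attaches the DMA ops and registers
 * mt76_dma_rx_poll() as the NAPI poll callback for each RX queue:
 *
 *	mt76_dma_attach(dev);
 *	netif_napi_add(dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll);
 *	napi_enable(&dev->napi[i]);
 *
 * The napi_dev plumbing differs across kernel versions, so treat the
 * snippet as a sketch only.
 */

/* Reset a TX queue, including its WED DMA state when WED is active */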
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{ … }
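/*
 * Evaluate the descriptor ctrl/buf1/info words and set *drop when the
 * hardware (e.g. the WED/PPE offload path) marked this buffer to be
 * discarded instead of being passed up the stack.
 */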
static inline void
mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
{ … }
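/*
 * Look up the mt76 device behind a net_device allocated by the driver.
 * Illustrative use (sketch): struct mt76_dev *mdev = mt76_priv(ndev);
 */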
static inline void *mt76_priv(struct net_device *dev)
{ … }
#endif