#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
/* Queue register accessors. Two variants are compiled depending on
 * whether MediaTek WED (Wireless Ethernet Dispatch) support is built in;
 * both expansions are elided in this view. NOTE(review): presumably the
 * WED variant routes accesses through the WED hardware when the queue is
 * WED-attached — confirm against the full source.
 */
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
#define Q_READ(_q, _field) …
#define Q_WRITE(_q, _field, _val) …
#else
#define Q_READ …
#define Q_WRITE …
#endif
/* Allocate a fresh txwi cache entry for @dev. Body elided in this view;
 * based on the name, presumably allocates and DMA-maps a TX descriptor
 * buffer — confirm against the full source. Returns NULL-able pointer
 * (callers should check).
 */
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{ … }
/* Allocate a fresh rxwi cache entry for @dev. Note the return type is
 * struct mt76_txwi_cache — the same cache structure is reused for the
 * RX direction. Body elided; confirm allocation details in full source.
 */
static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{ … }
/* Internal helper for mt76_get_txwi(). Body elided; by kernel naming
 * convention the double-underscore variant presumably does the raw
 * list operation (e.g. pop from a free list) without the fallback
 * allocation — TODO confirm locking/fallback split in full source.
 */
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{ … }
/* Internal helper for mt76_get_rxwi(): RX-direction counterpart of
 * __mt76_get_txwi(). Body elided — presumably pops an entry from the
 * rxwi free list; verify against full source.
 */
static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{ … }
/* Obtain a txwi cache entry for @dev. Body elided; presumably tries
 * __mt76_get_txwi() first and falls back to mt76_alloc_txwi() when the
 * cache is empty — confirm in full source.
 */
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{ … }
/* Obtain an rxwi cache entry for @dev. Exported (GPL) for use by the
 * per-chip mt76 sub-drivers. Body elided — presumably mirrors
 * mt76_get_txwi() for the RX direction; confirm in full source.
 */
struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Return txwi cache entry @t to @dev's pool. Exported (GPL). Body
 * elided — presumably list-adds @t back to the free list under a lock;
 * NOTE(review): check whether NULL @t is tolerated before relying on it.
 */
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Return rxwi cache entry @t to @dev's pool; RX counterpart of
 * mt76_put_txwi(). Exported (GPL). Body elided — presumably also
 * releases any page/buffer still attached to @t; confirm in full source.
 */
void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Free all txwi entries still held in @dev's cache, presumably during
 * teardown (called from cleanup paths). Body elided — verify unmap/free
 * ordering in full source.
 */
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{ … }
/* Free all pending rxwi entries for @dev. Exported (GPL) — unlike the
 * txwi variant above, sub-drivers call this directly (e.g. on reset
 * paths). Body elided; confirm buffer-release semantics in full source.
 */
void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Resynchronize software state of @q with the hardware ring registers.
 * Body elided — presumably writes the descriptor base/size and re-reads
 * head/tail via Q_READ/Q_WRITE; confirm in full source.
 */
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{ … }
/* Reset DMA queue @q. @reset_idx selects whether the ring head/tail
 * indices are also cleared (callers that only want descriptor cleanup
 * pass false — presumably; body elided, confirm in full source).
 */
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{ … }
/* Convenience wrapper: full reset of @q. Body elided — presumably calls
 * __mt76_dma_queue_reset(dev, q, true); confirm in full source.
 */
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{ … }
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
{ … }
/* Queue @nbufs scatter buffers from @buf[] onto @q as one frame, with
 * descriptor info word @info, owning @skb and @txwi for later cleanup.
 * Body elided — presumably writes descriptors and records the entry in
 * q->entry[]; returns the first descriptor index or a negative errno —
 * TODO confirm return semantics in full source.
 */
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{ … }
/* Release the TX entry at descriptor @idx of @q, copying its bookkeeping
 * into @prev_e so the caller can complete the skb outside any lock.
 * Body elided — presumably DMA-unmaps the buffers; confirm in full
 * source.
 */
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{ … }
/* Notify hardware that new descriptors are available on @q. Body elided
 * — presumably a write barrier followed by a CPU-index doorbell write
 * via Q_WRITE; confirm in full source.
 */
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{ … }
/* Reap completed TX descriptors on @q and complete their skbs. When
 * @flush is true, presumably reclaims every outstanding entry regardless
 * of hardware progress (teardown path); body elided — confirm in full
 * source.
 */
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{ … }
/* Detach and return the buffer at descriptor @idx of RX queue @q.
 * Out-params (body elided; semantics inferred from names — confirm):
 * @len — received length; @info — descriptor info word; @more — frame
 * continues in the next descriptor; @drop — caller should discard.
 * Returns the buffer virtual address, or presumably NULL on failure.
 */
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{ … }
/* Pop the next completed buffer from @q (wrapper around
 * mt76_dma_get_buf() at the current tail — presumably; body elided).
 * @flush forces dequeue even if hardware has not marked the descriptor
 * done — TODO confirm. Out-params as in mt76_dma_get_buf().
 */
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{ … }
/* Queue @skb on @q without the usual TX-descriptor (txwi) preparation —
 * presumably used for firmware/MCU command frames. @tx_info is the raw
 * descriptor info word. Returns 0 or a negative errno — body elided,
 * confirm in full source.
 */
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{ … }
/* Main TX path: map @skb, build its txwi via the chip driver, and queue
 * it on @q for @wcid/@sta under TX queue id @qid. Body elided —
 * presumably handles DMA mapping failure by completing the skb with an
 * error status; confirm error-path ownership of @skb in full source.
 */
static int
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{ … }
/* Refill RX queue @q with fresh buffers until the ring is full.
 * @allow_direct is presumably passed through to the page-pool allocator
 * (direct/in-softirq recycling hint) — TODO confirm. Returns the number
 * of buffers added — body elided, confirm return contract.
 */
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{ … }
/* Allocate and initialize DMA queue @q: ring slot @idx, @n_desc
 * descriptors, RX buffer size @bufsize, hardware register base
 * @ring_base. Body elided — presumably allocates the coherent
 * descriptor ring and entry array, then resets the queue. Returns 0 or
 * negative errno — confirm in full source.
 */
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{ … }
/* Drain and free all buffers currently attached to RX queue @q.
 * Body elided — presumably dequeues with flush semantics and frees any
 * partially reassembled frame; confirm in full source.
 */
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{ … }
/* Reset the RX queue identified by @qid: cleanup, ring reset, refill —
 * presumably; body elided, confirm sequence in full source.
 */
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{ … }
/* Append an RX fragment (@data, @len) to the frame being reassembled on
 * @q. @more signals that further fragments follow; @info is the
 * descriptor info word; @allow_direct is presumably forwarded to buffer
 * recycling on error. Body elided — NOTE(review): confirm what happens
 * when no head skb exists (fragment without a start).
 */
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{ … }
/* Process up to @budget completed RX descriptors on @q, building skbs
 * and handing them to the chip driver. Returns the number of frames
 * processed (NAPI convention — presumably; body elided, confirm).
 */
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{ … }
/* NAPI poll callback for mt76 RX queues. Exported (GPL) so sub-drivers
 * can register it via mt76_dma_init(). Body elided — presumably calls
 * mt76_dma_rx_process() and completes NAPI when under budget; returns
 * the work done per NAPI contract. Confirm in full source.
 */
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{ … }
EXPORT_SYMBOL_GPL(…);
/* One-time DMA setup for @dev: body elided — presumably registers NAPI
 * instances for each RX queue using @poll as the poll callback and
 * performs initial RX refill. Returns 0 or negative errno — confirm.
 */
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{ … }
/* Queue-ops vtable wiring the static helpers above into the generic
 * mt76 queue interface (initializer elided in this view).
 */
static const struct mt76_queue_ops mt76_dma_ops = …;
/* Install the DMA queue backend on @dev. Exported (GPL); body elided —
 * presumably sets dev->queue_ops = &mt76_dma_ops (and related hooks);
 * confirm in full source.
 */
void mt76_dma_attach(struct mt76_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Tear down all DMA state for @dev: TX/RX queue cleanup, NAPI removal,
 * pending txwi release — presumably; body elided, confirm teardown
 * ordering in full source. Exported (GPL) for sub-driver remove paths.
 */
void mt76_dma_cleanup(struct mt76_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);