linux/drivers/net/wireless/mediatek/mt76/mt76.h

/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <[email protected]>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE
#define MT_RX_BUF_SIZE
#define MT_SKB_HEAD_LEN

#define MT_MAX_NON_AQL_PKT
#define MT_TXQ_FREE_THR

#define MT76_TOKEN_FREE_THR

#define MT_QFLAG_WED_RING
#define MT_QFLAG_WED_TYPE
#define MT_QFLAG_WED
#define MT_QFLAG_WED_RRO
#define MT_QFLAG_WED_RRO_EN

#define __MT_WED_Q(_type, _n)
#define __MT_WED_RRO_Q(_type, _n)

#define MT_WED_Q_TX(_n)
#define MT_WED_Q_RX(_n)
#define MT_WED_Q_TXFREE
#define MT_WED_RRO_Q_DATA(_n)
#define MT_WED_RRO_Q_MSDU_PG(_n)
#define MT_WED_RRO_Q_IND

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;

struct mt76_reg_pair {};

enum mt76_bus_type {};

enum mt76_wed_type {};

struct mt76_bus_ops {};

#define mt76_is_usb(dev)
#define mt76_is_mmio(dev)
#define mt76_is_sdio(dev)

enum mt76_txq_id {};

enum mt76_mcuq_id {};

enum mt76_rxq_id {};

enum mt76_band_id {};

enum mt76_cipher_type {};

enum mt76_dfs_state {};

struct mt76_queue_buf {};

struct mt76_tx_info {};

struct mt76_queue_entry {};

struct mt76_queue_regs {} __packed __aligned();

struct mt76_queue {};

struct mt76_mcu_ops {};

struct mt76_queue_ops {};

enum mt76_phy_type {};

struct mt76_sta_stats {};

enum mt76_wcid_flags {};

#define MT76_N_WCIDS

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY
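/* A minimal sketch of how the phy index might be recovered on the tx status
 * path, assuming MT_TX_HW_QUEUE_PHY is a bitmask and using FIELD_GET() from
 * <linux/bitfield.h> (illustrative only, not part of the API):
 *
 *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *	u8 phy_idx = FIELD_GET(MT_TX_HW_QUEUE_PHY, info->hw_queue);
 */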

DECLARE_EWMA(signal, 10, 8);
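/* DECLARE_EWMA() from <linux/average.h> generates struct ewma_signal along
 * with the ewma_signal_init()/ewma_signal_add()/ewma_signal_read() helpers
 * used for running signal-strength averages: 10 fractional bits of
 * precision, with each new sample weighted 1/8.
 */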

#define MT_WCID_TX_INFO_RATE
#define MT_WCID_TX_INFO_NSS
#define MT_WCID_TX_INFO_TXPWR_ADJ
#define MT_WCID_TX_INFO_SET

struct mt76_wcid {};

struct mt76_txq {};

struct mt76_wed_rro_ind {};

struct mt76_txwi_cache {};

struct mt76_rx_tid {};

#define MT_TX_CB_DMA_DONE
#define MT_TX_CB_TXS_DONE
#define MT_TX_CB_TXS_FAILED

#define MT_PACKET_ID_MASK
#define MT_PACKET_ID_NO_ACK
#define MT_PACKET_ID_NO_SKB
#define MT_PACKET_ID_WED
#define MT_PACKET_ID_FIRST
#define MT_PACKET_ID_HAS_RATE
/* Timeout for giving up on a pending TXS callback. The timer starts at the
 * time the DMA_DONE callback was seen: the packet has been processed by
 * then, so if the firmware is going to send a TXS callback at all, it
 * should arrive shortly afterwards.
 */
#define MT_TX_STATUS_SKB_TIMEOUT
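/* A rough sketch of how this timeout is typically checked when reaping
 * pending status skbs; the cb->jiffies field name is an assumption here and
 * the snippet is illustrative only:
 *
 *	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 *
 *	if (time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
 *		mt76_tx_status_skb_done(dev, skb, list);
 */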

struct mt76_tx_cb {};

enum {};

struct mt76_hw_cap {};

#define MT_DRV_TXWI_NO_FREE
#define MT_DRV_TX_ALIGNED4_SKBS
#define MT_DRV_SW_RX_AIRTIME
#define MT_DRV_RX_DMA_HDR
#define MT_DRV_HW_MGMT_TXQ
#define MT_DRV_AMSDU_OFFLOAD

struct mt76_driver_ops {};

struct mt76_channel_state {};

struct mt76_sband {};

/* USB vendor request address/type encoding */
#define MT_VEND_TYPE_EEPROM
#define MT_VEND_TYPE_CFG
#define MT_VEND_TYPE_MASK

#define MT_VEND_ADDR(type, n)
enum mt_vendor_req {};

enum mt76u_in_ep {};

enum mt76u_out_ep {};

struct mt76_mcu {};

#define MT_TX_SG_MAX_SIZE
#define MT_RX_SG_MAX_SIZE
#define MT_NUM_TX_ENTRIES
#define MT_NUM_RX_ENTRIES
#define MCU_RESP_URB_SIZE
struct mt76_usb {};

#define MT76S_XMIT_BUF_SZ
#define MT76S_NUM_TX_ENTRIES
#define MT76S_NUM_RX_ENTRIES
struct mt76_sdio {};

struct mt76_mmio {};

struct mt76_rx_status {};

struct mt76_freq_range_power {};

struct mt76_testmode_ops {};

struct mt76_testmode_data {};

struct mt76_vif {};

struct mt76_phy {};

struct mt76_dev {};

/* per-phy stats */
struct mt76_mib_stats {};

struct mt76_power_limits {};

struct mt76_ethtool_worker_info {};

#define CCK_RATE(_idx, _rate)

#define OFDM_RATE(_idx, _rate)

extern struct ieee80211_rate mt76_rates[12];
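/* The shared legacy rate table (4 CCK + 8 OFDM entries) is built with the
 * CCK_RATE()/OFDM_RATE() helpers above; drivers typically hand it to
 * mt76_register_device(), e.g. (illustrative only, where dev stands for a
 * chip driver's private struct embedding struct mt76_dev):
 *
 *	mt76_register_device(&dev->mt76, true, mt76_rates,
 *			     ARRAY_SIZE(mt76_rates));
 */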

#define __mt76_rr(dev, ...)
#define __mt76_wr(dev, ...)
#define __mt76_rmw(dev, ...)
#define __mt76_wr_copy(dev, ...)
#define __mt76_rr_copy(dev, ...)

#define __mt76_set(dev, offset, val)
#define __mt76_clear(dev, offset, val)

#define mt76_rr(dev, ...)
#define mt76_wr(dev, ...)
#define mt76_rmw(dev, ...)
#define mt76_wr_copy(dev, ...)
#define mt76_rr_copy(dev, ...)
#define mt76_wr_rp(dev, ...)
#define mt76_rd_rp(dev, ...)

#define mt76_mcu_restart(dev, ...)

#define mt76_set(dev, offset, val)
#define mt76_clear(dev, offset, val)

#define mt76_get_field(_dev, _reg, _field)

#define mt76_rmw_field(_dev, _reg, _field, _val)

#define __mt76_rmw_field(_dev, _reg, _field, _val)

#define mt76_hw(dev)

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)
#define mt76_poll_msec(dev, ...)
#define mt76_poll_msec_tick(dev, ...)
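/* Typical polling usage: wait until a register field reaches the expected
 * value, bailing out on timeout. The register and field names below are
 * hypothetical and used purely for illustration:
 *
 *	if (!mt76_poll(dev, MT_DMA_CTRL, MT_DMA_CTRL_BUSY, 0, 1000))
 *		return -ETIMEDOUT;
 */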

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
bool mt76_pci_aspm_supported(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{}

static inline u16 mt76_rev(struct mt76_dev *dev)
{}

void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data);
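/* When Wireless Ethernet Dispatch (WED) support is compiled out, the
 * helpers below fall back to no-op stubs so that callers do not need
 * #ifdefs of their own.
 */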
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	return 0;
}

static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	return 0;
}

static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
				     bool reset)
{
	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

#define mt76xx_chip(dev)
#define mt76xx_rev(dev)

#define mt76_init_queues(dev, ...)
#define mt76_queue_alloc(dev, ...)
#define mt76_tx_queue_skb_raw(dev, ...)
#define mt76_tx_queue_skb(dev, ...)
#define mt76_queue_rx_reset(dev, ...)
#define mt76_queue_tx_cleanup(dev, ...)
#define mt76_queue_rx_cleanup(dev, ...)
#define mt76_queue_kick(dev, ...)
#define mt76_queue_reset(dev, ...)

#define mt76_for_each_q_rx(dev, i)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{}
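/* A minimal sketch of the wrap-around arithmetic, assuming the ring size is
 * a power of two (illustrative only):
 *
 *	next = (val + 1) & (size - 1);
 *	prev = (val - 1) & (size - 1);
 */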

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{}

static inline bool mt76_is_skb_pktid(u8 pktid)
{}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires();
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases();

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{}

void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{}
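/* Typical MCU command usage from a driver; the command id and request
 * struct below are hypothetical and shown purely for illustration:
 *
 *	struct {
 *		__le32 mode;
 *	} __packed req = {
 *		.mode = cpu_to_le32(1),
 *	};
 *
 *	return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MODE, &req,
 *				 sizeof(req), true);
 */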

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{}

static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{}

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{}

static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{}

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{}

void mt76_wcid_init(struct mt76_wcid *wcid);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);

#endif /* __MT76_H */