linux/drivers/net/wireless/ti/wlcore/tx.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{}

unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{}

static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{}
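
/*
 * Illustrative sketch, not part of the driver: one way to make the
 * "caller must hold wl->mutex" contract above self-checking is a lockdep
 * assertion at the top of the function. Everything inside the #if 0 block
 * is an assumption added for exposition, not the actual implementation.
 */
#if 0
static int wl1271_prepare_tx_frame_sketch(struct wl1271 *wl,
					  struct wl12xx_vif *wlvif,
					  struct sk_buff *skb, u32 buf_offset,
					  u8 hlid)
{
	/* no-op unless lockdep is enabled; catches callers that forget the lock */
	lockdep_assert_held(&wl->mutex);

	/* descriptor allocation and header fill would follow here */
	return 0;
}
#endif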

u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{}

static int wlcore_select_ac(struct wl1271 *wl)
{}

static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{}

static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{}

static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{}

/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will be
 * queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame can occur
 * and are legitimate, so don't propagate them. -EINVAL will emit a WARNING
 * within the prepare_tx_frame code, but there is nothing else we should do
 * about it either.
 * (An illustrative sketch of how a caller consumes this contract follows
 * wl1271_tx_work() below.)
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{}

void wl1271_tx_work(struct work_struct *work)
{}
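
/*
 * Illustrative sketch, not part of the driver: one possible shape for the TX
 * work handler, consistent with the return-value contract documented above
 * wlcore_tx_work_locked(). Only bus-level failures propagate out of the locked
 * helper, so any negative return is treated as a reason to queue hardware
 * recovery. The field and helper names used here (wl->tx_work, wl->dev,
 * wl12xx_queue_recovery_work) are assumptions for exposition.
 */
#if 0
void wl1271_tx_work_sketch(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		/* a failed bus op is fatal: drop the PM ref and recover */
		pm_runtime_put_noidle(wl->dev);
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
#endif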

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{}

static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{}
EXPORT_SYMBOL(wlcore_tx_complete);
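
/*
 * Illustrative sketch, not part of the driver: the general shape of a TX
 * completion pass, as the comment above suggests. The firmware keeps a counter
 * of completed descriptors; the driver walks the result ring from its own
 * counter up to the firmware's and hands each entry to
 * wl1271_tx_complete_packet(). The fields used here (tx_res_if,
 * tx_results_count, TX_HW_RESULT_QUEUE_LEN) are assumptions for exposition,
 * and the bus read that refreshes the firmware counter is elided.
 */
#if 0
int wlcore_tx_complete_sketch(struct wl1271 *wl)
{
	u32 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	while (wl->tx_results_count != fw_counter) {
		int idx = wl->tx_results_count & (TX_HW_RESULT_QUEUE_LEN - 1);
		struct wl1271_tx_hw_res_descr *result =
			&wl->tx_res_if->tx_results_queue[idx];

		/* release the skb, update counters, report status to mac80211 */
		wl1271_tx_complete_packet(wl, result);
		wl->tx_results_count++;
	}

	return 0;
}
#endif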

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
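
/*
 * Illustrative sketch, not part of the driver: why the caller must *not* hold
 * wl->mutex. A flush has to take the mutex itself on every poll iteration
 * while waiting for both the queued and the in-flight frame counts to drain,
 * bounded by WL1271_TX_FLUSH_TIMEOUT (assumed to be in microseconds here).
 * The helpers and fields used (wl1271_tx_total_queue_count, wl->tx_frames_cnt)
 * are assumptions for exposition; msleep() would also need <linux/delay.h>.
 */
#if 0
void wl1271_tx_flush_sketch(struct wl1271 *wl)
{
	int timeout_ms = WL1271_TX_FLUSH_TIMEOUT / 1000;

	while (timeout_ms > 0) {
		mutex_lock(&wl->mutex);
		if (wl->tx_frames_cnt == 0 &&
		    wl1271_tx_total_queue_count(wl) == 0) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);

		msleep(20);
		timeout_ms -= 20;
	}

	wl1271_warning("Unable to flush all TX buffers");
}
#endif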

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);

void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{}

void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{}

void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{}

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{}

bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{}

bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{}