/* linux/drivers/net/wireless/intel/iwlwifi/pcie/tx.c */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "fw/api/commands.h"
#include "fw/api/datapath.h"
#include "fw/api/debug.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers
 * for the device to read from or fill.  The driver and the device exchange
 * the status of each queue via "read" and "write" pointers.  The driver
 * keeps a minimum of 2 empty entries in each circular buffer, to protect
 * against confusing the empty and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits.  If, after
 * queuing a packet for Tx, the free space drops below the low mark, the Tx
 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
 * free space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
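
/*
 * Illustrative sketch (not part of the driver): how the number of used
 * entries and the stop/resume decisions described above can be derived from
 * the "read" and "write" indexes.  The names and the exact policy are
 * assumptions for this example only; the queue size is assumed to be a
 * power of two.
 */
static inline int example_txq_used_entries(int write, int read, int size)
{
	/* distance from read to write, modulo the queue size */
	return (write - read) & (size - 1);
}

static inline bool example_txq_should_stop(int write, int read, int size,
					   int low_mark)
{
	/* reserve 2 entries so a full queue never looks like an empty one */
	int free = size - example_txq_used_entries(write, read, size) - 2;

	return free < low_mark;
}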


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{}

static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
					    u8 idx, dma_addr_t addr, u16 len)
{}

static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
{}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{}

static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
					     struct page *page)
{}

void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_cmd_meta *cmd_meta)
{}

static inline dma_addr_t
iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{}

static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
					 struct iwl_tfd *tfd)
{}

static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
				   struct iwl_cmd_meta *meta,
				   struct iwl_txq *txq, int index)
{}

/**
 * iwl_txq_free_tfd - Free all chunks referenced by the TFD at @read_ptr
 * @trans: transport private data
 * @txq: tx queue
 * @read_ptr: the TXQ read_ptr of the TFD to free
 *
 * Does NOT advance any TFD circular buffer read/write indexes.
 * Does NOT free the TFD itself (which is within the circular buffer).
 */
static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
			     int read_ptr)
{}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skbs
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty the queue by removing and destroying all BDs.
 * Free all buffers.
 * Zero-fill, but do not free, the "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{}

static void iwl_txq_stuck_timer(struct timer_list *t)
{}

int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{}

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		 int slots_num, bool cmd_queue)
{}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{}

static void iwl_txq_progress(struct iwl_txq *txq)
{}

static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
				int read_ptr, int write_ptr)
{}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When the FW advances the 'R' index, all entries between the old and the
 * new 'R' index need to be reclaimed.  As a result, some free space forms.
 * If there is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{}
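
/*
 * Illustrative sketch (not part of the driver): walking the circular index
 * from the old read pointer up to (but not including) the index reported by
 * the FW, as described above.  The queue size is assumed to be a power of
 * two; the names are hypothetical and exist only for this example.
 */
static inline int example_reclaim_walk(int read_ptr, int new_idx, int size)
{
	int reclaimed = 0;

	while (read_ptr != new_idx) {
		/* each step corresponds to one completed entry to free */
		read_ptr = (read_ptr + 1) & (size - 1);
		reclaimed++;
	}

	return reclaimed;
}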

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{}

/* Receiver address (actually, the Rx station's index into the station
 * table), combined with the Traffic ID (QoS priority), in the format used
 * by the Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)
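
/*
 * Illustrative sketch only: one way a station index and TID could be packed
 * into a single scheduler tag.  The shift and field widths here are
 * assumptions made for the example; the real encoding is defined by
 * BUILD_RAxTID() above and is hardware-specific.
 */
#define EXAMPLE_BUILD_RAXTID(sta_id, tid)	(((sta_id) << 4) | (tid))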

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{}

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation failed.
 * On success, it returns the index (>= 0) of the command in the command
 * queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{}
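
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * per the return convention described above, a negative value is an error
 * code and a non-negative value is the slot the command was placed in.
 */
static inline int example_enqueue_and_check(struct iwl_trans *trans,
					    struct iwl_host_cmd *cmd)
{
	int idx = iwl_pcie_enqueue_hcmd(trans, cmd);

	if (idx < 0)
		return idx;	/* enqueue failed, propagate the error */

	/* idx is the command's index in the command queue */
	return 0;
}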

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{}

#ifdef CONFIG_INET
static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
				   size_t len, struct sk_buff *skb)
{}

/**
 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
 * @sgt: scatter gather table
 * @addr: virtual address
 *
 * Find the SG entry that contains the given virtual address and return the
 * corresponding physical address for the TB entry.
 *
 * Returns: address for the TB entry
 */
dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
{}
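
/*
 * Illustrative sketch (assumption, not the driver implementation): one way
 * to translate a kernel virtual address into the DMA address of the mapped
 * SG entry that contains it, assuming each entry covers a virtually
 * contiguous range and that <linux/scatterlist.h> and <linux/dma-mapping.h>
 * are available.
 */
static inline dma_addr_t example_sgt_virt_to_dma(struct sg_table *sgt,
						 void *addr)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u8 *start = sg_virt(sg);

		/* does this entry's virtual range contain the address? */
		if ((u8 *)addr >= start &&
		    (u8 *)addr < start + sg_dma_len(sg))
			return sg_dma_address(sg) + ((u8 *)addr - start);
	}

	return DMA_MAPPING_ERROR;
}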

/**
 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
 * @trans: transport private data
 * @skb: the SKB to map
 * @cmd_meta: command meta to store the scatter list information for unmapping
 * @hdr: output argument for TSO headers
 * @hdr_room: requested length for TSO headers
 *
 * Allocate space for a scatter gather list and TSO headers and map the SKB
 * using the scatter gather list.  The SKB is unmapped again when the page
 * is freed at the end of the operation.
 *
 * Returns: newly allocated and mapped scatter gather table with list
 */
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_cmd_meta *cmd_meta,
				   u8 **hdr, unsigned int hdr_room)
{}
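
/*
 * Illustrative sketch (assumption, not the driver implementation): the
 * general pattern for mapping an SKB into a scatter-gather table with the
 * DMA API, so that fragments can later be looked up by virtual address as
 * in iwl_pcie_get_sgt_tb_phys().  Assumes the SKB has no frag_list and that
 * <linux/scatterlist.h> and <linux/dma-mapping.h> are available; the device
 * pointer and function name are hypothetical.
 */
static inline int example_map_skb_to_sgtable(struct device *dev,
					     struct sk_buff *skb,
					     struct sg_table *sgt)
{
	int ret;

	/* one entry for the linear part plus one per page fragment */
	if (sg_alloc_table(sgt, skb_shinfo(skb)->nr_frags + 1, GFP_ATOMIC))
		return -ENOMEM;

	ret = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
	if (ret < 0)
		goto free_table;

	/* hand the list to the DMA API for the device to read from */
	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		goto free_table;

	return 0;

free_table:
	sg_free_table(sgt);
	return ret;
}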

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

#define IWL_TX_CRC_SIZE
#define IWL_TX_DELIMITER_SIZE

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{}
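
/*
 * Illustrative sketch (assumption, not the driver code): the scheduler's
 * byte-count table holds one little-endian 16-bit entry per TFD.  A 12-bit
 * byte count and a 4-bit TB count are assumed here for the example only;
 * the real layout is hardware-defined.
 */
static inline __le16 example_bc_tbl_entry(u16 byte_cnt, u8 num_tbs)
{
	/* low 12 bits: length in bytes, high 4 bits: number of TBs */
	return cpu_to_le16((byte_cnt & 0xfff) | ((num_tbs & 0xf) << 12));
}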

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{}

static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    int read_ptr)
{}

/* Frees buffers until index _not_ inclusive */
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		      struct sk_buff_head *skbs, bool is_flush)
{}

/* Set the wr_ptr of a specific device and txq */
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{}

void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
			       unsigned long txqs, bool freeze)
{}

#define HOST_COMPLETE_TIMEOUT

static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
					 struct iwl_host_cmd *cmd)
{}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd)
{}
IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);