linux/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "main.h"
#include "dma.h"
#include "soc.h"
#include "scb.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/*
 * dma register field offset calculation
 */
#define DMA64REGOFFS(field)
#define DMA64TXREGOFFS(di, field)
#define DMA64RXREGOFFS(di, field)
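
/*
 * A minimal sketch of the usual offsetof()-based approach to such register
 * field offset calculations. The register block layout and the *_EXAMPLE
 * names below are illustrative assumptions, not the driver's definitions.
 */
struct dma64regs_example {	/* hypothetical 64-bit DMA engine registers */
	u32 control;		/* enable, et al. */
	u32 ptr;		/* last descriptor posted to the chip */
	u32 addrlow;		/* descriptor ring base address, bits 31:0 */
	u32 addrhigh;		/* descriptor ring base address, bits 63:32 */
	u32 status0;		/* current descriptor, state */
	u32 status1;		/* active descriptor, error state */
};

#define DMA64REGOFFS_EXAMPLE(field) \
	offsetof(struct dma64regs_example, field)
/* 'txbase' stands in for the per-engine register base held in dma_info */
#define DMA64TXREGOFFS_EXAMPLE(txbase, field) \
	((txbase) + DMA64REGOFFS_EXAMPLE(field))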

/*
 * DMA hardware requires each descriptor ring to be 8 KB aligned and to fit
 * within a single contiguous 8 KB region of physical address space.
 */
#define D64RINGALIGN_BITS
#define D64MAXRINGSZ
#define D64RINGALIGN

#define D64MAXDD
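
/*
 * One consistent way the ring-size constants above can relate, assuming an
 * 8 KB (1 << 13) alignment requirement and 16-byte descriptors. These
 * *_EXAMPLE values are an illustrative sketch, not authoritative for this
 * hardware.
 */
#define D64RINGALIGN_BITS_EXAMPLE	13	/* 8 KB = 1 << 13 */
#define D64RINGALIGN_EXAMPLE		(1 << D64RINGALIGN_BITS_EXAMPLE)
#define D64MAXRINGSZ_EXAMPLE		D64RINGALIGN_EXAMPLE
/* 8192-byte ring / 16-byte descriptors = at most 512 descriptors per ring */
#define D64MAXDD_EXAMPLE		(D64MAXRINGSZ_EXAMPLE / 16)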

/* transmit channel control */
#define D64_XC_XE
#define D64_XC_SE
#define D64_XC_LE
#define D64_XC_FL
#define D64_XC_PD
#define D64_XC_AE
#define D64_XC_AE_SHIFT

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK

/* transmit channel status */
#define D64_XS0_CD_MASK
#define D64_XS0_XS_MASK
#define D64_XS0_XS_SHIFT
#define D64_XS0_XS_DISABLED
#define D64_XS0_XS_ACTIVE
#define D64_XS0_XS_IDLE
#define D64_XS0_XS_STOPPED
#define D64_XS0_XS_SUSP

#define D64_XS1_AD_MASK
#define D64_XS1_XE_MASK
#define D64_XS1_XE_SHIFT
#define D64_XS1_XE_NOERR
#define D64_XS1_XE_DPE
#define D64_XS1_XE_DFU
#define D64_XS1_XE_DTE
#define D64_XS1_XE_DESRE
#define D64_XS1_XE_COREE

/* receive channel control */
/* receive enable */
#define D64_RC_RE
/* receive frame offset */
#define D64_RC_RO_MASK
#define D64_RC_RO_SHIFT
/* direct fifo receive (pio) mode */
#define D64_RC_FM
/* separate rx header descriptor enable */
#define D64_RC_SH
/* overflow continue */
#define D64_RC_OC
/* parity check disable */
#define D64_RC_PD
/* address extension bits */
#define D64_RC_AE
#define D64_RC_AE_SHIFT

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN
/* rx overflow continue */
#define DMA_CTRL_ROC
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED

/* receive descriptor table pointer */
#define D64_RP_LD_MASK

/* receive channel status */
#define D64_RS0_CD_MASK
#define D64_RS0_RS_MASK
#define D64_RS0_RS_SHIFT
#define D64_RS0_RS_DISABLED
#define D64_RS0_RS_ACTIVE
#define D64_RS0_RS_IDLE
#define D64_RS0_RS_STOPPED
#define D64_RS0_RS_SUSP

#define D64_RS1_AD_MASK
#define D64_RS1_RE_MASK
#define D64_RS1_RE_SHIFT
#define D64_RS1_RE_NOERR
#define D64_RS1_RE_DPO
#define D64_RS1_RE_DFU
#define D64_RS1_RE_DTE
#define D64_RS1_RE_DESRE
#define D64_RS1_RE_COREE

/* fifoaddr */
#define D64_FA_OFF_MASK
#define D64_FA_SEL_MASK
#define D64_FA_SEL_SHIFT
#define D64_FA_SEL_XDD
#define D64_FA_SEL_XDP
#define D64_FA_SEL_RDD
#define D64_FA_SEL_RDP
#define D64_FA_SEL_XFD
#define D64_FA_SEL_XFP
#define D64_FA_SEL_RFD
#define D64_FA_SEL_RFP
#define D64_FA_SEL_RSD
#define D64_FA_SEL_RSP

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS
#define D64_CTRL1_EOT
#define D64_CTRL1_IOC
#define D64_CTRL1_EOF
#define D64_CTRL1_SOF

/* descriptor control flags 2 */
/* buffer byte count; the real data length must be <= 16 KB */
#define D64_CTRL2_BC_MASK
/* address extension bits */
#define D64_CTRL2_AE
#define D64_CTRL2_AE_SHIFT
/* parity bit */
#define D64_CTRL2_PARITY

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK

#define D64_RX_FRM_STS_LEN
#define D64_RX_FRM_STS_OVFL
#define D64_RX_FRM_STS_DSCRCNT
#define D64_RX_FRM_STS_DATATYPE

/*
 * Packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile-time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM

#define MAXNAMEL

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)
#define I2B(index, type)
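
/*
 * A plausible sketch of the byte/index conversion, assuming descriptors of
 * type 'type' are packed contiguously in the ring; the *_EXAMPLE names are
 * illustrative, not the driver's definitions.
 */
#define B2I_EXAMPLE(bytes, type)	((bytes) / sizeof(type))
#define I2B_EXAMPLE(index, type)	((index) * sizeof(type))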

#define PCI32ADDR_HIGH
#define PCI32ADDR_HIGH_SHIFT

#define PCI64ADDR_HIGH
#define PCI64ADDR_HIGH_SHIFT

/*
 * DMA Descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {};
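
/*
 * Sketch of a typical 64-bit DMA descriptor layout: four little-endian
 * 32-bit words holding control bits, a byte count and a 64-bit buffer
 * address. The field names here are an illustrative assumption, not
 * necessarily the exact layout used by this hardware.
 */
struct dma64desc_example {
	__le32 ctrl1;		/* SOF/EOF/EOT/IOC style control bits */
	__le32 ctrl2;		/* buffer byte count and address extension */
	__le32 addrlow;		/* data buffer address, bits 31:0 */
	__le32 addrhigh;	/* data buffer address, bits 63:32 */
};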

/* dma engine software state */
struct dma_info {};

/* Check for an odd number of 1's (odd parity) */
static u32 parity32(__le32 data)
{}

static bool dma64_dd_parity(struct dma64desc *dd)
{}
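
/*
 * A minimal sketch of odd-parity detection by XOR folding: each shift-and-xor
 * halves the width under consideration until the parity of all 32 bits ends
 * up in bit 0. Illustrative only; the driver's parity32()/dma64_dd_parity()
 * may differ in detail.
 */
static u32 parity32_example(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;	/* 1 when an odd number of bits were set */
}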

/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{}

static uint txd(struct dma_info *di, uint x)
{}

static uint rxd(struct dma_info *di, uint x)
{}

static uint nexttxd(struct dma_info *di, uint i)
{}

static uint prevtxd(struct dma_info *di, uint i)
{}

static uint nextrxd(struct dma_info *di, uint i)
{}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{}
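
/*
 * Sketch of the power-of-two ring arithmetic that bumping helpers like the
 * ones above typically implement, assuming the descriptor counts are powers
 * of two so that "& (n - 1)" is equivalent to "% n". Names are illustrative.
 */
static uint ring_wrap_example(uint x, uint n)
{
	return x & (n - 1);		/* wrap an index into [0, n) */
}

static uint ring_next_example(uint i, uint n)
{
	return ring_wrap_example(i + 1, n);	/* bump to the next slot */
}

static uint ring_active_example(uint head, uint tail, uint n)
{
	/* number of descriptors currently posted between head and tail */
	return ring_wrap_example(tail - head, n);
}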

static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{}

static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{}

static bool _dma_descriptor_align(struct dma_info *di)
{}

/*
 * The descriptor table must start at the alignment dictated by the DMA
 * hardware, so the allocated memory must be large enough to allow rounding
 * the start address up to that alignment.
 */
static void *dma_alloc_consistent(struct dma_info *di, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{}
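
/*
 * Sketch of the usual trick behind the requirement above: over-allocate by
 * the alignment amount and round the start address up. ALIGN() is the
 * standard kernel rounding macro; the function name and signature here are
 * illustrative only.
 */
static void *align_within_example(void *raw, u16 align_bits)
{
	unsigned long align = 1UL << align_bits;

	/* round the CPU address up to the next 'align'-byte boundary */
	return (void *)ALIGN((unsigned long)raw, align);
}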

static
u8 dma_align_sizetobits(uint size)
{}

/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page boundary,
 * it is freed and the allocation is redone at a location aligned to the
 * descriptor ring size, which guarantees that the ring cannot cross a page
 * boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{}
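
/*
 * Sketch of the boundary test behind the retry scheme described above: given
 * a bus address 'pa' and a ring of 'size' bytes, report whether the ring
 * would straddle a 'boundary'-aligned window (boundary being a power of two).
 * The name and signature are illustrative, not the driver's.
 */
static bool ring_crosses_boundary_example(dma_addr_t pa, uint size,
					  u32 boundary)
{
	dma_addr_t window = ~(dma_addr_t)(boundary - 1);

	/* the first and last byte must fall within the same window */
	return (pa & window) != ((pa + size - 1) & window);
}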

static bool dma64_alloc(struct dma_info *di, uint direction)
{}

static bool _dma_alloc(struct dma_info *di, uint direction)
{}

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset)
{}

static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{}

/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{}

static void _dma_rxenable(struct dma_info *di)
{}

void dma_rxinit(struct dma_pub *pub)
{}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{}

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{}

/*
 * !! rx entry routine
 * returns the number of packets in the next frame, or 0 if there are no more.
 *   If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is
 *   supported via a packet chain; otherwise the frame is treated as a giant
 *   packet and is tossed.
 *   DMA scattering starts with a normal DMA header followed by the first
 *   buffer's data. Once the maximum buffer size is reached, the data
 *   continues in the next DMA descriptor's buffer WITHOUT a DMA header.
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{}
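
/*
 * Sketch of the scatter accounting described above, assuming the first
 * buffer starts with a little-endian 16-bit frame length (the "DMA header")
 * and each posted buffer holds at most 'bufsize' bytes. The helper name and
 * header layout are illustrative assumptions.
 */
static uint rx_bufcount_example(const void *first_buf, uint hdrlen,
				uint bufsize)
{
	uint len = le16_to_cpu(*(const __le16 *)first_buf);

	if (hdrlen + len <= bufsize)
		return 1;	/* whole frame fits in a single buffer */

	/* the remainder continues in the following buffers without a header */
	return 1 + DIV_ROUND_UP(hdrlen + len - bufsize, bufsize);
}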

static bool dma64_rxidle(struct dma_info *di)
{}

static bool dma64_txidle(struct dma_info *di)
{}

/*
 * post receive buffers
 *  Return false if the refill failed completely or a DMA mapping failed. The
 *  ring is then empty, which will stall the rx DMA; the caller may want to
 *  call rxfill again as soon as possible. This is unlikely to happen on a
 *  memory-rich NIC, but often does on a memory-constrained dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{}
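
/*
 * Condensed sketch of the refill loop behind the semantics above: allocate
 * an skb, DMA-map it, and stop at the first failure, possibly leaving the
 * ring only partially filled. dev_alloc_skb()/dma_map_single() are the
 * standard kernel APIs; the descriptor write itself is only indicated by a
 * comment, so this is illustrative rather than the driver's implementation.
 */
static uint rxfill_sketch_example(struct device *dev, uint bufsize, uint wanted)
{
	uint posted = 0;

	while (posted < wanted) {
		struct sk_buff *skb = dev_alloc_skb(bufsize);
		dma_addr_t pa;

		if (skb == NULL)
			break;		/* out of memory: partial refill */

		pa = dma_map_single(dev, skb->data, bufsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, pa)) {
			dev_kfree_skb_any(skb);
			break;		/* mapping failed */
		}

		/* here the real code writes pa/bufsize into the next free rx
		 * descriptor and remembers skb for later reclaim */
		posted++;
	}

	/* 0 posted with wanted > 0 corresponds to returning false above */
	return posted;
}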

void dma_rxreclaim(struct dma_pub *pub)
{}

void dma_counterreset(struct dma_pub *pub)
{}

/* get the address of the variable so it can be changed later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{}

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{}

void dma_txsuspend(struct dma_pub *pub)
{}

void dma_txresume(struct dma_pub *pub)
{}

bool dma_txsuspended(struct dma_pub *pub)
{}

void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{}

bool dma_txreset(struct dma_pub *pub)
{}

bool dma_rxreset(struct dma_pub *pub)
{}

static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{}

static void ampdu_finalize(struct dma_info *di)
{}

static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
{}

/* Update count of available tx descriptors based on current DMA state */
static void dma_update_txavail(struct dma_info *di)
{}
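
/*
 * Sketch of the usual bookkeeping behind such an update: with a ring of ntxd
 * descriptors (a power of two) and head/tail indices txin/txout, one slot is
 * kept unused so that an empty ring can be distinguished from a full one.
 * The name and signature are illustrative.
 */
static uint txavail_example(uint ntxd, uint txin, uint txout)
{
	uint active = (txout - txin) & (ntxd - 1);	/* descriptors in use */

	return ntxd - active - 1;	/* free slots, minus the sentinel */
}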

/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 *   An error (tossed frames) could be fatal and cause many subsequent
 *   hard-to-debug problems.
 */
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p)
{}

void dma_txflush(struct dma_pub *pub)
{}

int dma_txpending(struct dma_pub *pub)
{}

/*
 * If we have an active AMPDU session and are not transmitting,
 * this function will force tx to start.
 */
void dma_kick_tx(struct dma_pub *pub)
{}

/*
 * Reclaim the next completed txd (txds if using chained buffers) in the
 * specified range and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted, as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA, as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packets regardless of the hardware pointer values.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{}
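
/*
 * Sketch of how the reclaim limit can be chosen for each range, assuming the
 * caller passes in the ring indices that the hardware "CurrDescr" and
 * "ActiveDescr" pointers currently correspond to; the parameter names are
 * illustrative, not the driver's.
 */
static uint reclaim_end_example(enum txd_range range, uint txout,
				uint curr_idx, uint active_idx)
{
	switch (range) {
	case DMA_RANGE_TRANSMITTED:
		return curr_idx;	/* up to hardware "CurrDescr" */
	case DMA_RANGE_TRANSFERED:
		return active_idx;	/* up to hardware "ActiveDescr" */
	case DMA_RANGE_ALL:
	default:
		return txout;		/* everything software has posted */
	}
}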

/*
 * mac80211-initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller-specified DMA chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{}