linux/drivers/net/ethernet/sfc/siena/farch.c

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This, combined with the fact
 * that we only get 1 TX event per descriptor cache, means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES
#define TX_DC_ENTRIES_ORDER

#define RX_DC_ENTRIES
#define RX_DC_ENTRIES_ORDER

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE
#define EFX_MAX_INT_ERRORS

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST
#define _EFX_CHANNEL_MAGIC_FILL
#define _EFX_CHANNEL_MAGIC_RX_DRAIN
#define _EFX_CHANNEL_MAGIC_TX_DRAIN

#define _EFX_CHANNEL_MAGIC(_code, _data)
#define _EFX_CHANNEL_MAGIC_CODE(_magic)

#define EFX_CHANNEL_MAGIC_TEST(_channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{}
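
/* A minimal sketch of the sizing implied by the comment above: since only 4KB
 * buffer-table pages can back event queues and descriptor rings, a region of
 * 'len' bytes needs len / 4KB entries, rounded up.  The helper name is
 * illustrative, not part of the driver.
 */
static unsigned int example_special_buffer_entries(unsigned int len)
{
	return (len + 4096 - 1) / 4096;	/* number of 4KB buffer-table pages */
}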

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes TX_DESC_WPTR, the write pointer for the TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{}
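
/* A simplified sketch of the write path described above: copy each newly
 * inserted software-ring entry into the host-memory hardware ring at the same
 * masked index, then publish the new write pointer (the doorbell).  The
 * structure, field names and doorbell callback are illustrative, not the
 * driver's.
 */
struct example_tx_ring {
	unsigned int insert_count;	/* software producer index */
	unsigned int write_count;	/* last index pushed to hardware */
	unsigned int ptr_mask;		/* ring size minus one (power of two) */
	u64 *hw_ring;			/* hardware descriptor ring in host memory */
	const u64 *sw_ring;		/* software descriptor staging ring */
};

static void example_tx_write(struct example_tx_ring *ring,
			     void (*notify_wptr)(unsigned int wptr))
{
	while (ring->write_count != ring->insert_count) {
		unsigned int idx = ring->write_count & ring->ptr_mask;

		ring->hw_ring[idx] = ring->sw_ring[idx];	/* build descriptor */
		ring->write_count++;
	}
	notify_wptr(ring->write_count & ring->ptr_mask);	/* ring the doorbell */
}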

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{}


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{}
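
/* A simplified sketch of the RX flush batching described above: keep
 * requesting RX queue flushes while any are pending, but never allow more
 * than a small number to be outstanding at once (the driver's limit is
 * EFX_RX_FLUSH_COUNT; the counters and callback here are illustrative).
 */
static void example_kick_rx_flushes(unsigned int *pending,
				    unsigned int *outstanding,
				    unsigned int limit,
				    void (*flush_one_rxq)(void))
{
	while (*pending > 0 && *outstanding < limit) {
		flush_one_rxq();	/* request one RX queue flush */
		(*pending)--;
		(*outstanding)++;
	}
}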

int efx_farch_fini_dmaq(struct efx_nic *efx)
{}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to efx_siena_realloc_channels() we
 * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
 * of 4 for batched flush requests; and the efx->active_queues gets messed up
 * because we keep incrementing for the newly initialised queues, but it never
 * went to zero previously.  Then we get a timeout every time we try to restart
 * the queues, as efx->active_queues doesn't go back to zero when the queues
 * should have been flushed.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{}
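
/* A minimal sketch of the fixup described in the comment above: after an FLR
 * no flush completion events will arrive, so the flush and active-queue
 * accounting is simply reset rather than waited on.  The struct here is a
 * stand-in for the corresponding counters on struct efx_nic.
 */
struct example_flush_counters {
	unsigned int rxq_flush_pending;
	unsigned int rxq_flush_outstanding;
	unsigned int active_queues;
};

static void example_finish_flr(struct example_flush_counters *c)
{
	c->rxq_flush_pending = 0;
	c->rxq_flush_outstanding = 0;
	c->active_queues = 0;
}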


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{}
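
/* A minimal sketch of the batched-completion arithmetic described above,
 * assuming the event reports the last completed descriptor index (inclusive)
 * and the ring size is a power of two.  Names are illustrative only.
 */
static unsigned int example_tx_completions(unsigned int event_index,
					   unsigned int read_count,
					   unsigned int ring_mask)
{
	/* ring_mask is ring_size - 1; the subtraction wraps naturally */
	return (event_index + 1 - read_count) & ring_mask;
}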

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.  The "is multicast" and "matches multicast
 * filter" flags can also be used to discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{}
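
/* A minimal sketch of the discard decision described above, using illustrative
 * boolean parameters in place of the driver's event fields.
 */
static bool example_rx_should_discard(bool unicast_mismatch, bool is_multicast,
				      bool matches_mcast_filter)
{
	if (is_multicast)
		return !matches_mcast_filter;	/* drop non-matching multicast */
	return unicast_mismatch;		/* drop wrong-destination unicast */
}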

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{}

int efx_farch_ev_init(struct efx_channel *channel)
{}

void efx_farch_ev_fini(struct efx_channel *channel)
{}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{}


void efx_farch_ev_test_generate(struct efx_channel *channel)
{}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{}

/* Set up the RSS indirection table.
 * This maps from the hash value of the packet to an RX queue.
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{}
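
/* A minimal sketch of populating an indirection table as described above:
 * the packet hash indexes the table and each entry names an RX queue.  The
 * 128-entry size and round-robin spread are assumptions for illustration.
 */
#define EXAMPLE_RSS_INDIR_ENTRIES 128

static void example_fill_indir_table(u32 *table, unsigned int n_rx_queues)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_RSS_INDIR_ENTRIES; i++)
		table[i] = i % n_rx_queues;	/* spread hash buckets evenly */
}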

void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
{}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{}
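
/* A simplified sketch of the SRAM layout listed above: the four regions are
 * laid out back to back in ascending order and must fit below sram_lim_qw.
 * The region sizes are illustrative parameters; the real driver derives them
 * from channel, VF and descriptor-cache counts.
 */
struct example_sram_layout {
	unsigned int vf_buftbl_base;
	unsigned int rx_dc_base;
	unsigned int tx_dc_base;
};

static int example_dimension_sram(unsigned int sram_lim_qw,
				  unsigned int channel_buftbl_qw,
				  unsigned int vf_buftbl_qw,
				  unsigned int rx_dc_qw,
				  unsigned int tx_dc_qw,
				  struct example_sram_layout *out)
{
	out->vf_buftbl_base = channel_buftbl_qw;
	out->rx_dc_base = out->vf_buftbl_base + vf_buftbl_qw;
	out->tx_dc_base = out->rx_dc_base + rx_dc_qw;

	/* everything must fit within the SRAM limit */
	if (out->tx_dc_base + tx_dc_qw > sram_lim_qw)
		return -1;
	return 0;
}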

u32 efx_farch_fpga_ver(struct efx_nic *efx)
{}

void efx_farch_init_common(struct efx_nic *efx)
{}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX

enum efx_farch_filter_type {};

enum efx_farch_filter_table_id {};

enum efx_farch_filter_index {};

struct efx_farch_filter_spec {};

struct efx_farch_filter_table {};

struct efx_farch_filter_state {};

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{}
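
/* A straightforward bit-serial sketch of the hash described above: clock the
 * 32 key bits (MSB first) through a 16-bit LFSR with polynomial
 * x^16 + x^3 + 1, starting from the all-ones state.  The driver's real
 * implementation is an optimised equivalent and its bit ordering may differ;
 * this version is illustrative only.
 */
static u16 example_filter_hash_bitwise(u32 key)
{
	u16 state = 0xffff;	/* initial LFSR state */
	int i;

	for (i = 31; i >= 0; i--) {
		unsigned int in = (key >> i) & 1;
		unsigned int msb = (state >> 15) & 1;

		state <<= 1;
		if (in ^ msb)
			state ^= 0x0009;	/* x^3 and x^0 terms of the polynomial */
	}
	return state;
}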

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{}

static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{}

static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{}

static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{}

static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{}

static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{}

static void
efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
{}

/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{}

static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] =;

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] =;

#define EFX_FARCH_FILTER_INDEX_WIDTH
#define EFX_FARCH_FILTER_INDEX_MASK

static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{}
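
/* An illustrative sketch of the ID packing implied above: the low bits carry
 * the index within a table and the bits above them identify the match-priority
 * range.  The 8-bit index width is a placeholder, not the driver's actual
 * EFX_FARCH_FILTER_INDEX_WIDTH.
 */
#define EXAMPLE_FILTER_INDEX_WIDTH 8
#define EXAMPLE_FILTER_INDEX_MASK ((1u << EXAMPLE_FILTER_INDEX_WIDTH) - 1)

static inline u32 example_filter_make_id(unsigned int range, unsigned int index)
{
	return (range << EXAMPLE_FILTER_INDEX_WIDTH) | index;
}

static inline unsigned int example_filter_id_range(u32 id)
{
	return id >> EXAMPLE_FILTER_INDEX_WIDTH;
}

static inline unsigned int example_filter_id_index(u32 id)
{
	return id & EXAMPLE_FILTER_INDEX_MASK;
}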

u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{}

s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{}

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{}

static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{}

int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{}

int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{}

static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{}

int efx_farch_filter_clear_rx(struct efx_nic *efx,
			       enum efx_filter_priority priority)
{}

u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{}

s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{}

/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{}

void efx_farch_filter_table_remove(struct efx_nic *efx)
{}

int efx_farch_filter_table_probe(struct efx_nic *efx)
{}

/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{}

#ifdef CONFIG_RFS_ACCEL

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{}

#endif /* CONFIG_RFS_ACCEL */

void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{}