linux/drivers/net/ethernet/sfc/falcon/farch.c

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES
#define TX_DC_ENTRIES_ORDER

#define RX_DC_ENTRIES
#define RX_DC_ENTRIES_ORDER

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE
#define EF4_MAX_INT_ERRORS
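
/* Illustrative sketch only (not the driver's code): one way to express the
 * "too many internal errors inside the expiry window" policy described
 * above.  The jiffies-based timestamp, the HZ conversion and the parameter
 * names are assumptions for the example; the driver keeps equivalent
 * per-NIC state.
 */
static inline bool ef4_example_int_error_fatal(unsigned long *error_expire,
					       unsigned int *error_count,
					       unsigned int max_errors,
					       unsigned int expire_secs)
{
	/* Start a fresh window if the previous one has lapsed */
	if (time_after(jiffies, *error_expire)) {
		*error_expire = jiffies + expire_secs * HZ;
		*error_count = 0;
	}

	/* Fatal once the error budget for this window is exhausted */
	return ++(*error_count) > max_errors;
}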

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST
#define _EF4_CHANNEL_MAGIC_FILL
#define _EF4_CHANNEL_MAGIC_RX_DRAIN
#define _EF4_CHANNEL_MAGIC_TX_DRAIN

#define _EF4_CHANNEL_MAGIC(_code, _data)
#define _EF4_CHANNEL_MAGIC_CODE(_magic)

#define EF4_CHANNEL_MAGIC_TEST(_channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)
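
/* Illustrative sketch only: the driver-generated "magic" values above are
 * assumed to pack a small event code together with per-queue data (for
 * example a channel or RX/TX queue number) into one u32, so that a
 * software-generated event seen on an event queue can be matched back to
 * its origin.  The 8-bit data field below is an assumption for the example;
 * the driver's actual field widths are given by the macros above.
 */
#define EXAMPLE_MAGIC_ENCODE(_code, _data)	(((_code) << 8) | ((_data) & 0xff))
#define EXAMPLE_MAGIC_CODE(_magic)		((_magic) >> 8)
#define EXAMPLE_MAGIC_DATA(_magic)		((_magic) & 0xff)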

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{}
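
/* Illustrative sketch only: the idea behind a masked register compare, here
 * over a 128-bit value treated as two 64-bit halves.  XOR exposes differing
 * bits and the mask selects which of them matter; the u64[2] view of
 * ef4_oword_t is an assumption for the example (the real accessors live in
 * bitfield.h).
 */
static inline bool ef4_example_masked_differ(const u64 a[2], const u64 b[2],
					     const u64 mask[2])
{
	return ((a[0] ^ b[0]) & mask[0]) || ((a[1] ^ b[1]) & mask[1]);
}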

int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{}

/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{}
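
/* Illustrative sketch only: because special buffers are backed by 4KB
 * buffer table entries, a buffer of 'len' bytes consumes len rounded up to
 * whole 4KB pages worth of buffer IDs.  The constant and helper name are
 * hypothetical.
 */
#define EXAMPLE_SPECIAL_BUF_SIZE	4096U

static inline unsigned int ef4_example_buftbl_entries(unsigned int len)
{
	return (len + EXAMPLE_SPECIAL_BUF_SIZE - 1) / EXAMPLE_SPECIAL_BUF_SIZE;
}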

static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{}
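
/* Illustrative sketch only: the general shape of the TX write path described
 * above - convert every software-ring entry added since the last push into a
 * hardware descriptor, then ring the doorbell once with the final write
 * pointer.  The structure and callback names are assumptions for the
 * example; the driver's real descriptor format and doorbell write live in
 * ef4_farch_tx_write() and ef4_farch_notify_tx_desc().
 */
struct example_tx_ring {
	unsigned int insert_count;	/* entries queued by the stack */
	unsigned int write_count;	/* entries already pushed to hardware */
	unsigned int ptr_mask;		/* ring size - 1 (power of two) */
};

static inline void example_tx_push(struct example_tx_ring *ring,
				   void (*build_desc)(unsigned int index),
				   void (*ring_doorbell)(unsigned int wptr))
{
	while (ring->write_count != ring->insert_count) {
		/* Counts are free-running; mask them into the ring */
		build_desc(ring->write_count & ring->ptr_mask);
		++ring->write_count;
	}

	/* A single doorbell covers everything written above */
	ring_doorbell(ring->write_count & ring->ptr_mask);
}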

unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{}


/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{}

void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{}

void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{}

static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{}
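
/* Illustrative sketch only: the overall shape of the flush sequence - after
 * the TX flushes have been requested, keep a bounded number of RX flush
 * requests outstanding and poll until every queue reports completion or we
 * give up.  The callbacks, the 100ms msleep() polling and the timeout are
 * assumptions for the example; the driver waits on the real flush/drain
 * events instead of polling.  The limit of 4 outstanding RX flushes matches
 * the batching limit mentioned in the FLR comment below.
 */
static inline int example_wait_for_flushes(void (*kick_rx_flushes)(unsigned int max_outstanding),
					   bool (*all_flushed)(void))
{
	unsigned int attempts = 50;	/* ~5 seconds at 100ms per poll */

	while (!all_flushed()) {
		if (!attempts--)
			return -ETIMEDOUT;
		kick_rx_flushes(4);
		msleep(100);
	}

	return 0;
}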

int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and efx->active_queues gets messed up because
 * we keep incrementing it for the newly initialised queues, but it never went
 * to zero previously.  Then we get a timeout every time we try to restart the
 * queues, as efx->active_queues doesn't go back to zero when we should be
 * flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{}
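
/* Illustrative sketch only: per the comment above, recovering from an FLR
 * with missing flush-completion events amounts to forcing the flush
 * accounting back to a clean state so the next flush cycle starts from
 * zero.  The atomic_t representation of the counters is an assumption for
 * the example.
 */
static inline void example_reset_flush_accounting(atomic_t *rxq_flush_outstanding,
						  atomic_t *active_queues)
{
	atomic_set(rxq_flush_outstanding, 0);
	atomic_set(active_queues, 0);
}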


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{}

/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{}

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{}
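
/* Illustrative sketch only: turning a batched "completed up to this index"
 * TX event into a count of newly completed ring entries.  Because the read
 * count is free-running and the ring size is a power of two, the wrap-safe
 * distance is just a masked subtraction.  Parameter names are assumptions
 * for the example.
 */
static inline unsigned int example_tx_completed(unsigned int ev_desc_ptr,
						unsigned int read_count,
						unsigned int ptr_mask)
{
	return (ev_desc_ptr - read_count) & ptr_mask;
}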

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.  The "is multicast" and "matches multicast
 * filter" flags can also be used to discard non-matching multicast packets.
 */
static void
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{}

/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{}

/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{}

static void
ef4_farch_handle_drain_event(struct ef4_channel *channel)
{}

static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
					     ef4_qword_t *event)
{}

static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{}

int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
{}

/* Allocate buffer table entries for event queue */
int ef4_farch_ev_probe(struct ef4_channel *channel)
{}

int ef4_farch_ev_init(struct ef4_channel *channel)
{}

void ef4_farch_ev_fini(struct ef4_channel *channel)
{}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{}


void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void ef4_farch_interrupts(struct ef4_nic *efx,
				      bool enabled, bool force)
{}

void ef4_farch_irq_enable_master(struct ef4_nic *efx)
{}

void ef4_farch_irq_disable_master(struct ef4_nic *efx)
{}

/* Generate a test interrupt
 * Interrupts must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
{}
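
/* Illustrative sketch only: conceptually, RSS takes some bits of the packet
 * hash, indexes a small indirection table with them, and the entry names
 * the destination RX queue.  The 128-entry table size and the round-robin
 * fill are assumptions for the example.
 */
#define EXAMPLE_RSS_TABLE_SIZE	128

static inline void example_rss_fill(u8 *table, unsigned int n_rx_queues)
{
	unsigned int i;

	/* Spread the entries evenly across the RX queues (n_rx_queues > 0) */
	for (i = 0; i < EXAMPLE_RSS_TABLE_SIZE; i++)
		table[i] = i % n_rx_queues;
}

static inline u8 example_rss_lookup(const u8 *table, u32 hash)
{
	return table[hash % EXAMPLE_RSS_TABLE_SIZE];
}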

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{}
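
/* Illustrative sketch only: carving the descriptor caches out of SRAM in the
 * order listed above (buffer table from the bottom, TX caches at the very
 * top, RX caches just below them).  Treating the layout list as being in
 * increasing-address order, and taking the per-queue cache sizes and the
 * queue count as parameters, are assumptions for the example.
 */
struct example_sram_layout {
	unsigned int rx_dc_base;
	unsigned int tx_dc_base;
};

static inline struct example_sram_layout
example_dimension_sram(unsigned int sram_lim, unsigned int n_queues,
		       unsigned int tx_dc_entries, unsigned int rx_dc_entries)
{
	struct example_sram_layout layout;

	layout.tx_dc_base = sram_lim - n_queues * tx_dc_entries;
	layout.rx_dc_base = layout.tx_dc_base - n_queues * rx_dc_entries;
	return layout;
}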

u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{}

void ef4_farch_init_common(struct ef4_nic *efx)
{}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to the pipelined implementation we need to program the H/W with a
 * value that is larger than the hop limit we want.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
 * table is full.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_MAX

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX

enum ef4_farch_filter_type {};

enum ef4_farch_filter_table_id {};

enum ef4_farch_filter_index {};

struct ef4_farch_filter_spec {};

struct ef4_farch_filter_table {};

struct ef4_farch_filter_state {};

static void
ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 ef4_farch_filter_hash(u32 key)
{}
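
/* Illustrative sketch only: a plain bit-serial form of a 16-bit LFSR/CRC
 * with characteristic polynomial x^16 + x^3 + 1 and initial state 0xffff,
 * fed with the 32 bits of the key as described above.  The driver's real
 * hash is an equivalent but heavily optimised closed form, and its bit
 * ordering may differ from the MSB-first choice made here.
 */
static inline u16 example_filter_hash(u32 key)
{
	u16 state = 0xffff;	/* initial LFSR state */
	int i;

	for (i = 31; i >= 0; i--) {
		bool feedback = ((state >> 15) ^ (key >> i)) & 1;

		state <<= 1;
		if (feedback)
			state ^= 0x0009;	/* x^3 + 1 taps; x^16 is implicit */
	}

	return state;
}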

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 ef4_farch_filter_increment(u32 key)
{}
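
/* Illustrative sketch only: the bounded open-addressing search implied by
 * the comments above - start at the hashed slot, step by a per-key
 * increment, and give up once the programmed search limit is reached.
 * The callbacks, the power-of-two table size and the return convention are
 * assumptions for the example.
 */
static inline int example_filter_search(unsigned int hash, unsigned int incr,
					unsigned int table_size,
					unsigned int search_limit,
					bool (*slot_matches)(unsigned int index),
					bool (*slot_empty)(unsigned int index))
{
	unsigned int depth, index = hash & (table_size - 1);

	for (depth = 0; depth <= search_limit; depth++) {
		if (slot_matches(index) || slot_empty(index))
			return index;
		index = (index + incr) & (table_size - 1);
	}

	return -1;	/* nothing found within the search limit */
}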

static enum ef4_farch_filter_table_id
ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
{}

static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
{}

static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
{}

static int
ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
			       const struct ef4_filter_spec *gen_spec)
{}

static void
ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
			     const struct ef4_farch_filter_spec *spec)
{}

static void
ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
			      struct ef4_farch_filter_spec *spec)
{}

/* Build a filter entry and return its n-tuple key. */
static u32 ef4_farch_filter_build(ef4_oword_t *filter,
				  struct ef4_farch_filter_spec *spec)
{}

static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
				   const struct ef4_farch_filter_spec *right)
{}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EF4_FARCH_FILTER_MATCH_PRI_COUNT

static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] =;

static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] =;

#define EF4_FARCH_FILTER_INDEX_WIDTH
#define EF4_FARCH_FILTER_INDEX_MASK

static inline u32
ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
			 unsigned int index)
{}

static inline enum ef4_farch_filter_table_id
ef4_farch_filter_id_table_id(u32 id)
{}

static inline unsigned int ef4_farch_filter_id_index(u32 id)
{}
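
/* Illustrative sketch only: an external filter ID built by packing the
 * match-priority range above a table index, so the two halves can be
 * recovered with a shift and a mask and IDs sort by range first.  The 8-bit
 * index width is an assumption; the driver's real width and mask are the
 * EF4_FARCH_FILTER_INDEX_* macros above.
 */
#define EXAMPLE_FILTER_INDEX_WIDTH	8
#define EXAMPLE_FILTER_INDEX_MASK	((1 << EXAMPLE_FILTER_INDEX_WIDTH) - 1)

static inline u32 example_filter_make_id(unsigned int range, unsigned int index)
{
	return range << EXAMPLE_FILTER_INDEX_WIDTH | index;
}

static inline unsigned int example_filter_id_range(u32 id)
{
	return id >> EXAMPLE_FILTER_INDEX_WIDTH;
}

static inline unsigned int example_filter_id_index(u32 id)
{
	return id & EXAMPLE_FILTER_INDEX_MASK;
}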

u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
{}

s32 ef4_farch_filter_insert(struct ef4_nic *efx,
			    struct ef4_filter_spec *gen_spec,
			    bool replace_equal)
{}

static void
ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx)
{}

static int ef4_farch_filter_remove(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum ef4_filter_priority priority)
{}

int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
				 enum ef4_filter_priority priority,
				 u32 filter_id)
{}

int ef4_farch_filter_get_safe(struct ef4_nic *efx,
			      enum ef4_filter_priority priority,
			      u32 filter_id, struct ef4_filter_spec *spec_buf)
{}

static void
ef4_farch_filter_table_clear(struct ef4_nic *efx,
			     enum ef4_farch_filter_table_id table_id,
			     enum ef4_filter_priority priority)
{}

int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
			       enum ef4_filter_priority priority)
{}

u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
				   enum ef4_filter_priority priority)
{}

s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
				enum ef4_filter_priority priority,
				u32 *buf, u32 size)
{}

/* Restore filter state after reset */
void ef4_farch_filter_table_restore(struct ef4_nic *efx)
{}

void ef4_farch_filter_table_remove(struct ef4_nic *efx)
{}

int ef4_farch_filter_table_probe(struct ef4_nic *efx)
{}

/* Update scatter enable flags for filters pointing to our own RX queues */
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
{}

#ifdef CONFIG_RFS_ACCEL

s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
				struct ef4_filter_spec *gen_spec)
{}

bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
				     unsigned int index)
{}

#endif /* CONFIG_RFS_ACCEL */

void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
{}