// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2019 Intel Corporation.";

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{}
module_exit(fm10k_exit_module);
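
/*
 * Illustrative sketch (not the in-tree implementation): a module
 * init/exit pair for a PCI driver like this one typically creates the
 * shared workqueue noted above on load, registers the PCI driver, and
 * tears both down in reverse order on unload.  The demo_* names and
 * the demo_pci_driver object are assumptions, not the driver's real
 * symbols.
 */
static struct pci_driver demo_pci_driver = {
	.name = "demo",
	/* probe/remove/id_table omitted for brevity */
};

static int __maybe_unused demo_init(void)
{
	/* single workqueue shared by the whole driver */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	return pci_register_driver(&demo_pci_driver);
}

static void __maybe_unused demo_exit(void)
{
	pci_unregister_driver(&demo_pci_driver);
	destroy_workqueue(fm10k_workqueue);
}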

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{}
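
/*
 * Illustrative sketch (not the in-tree implementation): "replacing" a
 * used receive buffer means backing it with a freshly allocated,
 * DMA-mapped page.  The demo_* name is hypothetical; the allocation and
 * mapping pattern below is the standard one for streaming Rx DMA.
 */
static struct page * __maybe_unused demo_map_rx_page(struct device *dev,
						     dma_addr_t *dma)
{
	struct page *page = dev_alloc_page();

	if (!page)
		return NULL;

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		/* mapping failed, give the page back and report failure */
		__free_page(page);
		return NULL;
	}

	return page;
}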

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{}
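
/*
 * Illustrative sketch (not the in-tree implementation): the reuse check
 * above boils down to "is this page still exclusively ours and local to
 * this NUMA node?".  If so, the Rx path can flip the buffer offset to
 * the other half of the page instead of allocating a new one.  The
 * demo_* name is hypothetical.
 */
static bool __maybe_unused demo_can_reuse_page(struct page *page)
{
	/* pages handed out from the pfmemalloc reserves must not be kept */
	if (unlikely(page_is_pfmemalloc(page)))
		return false;

	/* remote pages are cheaper to replace than to keep bouncing */
	if (page_to_nid(page) != numa_mem_id())
		return false;

	/* only recycle if the driver is still the sole owner of the page */
	return page_ref_count(page) == 1;
}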

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{}
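
/*
 * Illustrative sketch (not the in-tree implementation) of the
 * copy-versus-frag decision described above: small frames are copied
 * into the skb linear area so the page can be recycled immediately,
 * larger frames are attached as a page fragment.  The demo_* name and
 * the 256-byte threshold are assumptions for illustration.
 */
static void __maybe_unused demo_add_rx_frag(struct sk_buff *skb,
					    struct page *page,
					    unsigned int offset,
					    unsigned int size,
					    unsigned int truesize)
{
	void *va = page_address(page) + offset;

	if (size <= 256 && !skb_is_nonlinear(skb)) {
		/* small frame: copy it and leave the page on the ring */
		memcpy(__skb_put(skb, size), va, size);
		return;
	}

	/* large frame: hand the page region to the skb as a fragment */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			offset, size, truesize);
}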

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{}

#define FM10K_RSS_L4_TYPES_MASK

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{}
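
/*
 * Illustrative sketch (not the in-tree implementation): the RSS hash
 * from the Rx descriptor is reported to the stack with skb_set_hash(),
 * marked as an L4 hash only when the descriptor's RSS type indicates a
 * TCP/UDP flow.  The demo_* name is hypothetical.
 */
static inline void __maybe_unused demo_rx_hash(struct sk_buff *skb,
					       u32 rss_hash, bool l4_hashed)
{
	skb_set_hash(skb, rss_hash,
		     l4_hashed ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}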

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{}
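
/*
 * Illustrative sketch (not the in-tree implementation): the VLAN and
 * protocol parts of the field population described above.  A stripped
 * VLAN tag is pushed back via the hw-accel path and eth_type_trans()
 * fills in skb->protocol.  The demo_* name is hypothetical.
 */
static void __maybe_unused demo_finish_skb_fields(struct sk_buff *skb,
						  struct net_device *netdev,
						  u16 vid)
{
	if (vid)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	/* sets skb->protocol and consumes the Ethernet header */
	skb->protocol = eth_type_trans(skb, netdev);
}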

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{}
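
/*
 * Illustrative sketch (not the in-tree implementation) of the
 * next-to-clean bookkeeping described above: advance the index, wrap it
 * at the end of the ring, and report whether the descriptor that was
 * just processed closed the frame.  The demo_rx_ring type stands in for
 * the driver's ring structure.
 */
struct demo_rx_ring {
	u16 count;		/* descriptors in the ring */
	u16 next_to_clean;	/* next descriptor to examine */
};

static bool __maybe_unused demo_is_non_eop(struct demo_rx_ring *ring,
					   bool desc_is_eop)
{
	u32 ntc = ring->next_to_clean + 1;

	/* wrap the index back to zero once we fall off the end */
	ring->next_to_clean = (ntc < ring->count) ? ntc : 0;

	/* a non-EOP descriptor means the frame continues in the next buffer */
	return !desc_is_eop;
}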

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{}
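
/*
 * Illustrative sketch (not the in-tree implementation): the 60-byte
 * minimum mentioned above is normally enforced with eth_skb_pad(),
 * which zero-pads the frame to ETH_ZLEN and frees the skb if it cannot.
 * The demo_* name is hypothetical.
 */
static bool __maybe_unused demo_pad_runt(struct sk_buff *skb)
{
	if (eth_skb_pad(skb))
		return true;	/* skb was freed, caller must drop it */

	return false;
}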

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{}

static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{}

/* a VXLAN frame adds an outer UDP header plus an 8-byte VXLAN header */
#define VXLAN_HLEN	(sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{}

#define FM10K_NVGRE_RESERVED0_FLAGS
#define NVGRE_TNI
/* GRE/NVGRE header: flag bits, protocol type, then the key (TNI + FlowID) */
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{}

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{}

#define FM10K_SET_FLAG(_input, _flag, _result)

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{}
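
/*
 * Illustrative sketch (not the in-tree implementation) of the classic
 * "maybe stop" flow control that fm10k_maybe_stop_tx() is named for:
 * stop the queue when descriptors run low, then re-check under a memory
 * barrier so a racing completion cannot leave the queue stopped
 * forever.  The demo_* names and demo_tx_ring layout are assumptions.
 */
struct demo_tx_ring {
	struct netdev_queue *txq;	/* stack queue backing this ring */
	u16 count;			/* descriptors in the ring */
	u16 next_to_use;
	u16 next_to_clean;
};

static u16 demo_desc_unused(const struct demo_tx_ring *ring)
{
	s16 unused = ring->next_to_clean - ring->next_to_use - 1;

	return unused < 0 ? unused + ring->count : unused;
}

static int __maybe_unused demo_maybe_stop_tx(struct demo_tx_ring *ring,
					     u16 needed)
{
	if (likely(demo_desc_unused(ring) >= needed))
		return 0;

	netif_tx_stop_queue(ring->txq);

	/* pairs with the barrier in the completion path before restart */
	smp_mb();

	/* a completion may have freed descriptors while we were stopping */
	if (demo_desc_unused(ring) < needed)
		return -EBUSY;

	netif_tx_start_queue(ring->txq);
	return 0;
}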

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{}

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{}

static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{}

/**
 * fm10k_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring structure
 * @in_sw: is tx_pending being checked in SW or in HW?
 */
u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{}
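
/*
 * Illustrative sketch (not the in-tree implementation): seen purely
 * from the software indices, the descriptors still outstanding are the
 * distance from next_to_clean to next_to_use, modulo the ring size.
 * The demo_* name is hypothetical.
 */
static inline u16 __maybe_unused demo_tx_pending(u16 next_to_use,
						 u16 next_to_clean,
						 u16 count)
{
	return next_to_use >= next_to_clean ?
	       next_to_use - next_to_clean :
	       (u16)(next_to_use + count - next_to_clean);
}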

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{}

/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring, int napi_budget)
{}

/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  The
 *      divisors and thresholds used by this function were determined based
 *      on theoretical maximum wire speed and testing data, in order to
 *      minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{}
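
/*
 * Illustrative sketch (not the in-tree implementation): a packet-size
 * driven ITR scheme like the one described above derives the average
 * wire size for the interval and maps it onto an interrupt interval, so
 * small-packet latency-sensitive traffic interrupts more often than
 * bulk traffic.  The thresholds and interval values below are invented
 * for illustration only; the driver's real constants differ.
 */
static unsigned int __maybe_unused demo_pick_itr(unsigned int total_bytes,
						 unsigned int total_packets)
{
	unsigned int avg_wire_size;

	if (!total_packets)
		return 0;	/* nothing processed, leave the ITR alone */

	avg_wire_size = total_bytes / total_packets;

	if (avg_wire_size <= 128)
		return 25;	/* small packets: favour low latency */
	if (avg_wire_size <= 1024)
		return 50;	/* mixed traffic */

	return 100;		/* bulk transfers: favour fewer interrupts */
}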

static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{}

static int fm10k_poll(struct napi_struct *napi, int budget)
{}

/**
 * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of QoS and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{}

/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{}
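
/*
 * Illustrative sketch (not the in-tree implementation): the "one queue
 * per CPU, capped by what the hardware exposes" sizing described above.
 * The demo_* name and hw_limit parameter are assumptions.
 */
static inline u16 __maybe_unused demo_rss_queue_count(u16 hw_limit)
{
	/* never ask for more queues than there are online CPUs */
	return min_t(u16, hw_limit, num_online_cpus());
}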

/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{}

/**
 * fm10k_reset_num_queues - Reset the number of queues to zero
 * @interface: board private structure
 *
 * This function should be called whenever we need to reset the number of
 * queues after an error condition.
 */
static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
{}

/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{}

/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{}

/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{}

/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{}

/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{}

/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{}
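
/*
 * Illustrative sketch (not the in-tree implementation): MSI-X setup of
 * the kind described above requests one vector per q_vector plus one
 * for the non-queue (mailbox) interrupt and accepts a smaller grant if
 * the platform cannot provide the full set.  The demo_* name is
 * hypothetical; the caller provides an entries array of at least
 * nr_q_vectors + 1 elements.
 */
static int __maybe_unused demo_enable_msix(struct pci_dev *pdev,
					   struct msix_entry *entries,
					   int nr_q_vectors)
{
	int v_budget = nr_q_vectors + 1;	/* +1 for the mailbox vector */
	int i;

	for (i = 0; i < v_budget; i++)
		entries[i].entry = i;

	/* returns the number of vectors granted or a negative errno */
	return pci_enable_msix_range(pdev, entries, 2, v_budget);
}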

/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{}

/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{}

/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{}

static void fm10k_init_reta(struct fm10k_intfc *interface)
{}
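
/*
 * Illustrative sketch (not the in-tree implementation): with no
 * user-supplied indirection table, a RETA is normally seeded with the
 * default round-robin spread over the active RSS queues, which is what
 * ethtool_rxfh_indir_default() from <linux/ethtool.h> computes.  The
 * demo_* name is hypothetical; rss_queues must be non-zero.
 */
static void __maybe_unused demo_fill_default_reta(u32 *reta,
						  unsigned int reta_size,
						  u16 rss_queues)
{
	unsigned int i;

	for (i = 0; i < reta_size; i++)
		reta[i] = ethtool_rxfh_indir_default(i, rss_queues);
}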

/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{}

/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{}