// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ #include <linux/etherdevice.h> #include <net/ip6_checksum.h> #include <net/page_pool/helpers.h> #include <net/inet_ecn.h> #include <linux/iopoll.h> #include <linux/sctp.h> #include <linux/pci.h> #include <net/tcp.h> #include <net/ip.h> #include "wx_type.h" #include "wx_lib.h" #include "wx_hw.h" /* Lookup table mapping the HW PTYPE to the bit field for decoding */ static struct wx_dec_ptype wx_ptype_lookup[256] = …; struct wx_dec_ptype wx_decode_ptype(const u8 ptype) { … } EXPORT_SYMBOL(…); /* wx_test_staterr - tests bits in Rx descriptor status and error fields */ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, const u32 stat_err_bits) { … } static void wx_dma_sync_frag(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer) { … } static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff **skb, int *rx_buffer_pgcnt) { … } static void wx_put_rx_buffer(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer, struct sk_buff *skb, int rx_buffer_pgcnt) { … } static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer, union wx_rx_desc *rx_desc) { … } static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, struct wx_rx_buffer *bi) { … } /** * wx_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) { … } u16 wx_desc_unused(struct wx_ring *ring) { … } /** * wx_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * * This function updates next to clean. 
If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. **/ static bool wx_is_non_eop(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } static void wx_pull_tail(struct sk_buff *skb) { … } /** * wx_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. * * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. 
**/ static bool wx_cleanup_headers(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } static void wx_rx_hash(struct wx_ring *ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } /** * wx_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified **/ static void wx_rx_checksum(struct wx_ring *ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } /** * wx_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, protocol, and * other fields within the skb. **/ static void wx_process_skb_fields(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { … } /** * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * * This function provides a "bounce buffer" approach to Rx interrupt * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the system. * * Returns amount of work completed. 
**/ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, struct wx_ring *rx_ring, int budget) { … } static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring) { … } /** * wx_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean * @napi_budget: Used to determine if we are in netpoll **/ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, struct wx_ring *tx_ring, int napi_budget) { … } /** * wx_poll - NAPI polling RX/TX cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean all queues associated with a q_vector. **/ static int wx_poll(struct napi_struct *napi, int budget) { … } static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) { … } static u32 wx_tx_cmd_type(u32 tx_flags) { … } static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) { … } static void wx_tx_map(struct wx_ring *tx_ring, struct wx_tx_buffer *first, const u8 hdr_len) { … } static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) { … } static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) { … } static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) { … } static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 *hdr_len, u8 ptype) { … } static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 ptype) { … } static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, struct wx_ring *tx_ring) { … } netdev_tx_t wx_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { … } EXPORT_SYMBOL(…); void wx_napi_enable_all(struct wx *wx) { … } EXPORT_SYMBOL(…); void wx_napi_disable_all(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_set_rss_queues: Allocate queues for RSS * @wx: board 
private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. * **/ static void wx_set_rss_queues(struct wx *wx) { … } static void wx_set_num_queues(struct wx *wx) { … } /** * wx_acquire_msix_vectors - acquire MSI-X vectors * @wx: board private structure * * Attempts to acquire a suitable range of MSI-X vector interrupts. Will * return a negative error code if unable to acquire MSI-X vectors for any * reason. */ static int wx_acquire_msix_vectors(struct wx *wx) { … } /** * wx_set_interrupt_capability - set MSI-X or MSI if supported * @wx: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ static int wx_set_interrupt_capability(struct wx *wx) { … } /** * wx_cache_ring_rss - Descriptor ring to register mapping for RSS * @wx: board private structure to initialize * * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. * **/ static void wx_cache_ring_rss(struct wx *wx) { … } static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head) { … } /** * wx_alloc_q_vector - Allocate memory for a single interrupt vector * @wx: board private structure to initialize * @v_count: q_vectors allocated on wx, used for ring interleaving * @v_idx: index of vector in wx struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM. 
**/ static int wx_alloc_q_vector(struct wx *wx, unsigned int v_count, unsigned int v_idx, unsigned int txr_count, unsigned int txr_idx, unsigned int rxr_count, unsigned int rxr_idx) { … } /** * wx_free_q_vector - Free memory allocated for specific interrupt vector * @wx: board private structure to initialize * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void wx_free_q_vector(struct wx *wx, int v_idx) { … } /** * wx_alloc_q_vectors - Allocate memory for interrupt vectors * @wx: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ static int wx_alloc_q_vectors(struct wx *wx) { … } /** * wx_free_q_vectors - Free memory allocated for interrupt vectors * @wx: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
**/ static void wx_free_q_vectors(struct wx *wx) { … } void wx_reset_interrupt_capability(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings * @wx: board private structure to clear interrupt scheme on * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ void wx_clear_interrupt_scheme(struct wx *wx) { … } EXPORT_SYMBOL(…); int wx_init_interrupt_scheme(struct wx *wx) { … } EXPORT_SYMBOL(…); irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) { … } EXPORT_SYMBOL(…); void wx_free_irq(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_setup_isb_resources - allocate interrupt status resources * @wx: board private structure * * Return 0 on success, negative on failure **/ int wx_setup_isb_resources(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_free_isb_resources - free interrupt status resources * @wx: board private structure **/ void wx_free_isb_resources(struct wx *wx) { … } EXPORT_SYMBOL(…); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) { … } EXPORT_SYMBOL(…); /** * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors * @wx: pointer to wx struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue * **/ static void wx_set_ivar(struct wx *wx, s8 direction, u16 queue, u16 msix_vector) { … } /** * wx_write_eitr - write EITR register in hardware specific way * @q_vector: structure containing interrupt and ring information * * This function is made to be called by ethtool and by the driver * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. 
*/ void wx_write_eitr(struct wx_q_vector *q_vector) { … } /** * wx_configure_vectors - Configure vectors for hardware * @wx: board private structure * * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx * interrupts. **/ void wx_configure_vectors(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ static void wx_clean_rx_ring(struct wx_ring *rx_ring) { … } /** * wx_clean_all_rx_rings - Free Rx Buffers for all queues * @wx: board private structure **/ void wx_clean_all_rx_rings(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_free_rx_resources - Free Rx Resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ static void wx_free_rx_resources(struct wx_ring *rx_ring) { … } /** * wx_free_all_rx_resources - Free Rx Resources for All Queues * @wx: pointer to hardware structure * * Free all receive software resources **/ static void wx_free_all_rx_resources(struct wx *wx) { … } /** * wx_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void wx_clean_tx_ring(struct wx_ring *tx_ring) { … } /** * wx_clean_all_tx_rings - Free Tx Buffers for all queues * @wx: board private structure **/ void wx_clean_all_tx_rings(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ static void wx_free_tx_resources(struct wx_ring *tx_ring) { … } /** * wx_free_all_tx_resources - Free Tx Resources for All Queues * @wx: pointer to hardware structure * * Free all transmit software resources **/ static void wx_free_all_tx_resources(struct wx *wx) { … } void wx_free_resources(struct wx *wx) { … } EXPORT_SYMBOL(…); static int wx_alloc_page_pool(struct wx_ring *rx_ring) { … } /** * wx_setup_rx_resources - allocate Rx resources (Descriptors) * @rx_ring: rx descriptor ring (for a specific queue) 
to setup * * Returns 0 on success, negative on failure **/ static int wx_setup_rx_resources(struct wx_ring *rx_ring) { … } /** * wx_setup_all_rx_resources - allocate all queues Rx resources * @wx: pointer to hardware structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int wx_setup_all_rx_resources(struct wx *wx) { … } /** * wx_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ static int wx_setup_tx_resources(struct wx_ring *tx_ring) { … } /** * wx_setup_all_tx_resources - allocate all queues Tx resources * @wx: pointer to private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int wx_setup_all_tx_resources(struct wx *wx) { … } int wx_setup_resources(struct wx *wx) { … } EXPORT_SYMBOL(…); /** * wx_get_stats64 - Get System Network Statistics * @netdev: network interface device structure * @stats: storage space for 64bit statistics */ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { … } EXPORT_SYMBOL(…); int wx_set_features(struct net_device *netdev, netdev_features_t features) { … } EXPORT_SYMBOL(…); #define NETIF_VLAN_STRIPPING_FEATURES … #define NETIF_VLAN_INSERTION_FEATURES … #define NETIF_VLAN_FILTERING_FEATURES … netdev_features_t wx_fix_features(struct net_device *netdev, netdev_features_t features) { … } EXPORT_SYMBOL(…); void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring) { … } EXPORT_SYMBOL(…); MODULE_DESCRIPTION(…) …; MODULE_LICENSE(…) …;