/* linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

/* Logical BM pool identifiers used when pools are shared between ports:
 * a short-packet pool, a long-packet pool and a jumbo pool. The last
 * enumerator doubles as the number of logical pools.
 * (Restores the enumerator list that was stripped from this skeleton;
 * an empty enumerator list is not valid C.)
 */
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

/* Per-logical-pool default parameters, filled in by mvpp2_setup_bm_pool():
 * the packet size served by the pool and the number of buffers to allocate.
 * (An empty struct is not valid C; members restored from the upstream driver.)
 */
static struct {
	int pkt_size;	/* max packet size this pool serves */
	int buf_num;	/* number of buffers allocated to the pool */
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE
#define MVPP2_QDIST_MULTI_MODE

static int queue_mode =;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC();

/* Utility/helper methods */

/* Write @data to the global PPv2 register file at @offset.
 * NOTE(review): body stripped in this skeleton — restore from the full driver.
 */
void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{}

/* Read a 32-bit value from the global PPv2 register file at @offset.
 * NOTE(review): body stripped.
 */
u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{}

/* Relaxed (no-barrier) variant of mvpp2_read(); presumably for hot paths
 * where ordering is handled by the caller — confirm against full source.
 * NOTE(review): body stripped.
 */
static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{}

/* Map a CPU number to the HW thread used for per-thread register access.
 * NOTE(review): body stripped; semantics inferred from the name — confirm.
 */
static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{}

/* Write @data to the CM3 co-processor register space at @offset.
 * NOTE(review): body stripped.
 */
static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{}

/* Read a 32-bit value from the CM3 co-processor register space at @offset.
 * NOTE(review): body stripped.
 */
static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{}

/* Create a page_pool for RX buffer recycling: @num entries of @len bytes,
 * DMA-mapped for @dma_dir. Presumably returns the pool or an ERR_PTR —
 * TODO confirm once the body is restored.
 * NOTE(review): body stripped.
 */
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{}

/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{}

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{}

/* Routine enable PPv23 8 pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{}

/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{}

static void mvpp2_setup_bm_pool(void)
{}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{}

/* Routine enable flow control for RXQs condition */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{}

/* Routine disable flow control for RXQs condition */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{}

/* Routine disable/enable flow control for BM pool condition */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{}

/* disable/enable flow control for BM pool on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{}

/* Enable RX/TX interrupts for @port.
 * NOTE(review): body stripped in this skeleton — restore from the full driver.
 */
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{}

/* Disable RX/TX interrupts for @port. NOTE(review): body stripped. */
static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{}

/* Enable interrupts for a single queue vector. NOTE(review): body stripped. */
static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{}

/* Disable interrupts for a single queue vector. NOTE(review): body stripped. */
static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{}

/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{}

/* Set Flow Control timer x100 faster than pause quanta to ensure that link
 * partner won't send traffic if port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{}

static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port,
			      phy_interface_t interface)
{}

static void mvpp2_port_enable(struct mvpp2_port *port)
{}

static void mvpp2_port_disable(struct mvpp2_port *port)
{}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{}

/* Indices into the XDP statistics block reported through ethtool; order
 * must match the mvpp2_ethtool_xdp[] string table below.
 * (Restores the enumerator list stripped from this skeleton; an empty
 * enumerator list is not valid C.)
 */
enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{}

/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
/* MIB (per-MAC) counters. NOTE(review): the initializer lists of all five
 * tables below and the MVPP2_N_ETHTOOL_STATS() expansion were stripped from
 * this skeleton ("=;" is not valid C) — restore them from the full driver.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] =;

/* Per-port counters (indexed indirectly via MVPP2_CTRS_IDX). */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] =;

/* Per-TXQ counters. */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] =;

/* Per-RXQ counters. */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] =;

/* Software XDP counters; order must match the ETHTOOL_XDP_* indices. */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] =;

/* Total number of ethtool stats for a port with ntxqs/nrxqs queues. */
#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{}

static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{}

static void mvpp2_read_stats(struct mvpp2_port *port)
{}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{}

static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
				      phy_interface_t interface)
{}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-thread access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{}

/* Set the number of non-occupied descriptors threshold */
static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
				     struct mvpp2_rx_queue *rxq)
{}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{}

/* For some reason in the LSP this is done on each CPU. Why ? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  unsigned int thread)
{}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       unsigned int thread, struct mvpp2 *priv)
{}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{}

static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
{}

static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
{}

static void mvpp2_isr_handle_link(struct mvpp2_port *port,
				  struct phylink_pcs *pcs, bool link)
{}

static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
{}

static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
{}

/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
{}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{}

/* Handle RX checksum offload */
static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
{}

/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   struct page_pool *page_pool, int pool)
{}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{}

static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
{}

static int
mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
		       struct xdp_frame *xdpf, bool dma_map)
{}

static int
mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
{}

static int
mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
	       struct xdp_frame **frames, u32 flags)
{}

static int
mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
	      struct xdp_buff *xdp, struct page_pool *pp,
	      struct mvpp2_pcpu_stats *stats)
{}

static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
				    int pool, u32 rx_status)
{}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{}

static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
				   struct mvpp2_tx_desc *desc)
{}

static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
			       struct mvpp2_tx_desc *tx_desc,
			       struct sk_buff *skb)
{}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{}

static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{}

/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{}

static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
				    phy_interface_t interface)
{}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{}

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{}

static int mvpp2_irqs_init(struct mvpp2_port *port)
{}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{}

static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{}

static int mvpp2_open(struct net_device *dev)
{}

static int mvpp2_stop(struct net_device *dev)
{}

static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{}

static void mvpp2_set_rx_mode(struct net_device *dev)
{}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{}

/* Shut down all the ports, reconfigure the pools as percpu or shared,
 * then bring up again all ports.
 */
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{}

static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
{}

static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{}

static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{}

static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{}

static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
				     struct kernel_ethtool_ts_info *info)
{}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{}

static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{}

static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{}

static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
{}

static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{}

/* Set interrupt coalescing for ethtools */
static int
mvpp2_ethtool_set_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *c,
			   struct kernel_ethtool_coalesce *kernel_coal,
			   struct netlink_ext_ack *extack)
{}

/* get coalescing for ethtools */
static int
mvpp2_ethtool_get_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *c,
			   struct kernel_ethtool_coalesce *kernel_coal,
			   struct netlink_ext_ack *extack)
{}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{}

static void
mvpp2_ethtool_get_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{}

static int
mvpp2_ethtool_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{}

static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{}

static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{}

static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{}

static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
				  struct ethtool_rxfh_param *rxfh)
{}

static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
				    const struct ethtool_rxfh_param *rxfh)
{}

static int mvpp2_create_rxfh_context(struct net_device *dev,
				     struct ethtool_rxfh_context *ctx,
				     const struct ethtool_rxfh_param *rxfh,
				     struct netlink_ext_ack *extack)
{}

static int mvpp2_modify_rxfh_context(struct net_device *dev,
				     struct ethtool_rxfh_context *ctx,
				     const struct ethtool_rxfh_param *rxfh,
				     struct netlink_ext_ack *extack)
{}

static int mvpp2_remove_rxfh_context(struct net_device *dev,
				     struct ethtool_rxfh_context *ctx,
				     u32 rss_context,
				     struct netlink_ext_ack *extack)
{}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
				  struct ethtool_rxfh_param *rxfh,
				  struct netlink_ext_ack *extack)
{}

/* Device ops */

/* net_device callbacks (open/stop/xmit/ioctl/...). NOTE(review): both
 * initializer lists below were stripped from this skeleton ("=;" is not
 * valid C) — restore the designated initializers from the full driver.
 */
static const struct net_device_ops mvpp2_netdev_ops =;

/* ethtool callbacks (coalesce/ringparam/rxfh/...). Initializer stripped. */
static const struct ethtool_ops mvpp2_eth_tool_ops =;

/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{}

static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{}

/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{}

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{}

static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
					   unsigned long *flags)
{}

/* Checks if the port dt description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2 and PPv2.3:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All those variants are supported to keep the backward compatibility.
 */
static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
				struct device_node *port_node,
				unsigned long *flags)
{}

static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				    struct fwnode_handle *fwnode,
				    char **mac_from)
{}

/* container_of-style helper: phylink_config -> owning mvpp2_port.
 * NOTE(review): body stripped in this skeleton — restore from the full driver.
 */
static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
{}

/* container_of-style helper: XLG PCS -> owning mvpp2_port. Body stripped. */
static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
{}

/* container_of-style helper: GMAC PCS -> owning mvpp2_port. Body stripped. */
static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
{}

static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
				    struct phylink_link_state *state)
{}

static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
				phy_interface_t interface,
				const unsigned long *advertising,
				bool permit_pause_to_mac)
{}

static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops =;

static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
				   unsigned long *supported,
				   const struct phylink_link_state *state)
{}

static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
				     struct phylink_link_state *state)
{}

static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
				 phy_interface_t interface,
				 const unsigned long *advertising,
				 bool permit_pause_to_mac)
{}

static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{}

static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops =;

static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{}

static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{}

static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
					    phy_interface_t interface)
{}

static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{}

static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state)
{}

static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface)
{}

static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause)
{}

static void mvpp2_mac_link_down(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface)
{}

static const struct phylink_mac_ops mvpp2_phylink_ops =;

/* Work-around for ACPI */
static void mvpp2_acpi_start(struct mvpp2_port *port)
{}

/* In order to ensure backward compatibility for ACPI, check if the port
 * firmware node comprises the necessary description allowing to use phylink.
 */
static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
{}

/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{}

/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{}

static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{}

/* Initialize TX FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB fixed space must be assigned for the loopback port.
 * Redistribute remaining avialable 44kB space among all active ports.
 * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
 * SGMII link.
 */
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{}

/* Configure Rx FIFO Flow control thresholds */
static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
{}

/* Configure Rx FIFO Flow control thresholds */
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
{}

static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{}

/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
 * 1kB fixed space must be assigned for the loopback port.
 * Redistribute remaining avialable 18kB space among all active ports.
 * The 10G interface should use 10kB (which is maximum possible size
 * per single port).
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{}

static void mvpp2_axi_init(struct mvpp2 *priv)
{}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{}

static int mvpp2_get_sram(struct platform_device *pdev,
			  struct mvpp2 *priv)
{}

static int mvpp2_probe(struct platform_device *pdev)
{}

static void mvpp2_remove(struct platform_device *pdev)
{}

static const struct of_device_id mvpp2_match[] =;
MODULE_DEVICE_TABLE(of, mvpp2_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] =;
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
#endif

static struct platform_driver mvpp2_driver =;

static int __init mvpp2_driver_init(void)
{}
module_init();

static void __exit mvpp2_driver_exit(void)
{}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();