linux/drivers/net/ethernet/marvell/mvneta.c

/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)
#define MVNETA_RXQ_HW_BUF_ALLOC
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT
#define MVNETA_RXQ_SHORT_POOL_ID_MASK
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT
#define MVNETA_RXQ_LONG_POOL_ID_MASK
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)
#define MVNETA_RXQ_THRESHOLD_REG(q)
#define MVNETA_RXQ_NON_OCCUPIED(v)
#define MVNETA_RXQ_BASE_ADDR_REG(q)
#define MVNETA_RXQ_SIZE_REG(q)
#define MVNETA_RXQ_BUF_SIZE_SHIFT
#define MVNETA_RXQ_BUF_SIZE_MASK
#define MVNETA_RXQ_STATUS_REG(q)
#define MVNETA_RXQ_OCCUPIED_ALL_MASK
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK
#define MVNETA_PORT_RX_RESET
#define MVNETA_PORT_RX_DMA_RESET
#define MVNETA_PHY_ADDR
#define MVNETA_PHY_ADDR_MASK
#define MVNETA_MBUS_RETRY
#define MVNETA_UNIT_INTR_CAUSE
#define MVNETA_UNIT_CONTROL
#define MVNETA_PHY_POLLING_ENABLE
#define MVNETA_WIN_BASE(w)
#define MVNETA_WIN_SIZE(w)
#define MVNETA_WIN_REMAP(w)
#define MVNETA_BASE_ADDR_ENABLE
#define MVNETA_AC5_CNM_DDR_TARGET
#define MVNETA_AC5_CNM_DDR_ATTR
#define MVNETA_ACCESS_PROTECT_ENABLE
#define MVNETA_PORT_CONFIG
#define MVNETA_UNI_PROMISC_MODE
#define MVNETA_DEF_RXQ(q)
#define MVNETA_DEF_RXQ_ARP(q)
#define MVNETA_TX_UNSET_ERR_SUM
#define MVNETA_DEF_RXQ_TCP(q)
#define MVNETA_DEF_RXQ_UDP(q)
#define MVNETA_DEF_RXQ_BPDU(q)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)
#define MVNETA_PORT_CONFIG_EXTEND
#define MVNETA_MAC_ADDR_LOW
#define MVNETA_MAC_ADDR_HIGH
#define MVNETA_SDMA_CONFIG
#define MVNETA_SDMA_BRST_SIZE_16
#define MVNETA_RX_BRST_SZ_MASK(burst)
#define MVNETA_RX_NO_DATA_SWAP
#define MVNETA_TX_NO_DATA_SWAP
#define MVNETA_DESC_SWAP
#define MVNETA_TX_BRST_SZ_MASK(burst)
#define MVNETA_VLAN_PRIO_TO_RXQ
#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq)
#define MVNETA_PORT_STATUS
#define MVNETA_TX_IN_PRGRS
#define MVNETA_TX_FIFO_EMPTY
#define MVNETA_RX_MIN_FRAME_SIZE
/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG
#define MVNETA_SGMII_SERDES_PROTO
#define MVNETA_QSGMII_SERDES_PROTO
#define MVNETA_HSGMII_SERDES_PROTO
#define MVNETA_TYPE_PRIO
#define MVNETA_FORCE_UNI
#define MVNETA_TXQ_CMD_1
#define MVNETA_TXQ_CMD
#define MVNETA_TXQ_DISABLE_SHIFT
#define MVNETA_TXQ_ENABLE_MASK
#define MVNETA_RX_DISCARD_FRAME_COUNT
#define MVNETA_OVERRUN_FRAME_COUNT
#define MVNETA_GMAC_CLOCK_DIVIDER
#define MVNETA_GMAC_1MS_CLOCK_ENABLE
#define MVNETA_ACC_MODE
#define MVNETA_BM_ADDRESS
#define MVNETA_CPU_MAP(cpu)
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK
#define MVNETA_CPU_RXQ_ACCESS(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)
#define MVNETA_RXQ_TIME_COAL_REG(q)

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is not
 * set, then a read of the register from this CPU will always return 0
 * and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE
#define MVNETA_INTR_NEW_MASK

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, summary bit of the old cause register
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)
#define MVNETA_TX_INTR_MASK_ALL
#define MVNETA_RX_INTR_MASK(nr_rxqs)
#define MVNETA_RX_INTR_MASK_ALL
#define MVNETA_MISCINTR_INTR_MASK
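
/* Illustrative sketch of how the per-queue masks above follow from the
 * documented bit layout (the driver's actual macro values are elided):
 * with TXQ SENT in bits 0..7 and RXQ OCCUP in bits 8..15, a mask
 * covering the first n queues could be built as
 *
 *	tx_intr_mask(n) = ((1 << (n)) - 1) << 0
 *	rx_intr_mask(n) = ((1 << (n)) - 1) << 8
 */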

#define MVNETA_INTR_OLD_CAUSE
#define MVNETA_INTR_OLD_MASK

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE
#define MVNETA_INTR_MISC_MASK

#define MVNETA_CAUSE_PHY_STATUS_CHANGE
#define MVNETA_CAUSE_LINK_CHANGE
#define MVNETA_CAUSE_PTP

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR
#define MVNETA_CAUSE_RX_OVERRUN
#define MVNETA_CAUSE_RX_CRC_ERROR
#define MVNETA_CAUSE_RX_LARGE_PKT
#define MVNETA_CAUSE_TX_UNDERUN
#define MVNETA_CAUSE_PRBS_ERR
#define MVNETA_CAUSE_PSC_SYNC_CHANGE
#define MVNETA_CAUSE_SERDES_SYNC_ERR

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)

#define MVNETA_INTR_ENABLE
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK

#define MVNETA_RXQ_CMD
#define MVNETA_RXQ_DISABLE_SHIFT
#define MVNETA_RXQ_ENABLE_MASK
#define MVETH_TXQ_TOKEN_COUNT_REG(q)
#define MVETH_TXQ_TOKEN_CFG_REG(q)
#define MVNETA_GMAC_CTRL_0
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT
#define MVNETA_GMAC_MAX_RX_SIZE_MASK
#define MVNETA_GMAC0_PORT_1000BASE_X
#define MVNETA_GMAC0_PORT_ENABLE
#define MVNETA_GMAC_CTRL_2
#define MVNETA_GMAC2_INBAND_AN_ENABLE
#define MVNETA_GMAC2_PCS_ENABLE
#define MVNETA_GMAC2_PORT_RGMII
#define MVNETA_GMAC2_PORT_RESET
#define MVNETA_GMAC_STATUS
#define MVNETA_GMAC_LINK_UP
#define MVNETA_GMAC_SPEED_1000
#define MVNETA_GMAC_SPEED_100
#define MVNETA_GMAC_FULL_DUPLEX
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE
#define MVNETA_GMAC_AN_COMPLETE
#define MVNETA_GMAC_SYNC_OK
#define MVNETA_GMAC_AUTONEG_CONFIG
#define MVNETA_GMAC_FORCE_LINK_DOWN
#define MVNETA_GMAC_FORCE_LINK_PASS
#define MVNETA_GMAC_INBAND_AN_ENABLE
#define MVNETA_GMAC_AN_BYPASS_ENABLE
#define MVNETA_GMAC_INBAND_RESTART_AN
#define MVNETA_GMAC_CONFIG_MII_SPEED
#define MVNETA_GMAC_CONFIG_GMII_SPEED
#define MVNETA_GMAC_AN_SPEED_EN
#define MVNETA_GMAC_CONFIG_FLOW_CTRL
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL
#define MVNETA_GMAC_AN_FLOW_CTRL_EN
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX
#define MVNETA_GMAC_AN_DUPLEX_EN
#define MVNETA_GMAC_CTRL_4
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE
#define MVNETA_MIB_COUNTERS_BASE
#define MVNETA_MIB_LATE_COLLISION
#define MVNETA_DA_FILT_SPEC_MCAST
#define MVNETA_DA_FILT_OTH_MCAST
#define MVNETA_DA_FILT_UCAST_BASE
#define MVNETA_TXQ_BASE_ADDR_REG(q)
#define MVNETA_TXQ_SIZE_REG(q)
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)
#define MVNETA_TXQ_UPDATE_REG(q)
#define MVNETA_TXQ_DEC_SENT_SHIFT
#define MVNETA_TXQ_DEC_SENT_MASK
#define MVNETA_TXQ_STATUS_REG(q)
#define MVNETA_TXQ_SENT_DESC_SHIFT
#define MVNETA_TXQ_SENT_DESC_MASK
#define MVNETA_PORT_TX_RESET
#define MVNETA_PORT_TX_DMA_RESET
#define MVNETA_TXQ_CMD1_REG
#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1
#define MVNETA_TXQ_CMD1_BW_LIM_EN
#define MVNETA_REFILL_NUM_CLK_REG
#define MVNETA_REFILL_MAX_NUM_CLK
#define MVNETA_TX_MTU
#define MVNETA_TX_TOKEN_SIZE
#define MVNETA_TX_TOKEN_SIZE_MAX
#define MVNETA_TXQ_BUCKET_REFILL_REG(q)
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT
#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)
#define MVNETA_TXQ_TOKEN_SIZE_MAX

/* The values of the bucket refill base period and refill period are taken
 * from the reference manual, and add up to a base resolution of 10Kbps.
 * This allows covering all rate-limit values from 10Kbps up to 5Gbps.
 */

/* Base period for the rate limit algorithm */
#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS

/* Number of Base Period to wait between each bucket refill */
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD

/* The base resolution for rate limiting, in bps. Any max_rate value should be
 * a multiple of that value.
 */
#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION
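
/* Sketch of the conversion this resolution implies (illustrative, not
 * the elided driver code): a requested max_rate in bps maps to a bucket
 * refill value by dividing by the base resolution, and is representable
 * only when it divides evenly and fits the refill field:
 *
 *	refill_val = max_rate / MVNETA_TXQ_RATE_LIMIT_RESOLUTION;
 *	if (max_rate % MVNETA_TXQ_RATE_LIMIT_RESOLUTION || !refill_val ||
 *	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
 *		return -EINVAL;
 */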

#define MVNETA_LPI_CTRL_0
#define MVNETA_LPI_CTRL_1
#define MVNETA_LPI_REQUEST_ENABLE
#define MVNETA_LPI_CTRL_2
#define MVNETA_LPI_STATUS

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)
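
/* A plausible expansion of the ring-walking macro above (illustrative;
 * q->last_desc is an assumption about the elided queue structs):
 * advance the index and wrap to the start of the ring after the last
 * descriptor, e.g.
 *
 *	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
 */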

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS
#define MVNETA_RX_COAL_PKTS
#define MVNETA_RX_COAL_USEC

/* The two bytes Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE
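
/* Worked example of the alignment effect described above (illustrative
 * arithmetic, not driver code): with the 2-byte Marvell header
 * prepended, the IP header starts at offset
 *
 *	MVNETA_MH_SIZE + ETH_HLEN = 2 + 14 = 16
 *
 * from the start of the buffer, i.e. on a 4-byte boundary with no
 * extra padding required.
 */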

#define MVNETA_VLAN_TAG_LEN

#define MVNETA_TX_CSUM_DEF_SIZE
#define MVNETA_TX_CSUM_MAX_SIZE
#define MVNETA_ACC_MODE_EXT1
#define MVNETA_ACC_MODE_EXT2

#define MVNETA_MAX_DECODE_WIN

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT

#define MVNETA_TX_MTU_MAX

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS

#define MVNETA_MAX_SKB_DESCS

/* The size of a TSO header page */
#define MVNETA_TSO_PAGE_SIZE

/* Number of TSO headers per page. This should be a power of 2 */
#define MVNETA_TSO_PER_PAGE

/* Maximum number of TSO header pages */
#define MVNETA_MAX_TSO_PAGES
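
/* Because MVNETA_TSO_PER_PAGE is a power of 2, locating a TSO header
 * inside its page can use masking instead of a modulo. A minimal sketch
 * (txq->tso_hdrs and hdr_size are assumptions, not the elided driver
 * definitions):
 *
 *	page = txq->tso_hdrs[index / MVNETA_TSO_PER_PAGE];
 *	hdr  = page + (index & (MVNETA_TSO_PER_PAGE - 1)) * hdr_size;
 */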

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE

/* Number of bytes to be taken into account by HW when putting incoming data
 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum
 * packet offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION

#define MVNETA_RX_PKT_SIZE(mtu)
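
/* One plausible composition of the RX packet size above (an assumption;
 * the actual definition is elided): the buffer must hold the MTU plus
 * the Marvell header, a VLAN tag, the Ethernet header and the FCS,
 * rounded up for the DMA engine, e.g.
 *
 *	ALIGN(mtu + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN +
 *	      ETH_HLEN + ETH_FCS_LEN, 8)
 */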

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM
#define MVNETA_SKB_PAD
#define MVNETA_MAX_RX_BUF_SIZE

#define MVNETA_RX_GET_BM_POOL_ID(rxd)

enum {};

struct mvneta_statistic {};

#define T_REG_32
#define T_REG_64
#define T_SW

#define MVNETA_XDP_PASS
#define MVNETA_XDP_DROPPED
#define MVNETA_XDP_TX
#define MVNETA_XDP_REDIR

static const struct mvneta_statistic mvneta_statistics[] =;

struct mvneta_stats {};

struct mvneta_ethtool_stats {};

struct mvneta_pcpu_stats {};

struct mvneta_pcpu_port {};

enum {};

struct mvneta_port {};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT
#define MVNETA_TX_IP_HLEN_SHIFT
#define MVNETA_TX_L4_UDP
#define MVNETA_TX_L3_IP6
#define MVNETA_TXD_IP_CSUM
#define MVNETA_TXD_Z_PAD
#define MVNETA_TXD_L_DESC
#define MVNETA_TXD_F_DESC
#define MVNETA_TXD_FLZ_DESC
#define MVNETA_TX_L4_CSUM_FULL
#define MVNETA_TX_L4_CSUM_NOT

#define MVNETA_RXD_ERR_CRC
#define MVNETA_RXD_BM_POOL_SHIFT
#define MVNETA_RXD_BM_POOL_MASK
#define MVNETA_RXD_ERR_SUMMARY
#define MVNETA_RXD_ERR_OVERRUN
#define MVNETA_RXD_ERR_LEN
#define MVNETA_RXD_ERR_RESOURCE
#define MVNETA_RXD_ERR_CODE_MASK
#define MVNETA_RXD_L3_IP4
#define MVNETA_RXD_LAST_DESC
#define MVNETA_RXD_FIRST_DESC
#define MVNETA_RXD_FIRST_LAST_DESC
#define MVNETA_RXD_L4_CSUM_OK
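
/* Illustrative expansion of MVNETA_RX_GET_BM_POOL_ID() defined earlier
 * (the actual value is elided): the pool ID sits in a bitfield of the
 * RX descriptor status word, so extraction is a mask-and-shift:
 *
 *	((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >>
 *		MVNETA_RXD_BM_POOL_SHIFT
 */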

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {};

struct mvneta_rx_desc {};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

enum mvneta_tx_buf_type {};

struct mvneta_tx_buf {};

struct mvneta_tx_queue {};

struct mvneta_rx_queue {};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number =;
static int txq_number =;

static int rxq_def;

static int rx_copybreak __read_mostly =;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME
#define MVNETA_DRIVER_VERSION

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{}

/* Rx descriptors helper methods */

/* Check whether the RX descriptor with this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so an RX descriptor that does
 * not have both its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{}
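
/* A minimal sketch of the check above (the body is elided): a healthy
 * single-descriptor packet carries both bits, so compare the masked
 * status against the combined value:
 *
 *	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 *	       MVNETA_RXD_FIRST_LAST_DESC;
 */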

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{}

/* Configure MBUS window in order to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{}

/* Assign and initialize pools for port. In case of failure, the buffer
 * manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{}
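
/* Sketch of the table-fill pattern described above (illustrative; the
 * body and register values are elided): each 32-bit DA-filter register
 * packs four one-byte entries, each entry being a "pass" bit plus the
 * target queue, and queue == -1 clears every entry:
 *
 *	val = (queue == -1) ? 0 : (0x1 | (queue << 1));
 *	val |= (val << 24) | (val << 16) | (val << 8);
 *	for (offset = 0; offset <= 0xc; offset += 4)
 *		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
 */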

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{}

static void mvneta_percpu_unmask_interrupt(void *arg)
{}

static void mvneta_percpu_mask_interrupt(void *arg)
{}

static void mvneta_percpu_clear_intr_cause(void *arg)
{}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp,
				const unsigned char *addr, int queue)
{}

/* Set the number of packets that will be received before an RX interrupt
 * is generated by the HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{}

/* Set the time delay in usec before an RX interrupt is generated by the
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{}
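
/* A minimal sketch of the usec-to-cycles conversion this setter needs
 * (illustrative; the body is elided, and pp->clk and rxq->id are
 * assumptions about the elided structs): the hardware counts core clock
 * cycles, so scale the requested delay by the clock rate:
 *
 *	val = (clk_get_rate(pp->clk) / USEC_PER_SEC) * value;
 *	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
 */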

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{}

/* Handle RX checksum offload based on the descriptor's status */
static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always
 * a valid queue, matching the first queue found set in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{}
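
/* Sketch of the "find last set bit" policy described above (the body is
 * elided; pp->txqs is an assumption about the elided port struct):
 *
 *	int queue = fls(cause) - 1;
 *
 *	return &pp->txqs[queue];
 */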

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq, bool napi)
{}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{}

/* Refill processing for SW buffer management: allocate one page per
 * descriptor.
 */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
			    gfp_t gfp_mask)
{}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
{}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{}

static void
mvneta_update_stats(struct mvneta_port *pp,
		    struct mvneta_stats *ps)
{}

static inline
int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{}

static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		    struct xdp_buff *xdp, int sync_len)
{}

static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
			struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
{}

static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{}

static int
mvneta_xdp_xmit(struct net_device *dev, int num_frame,
		struct xdp_frame **frames, u32 flags)
{}

static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	       struct bpf_prog *prog, struct xdp_buff *xdp,
	       u32 frame_sz, struct mvneta_stats *stats)
{}

static void
mvneta_swbm_rx_frame(struct mvneta_port *pp,
		     struct mvneta_rx_desc *rx_desc,
		     struct mvneta_rx_queue *rxq,
		     struct xdp_buff *xdp, int *size,
		     struct page *page)
{}

static void
mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
			    struct xdp_buff *xdp, int *size,
			    struct page *page)
{}

static struct sk_buff *
mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
		      struct xdp_buff *xdp, u32 desc_status)
{}

/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int budget,
			  struct mvneta_rx_queue *rxq)
{}

/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{}

static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{}

static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{}

static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
{}

static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
			       struct tso_t *tso, int size, bool is_last)
{}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{}

static void mvneta_release_descs(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq,
				 int first, int num)
{}

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{}

/* Main tx processing */
static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{}


/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TX_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{}

/* Compute crc8 of the specified address, using a unique algorithm defined
 * by the hw spec that differs from the generic crc8 algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{}
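
/* A sketch of the hardware CRC-8 (illustrative; the elided body may
 * differ): fold each address byte into the accumulator and reduce with
 * the polynomial 0x107, bit by bit from the top:
 *
 *	crc = 0;
 *	for (i = 0; i < ETH_ALEN; i++) {
 *		crc = (crc ^ addr[i]) << 8;
 *		for (j = 7; j >= 0; j--)
 *			if (crc & (0x100 << j))
 *				crc ^= 0x107 << j;
 *	}
 *	return crc;
 */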

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{}
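
/* Sketch of the entry addressing described above (illustrative; the
 * body is elided): the table packs four one-byte entries per 32-bit
 * register, so DA[7:0] selects both the register and the byte lane:
 *
 *	tbl_offset = last_byte / 4;	(register index)
 *	reg_offset = last_byte % 4;	(byte lane in that register)
 *	entry = (queue == -1) ? 0 : (0x01 | (queue << 1));
 *
 * and "entry" is then inserted at bit position 8 * reg_offset.
 */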

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{}
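
/* Sketch of the two-table dispatch described above (illustrative; the
 * body is elided): addresses of the 01:00:5E:00:00:XX form go to the
 * Special table keyed by the last byte; everything else goes to the
 * Other table keyed by the CRC-8:
 *
 *	if (!memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5))
 *		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
 *	else
 *		mvneta_set_other_mcast_addr(pp, mvneta_addr_crc(p_addr),
 *					    queue);
 */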

/* Configure filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{}

static void mvneta_link_change(struct mvneta_port *pp)
{}

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{}
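
/* Sketch of the cause dispatch implied by the layout above
 * (illustrative; the body is elided): TX-done bits are handled first
 * and cleared, then the remaining RX bits drive the receive processing:
 *
 *	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
 *		mvneta_tx_done_gbe(pp,
 *				   cause_rx_tx & MVNETA_TX_INTR_MASK_ALL);
 *		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
 *	}
 */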

static int mvneta_create_page_pool(struct mvneta_port *pp,
				   struct mvneta_rx_queue *rxq, int size)
{}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{}

static void mvneta_rx_reset(struct mvneta_port *pp)
{}

/* Rx/Tx queue initialization/cleanup methods */

static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{}

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{}

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{}

static int mvneta_txq_sw_init(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{}

static void mvneta_txq_hw_init(struct mvneta_port *pp,
			       struct mvneta_tx_queue *txq)
{}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{}

static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{}

static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{}


/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{}

static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
{}

static int mvneta_config_interface(struct mvneta_port *pp,
				   phy_interface_t interface)
{}

static void mvneta_start_dev(struct mvneta_port *pp)
{}

static void mvneta_stop_dev(struct mvneta_port *pp)
{}

static void mvneta_percpu_enable(void *arg)
{}

static void mvneta_percpu_disable(void *arg)
{}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{}

static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{}

static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
{}

static int mvneta_pcs_validate(struct phylink_pcs *pcs,
			       unsigned long *supported,
			       const struct phylink_link_state *state)
{}

static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
				 struct phylink_link_state *state)
{}

static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			     phy_interface_t interface,
			     const unsigned long *advertising,
			     bool permit_pause_to_mac)
{}

static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
{}

static const struct phylink_pcs_ops mvneta_phylink_pcs_ops =;

static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{}

static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{}

static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{}

static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{}

static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{}

static void mvneta_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{}

static void mvneta_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{}

static const struct phylink_mac_ops mvneta_phylink_ops =;

static int mvneta_mdio_probe(struct mvneta_port *pp)
{}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{}

/* Electing a CPU must be done in an atomic way: it should be done
 * either before or after the removal/insertion of a CPU, and this
 * function is not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu;

	/* Use the CPU associated with the default rxq when it is online;
	 * in all other cases, use CPU 0, which can't be offline.
	 */
	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the elected CPU */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue with
		 * the CPU bound to the default RX queue.
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(0) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according to the
		 * new mapping.
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
	}
}

static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{}

static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{}

static int mvneta_open(struct net_device *dev)
{}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{}

static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{}

static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtool */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{}

/* Get link ksettings for ethtool */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{}

/* Set interrupt coalescing for ethtool */
static int
mvneta_ethtool_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{}

/* Get interrupt coalescing for ethtool */
static int
mvneta_ethtool_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{}


static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{}


static void
mvneta_ethtool_get_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{}

static int
mvneta_ethtool_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{}

static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{}

static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
				 struct mvneta_ethtool_stats *es)
{}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{}

static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
{}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{}

static int mvneta_config_rss(struct mvneta_port *pp)
{}

static int mvneta_ethtool_set_rxfh(struct net_device *dev,
				   struct ethtool_rxfh_param *rxfh,
				   struct netlink_ext_ack *extack)
{}

static int mvneta_ethtool_get_rxfh(struct net_device *dev,
				   struct ethtool_rxfh_param *rxfh)
{}

static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{}

static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_keee *eee)
{}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_keee *eee)
{}

static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
{}

static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
{}

static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
{}

static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
{}

static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
				    u64 min_rate, u64 max_rate)
{}

static int mvneta_setup_mqprio(struct net_device *dev,
			       struct tc_mqprio_qopt_offload *mqprio)
{}

static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{}

static const struct net_device_ops mvneta_netdev_ops =;

static const struct ethtool_ops mvneta_eth_tool_ops =;

/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{}

/* Platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{}

/* Device removal routine */
static void mvneta_remove(struct platform_device *pdev)
{}

#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{}

static int mvneta_resume(struct device *device)
{}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] =;
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver =;

static int __init mvneta_driver_init(void)
{}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);