/* linux/drivers/net/ethernet/nvidia/forcedeth.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt)

#define FORCEDETH_VERSION
#define DRV_NAME

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP
#define RX_WORK_PER_LOOP

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ
#define DEV_NEED_LINKTIMER
#define DEV_HAS_LARGEDESC
#define DEV_HAS_HIGH_DMA
#define DEV_HAS_CHECKSUM
#define DEV_HAS_VLAN
#define DEV_HAS_MSI
#define DEV_HAS_MSI_X
#define DEV_HAS_POWER_CNTRL
#define DEV_HAS_STATISTICS_V1
#define DEV_HAS_STATISTICS_V2
#define DEV_HAS_STATISTICS_V3
#define DEV_HAS_STATISTICS_V12
#define DEV_HAS_STATISTICS_V123
#define DEV_HAS_TEST_EXTENDED
#define DEV_HAS_MGMT_UNIT
#define DEV_HAS_CORRECT_MACADDR
#define DEV_HAS_COLLISION_FIX
#define DEV_HAS_PAUSEFRAME_TX_V1
#define DEV_HAS_PAUSEFRAME_TX_V2
#define DEV_HAS_PAUSEFRAME_TX_V3
#define DEV_NEED_TX_LIMIT
#define DEV_NEED_TX_LIMIT2
#define DEV_HAS_GEAR_MODE
#define DEV_NEED_PHY_INIT_FIX
#define DEV_NEED_LOW_POWER_FIX
#define DEV_NEED_MSI_FIX

/* NOTE(review): many definitions in this snapshot are stripped (empty
 * bodies, elided initializers and macro values); confirm all details
 * against the full driver source before relying on them. */
enum {};

/* Big endian: should work, but is untested */
/* Legacy ring descriptor layout — presumably the 32-bit DESC_VER_1/2
 * format (see the desc_ver comment below); fields elided here. */
struct ring_desc {};

/* Extended ring descriptor — presumably the 64-bit DESC_VER_3 format;
 * fields elided here. */
struct ring_desc_ex {};

/* NOTE(review): looks like a truncated declaration — presumably a union
 * over the two descriptor types above; verify against the full source. */
ring_type;

#define FLAG_MASK_V1
#define FLAG_MASK_V2
#define LEN_MASK_V1
#define LEN_MASK_V2

#define NV_TX_LASTPACKET
#define NV_TX_RETRYERROR
#define NV_TX_RETRYCOUNT_MASK
#define NV_TX_FORCED_INTERRUPT
#define NV_TX_DEFERRED
#define NV_TX_CARRIERLOST
#define NV_TX_LATECOLLISION
#define NV_TX_UNDERFLOW
#define NV_TX_ERROR
#define NV_TX_VALID

#define NV_TX2_LASTPACKET
#define NV_TX2_RETRYERROR
#define NV_TX2_RETRYCOUNT_MASK
#define NV_TX2_FORCED_INTERRUPT
#define NV_TX2_DEFERRED
#define NV_TX2_CARRIERLOST
#define NV_TX2_LATECOLLISION
#define NV_TX2_UNDERFLOW
/* error and valid are the same for both */
#define NV_TX2_ERROR
#define NV_TX2_VALID
#define NV_TX2_TSO
#define NV_TX2_TSO_SHIFT
#define NV_TX2_TSO_MAX_SHIFT
#define NV_TX2_TSO_MAX_SIZE
#define NV_TX2_CHECKSUM_L3
#define NV_TX2_CHECKSUM_L4

#define NV_TX3_VLAN_TAG_PRESENT

#define NV_RX_DESCRIPTORVALID
#define NV_RX_MISSEDFRAME
#define NV_RX_SUBTRACT1
#define NV_RX_ERROR1
#define NV_RX_ERROR2
#define NV_RX_ERROR3
#define NV_RX_ERROR4
#define NV_RX_CRCERR
#define NV_RX_OVERFLOW
#define NV_RX_FRAMINGERR
#define NV_RX_ERROR
#define NV_RX_AVAIL
#define NV_RX_ERROR_MASK

#define NV_RX2_CHECKSUMMASK
#define NV_RX2_CHECKSUM_IP
#define NV_RX2_CHECKSUM_IP_TCP
#define NV_RX2_CHECKSUM_IP_UDP
#define NV_RX2_DESCRIPTORVALID
#define NV_RX2_SUBTRACT1
#define NV_RX2_ERROR1
#define NV_RX2_ERROR2
#define NV_RX2_ERROR3
#define NV_RX2_ERROR4
#define NV_RX2_CRCERR
#define NV_RX2_OVERFLOW
#define NV_RX2_FRAMINGERR
/* error and avail are the same for both */
#define NV_RX2_ERROR
#define NV_RX2_AVAIL
#define NV_RX2_ERROR_MASK

#define NV_RX3_VLAN_TAG_PRESENT
#define NV_RX3_VLAN_TAG_MASK

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1
#define NV_PCI_REGSZ_VER2
#define NV_PCI_REGSZ_VER3
#define NV_PCI_REGSZ_MAX

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY
#define NV_TXSTOP_DELAY1
#define NV_TXSTOP_DELAY1MAX
#define NV_TXSTOP_DELAY2
#define NV_RXSTOP_DELAY1
#define NV_RXSTOP_DELAY1MAX
#define NV_RXSTOP_DELAY2
#define NV_SETUP5_DELAY
#define NV_SETUP5_DELAYMAX
#define NV_POWERUP_DELAY
#define NV_POWERUP_DELAYMAX
#define NV_MIIBUSY_DELAY
#define NV_MIIPHY_DELAY
#define NV_MIIPHY_DELAYMAX
#define NV_MAC_RESET_DELAY

#define NV_WAKEUPPATTERNS
#define NV_WAKEUPMASKENTRIES

/* General driver defaults */
#define NV_WATCHDOG_TIMEO

#define RX_RING_DEFAULT
#define TX_RING_DEFAULT
#define RX_RING_MIN
#define TX_RING_MIN
#define RING_MAX_DESC_VER_1
#define RING_MAX_DESC_VER_2_3

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS
/* even more slack. */
#define NV_RX_ALLOC_PAD

/* maximum mtu size */
#define NV_PKTLIMIT_1
#define NV_PKTLIMIT_2

#define OOM_REFILL
#define POLL_WAIT
#define LINK_TIMEOUT
#define STATS_INTERVAL

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1
#define DESC_VER_2
#define DESC_VER_3

/* PHY defines */
#define PHY_OUI_MARVELL
#define PHY_OUI_CICADA
#define PHY_OUI_VITESSE
#define PHY_OUI_REALTEK
#define PHY_OUI_REALTEK2
#define PHYID1_OUI_MASK
#define PHYID1_OUI_SHFT
#define PHYID2_OUI_MASK
#define PHYID2_OUI_SHFT
#define PHYID2_MODEL_MASK
#define PHY_MODEL_REALTEK_8211
#define PHY_REV_MASK
#define PHY_REV_REALTEK_8211B
#define PHY_REV_REALTEK_8211C
#define PHY_MODEL_REALTEK_8201
#define PHY_MODEL_MARVELL_E3016
#define PHY_MARVELL_E3016_INITMASK
#define PHY_CICADA_INIT1
#define PHY_CICADA_INIT2
#define PHY_CICADA_INIT3
#define PHY_CICADA_INIT4
#define PHY_CICADA_INIT5
#define PHY_CICADA_INIT6
#define PHY_VITESSE_INIT_REG1
#define PHY_VITESSE_INIT_REG2
#define PHY_VITESSE_INIT_REG3
#define PHY_VITESSE_INIT_REG4
#define PHY_VITESSE_INIT_MSK1
#define PHY_VITESSE_INIT_MSK2
#define PHY_VITESSE_INIT1
#define PHY_VITESSE_INIT2
#define PHY_VITESSE_INIT3
#define PHY_VITESSE_INIT4
#define PHY_VITESSE_INIT5
#define PHY_VITESSE_INIT6
#define PHY_VITESSE_INIT7
#define PHY_VITESSE_INIT8
#define PHY_VITESSE_INIT9
#define PHY_VITESSE_INIT10
#define PHY_REALTEK_INIT_REG1
#define PHY_REALTEK_INIT_REG2
#define PHY_REALTEK_INIT_REG3
#define PHY_REALTEK_INIT_REG4
#define PHY_REALTEK_INIT_REG5
#define PHY_REALTEK_INIT_REG6
#define PHY_REALTEK_INIT_REG7
#define PHY_REALTEK_INIT1
#define PHY_REALTEK_INIT2
#define PHY_REALTEK_INIT3
#define PHY_REALTEK_INIT4
#define PHY_REALTEK_INIT5
#define PHY_REALTEK_INIT6
#define PHY_REALTEK_INIT7
#define PHY_REALTEK_INIT8
#define PHY_REALTEK_INIT9
#define PHY_REALTEK_INIT10
#define PHY_REALTEK_INIT11
#define PHY_REALTEK_INIT_MSK1

#define PHY_GIGABIT

#define PHY_TIMEOUT
#define PHY_ERROR

#define PHY_100
#define PHY_1000
#define PHY_HALF

#define NV_PAUSEFRAME_RX_CAPABLE
#define NV_PAUSEFRAME_TX_CAPABLE
#define NV_PAUSEFRAME_RX_ENABLE
#define NV_PAUSEFRAME_TX_ENABLE
#define NV_PAUSEFRAME_RX_REQ
#define NV_PAUSEFRAME_TX_REQ
#define NV_PAUSEFRAME_AUTONEG

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS
#define NV_MSI_X_VECTORS_MASK
#define NV_MSI_CAPABLE
#define NV_MSI_X_CAPABLE
#define NV_MSI_ENABLED
#define NV_MSI_X_ENABLED

#define NV_MSI_X_VECTOR_ALL
#define NV_MSI_X_VECTOR_RX
#define NV_MSI_X_VECTOR_TX
#define NV_MSI_X_VECTOR_OTHER

#define NV_MSI_PRIV_OFFSET
#define NV_MSI_PRIV_VALUE

#define NV_RESTART_TX
#define NV_RESTART_RX

#define NV_TX_LIMIT_COUNT

#define NV_DYNAMIC_THRESHOLD
#define NV_DYNAMIC_MAX_QUIET_COUNT

/* statistics */
/* statistics */
/* Name-table entry for ethtool statistics strings; fields elided. */
struct nv_ethtool_str {};

/* Names of the exported hardware statistics; initializer elided. */
static const struct nv_ethtool_str nv_estats_str[] =;

/* Mirror of the NIC's hardware statistics counters; fields elided. */
struct nv_ethtool_stats {};

/* Number of counters exported per hardware statistics version. */
#define NV_DEV_STATISTICS_V3_COUNT
#define NV_DEV_STATISTICS_V2_COUNT
#define NV_DEV_STATISTICS_V1_COUNT

/* diagnostics */
#define NV_TEST_COUNT_BASE
#define NV_TEST_COUNT_EXTENDED

/* Names of the ethtool self-test cases; initializer elided. */
static const struct nv_ethtool_str nv_etests_str[] =;

/* Register description used by the register self-test; fields elided. */
struct register_test {};

/* Table driving nv_register_test below; initializer elided. */
static const struct register_test nv_registers_test[] =;

/* Per-descriptor bookkeeping (presumably skb pointer + DMA mapping);
 * fields elided. */
struct nv_skb_map {};

/* Software tx/rx counters (see the SMP locking notes below); fields
 * elided. */
struct nv_txrx_stats {};

/* Helpers to update the software counters; expansions elided. */
#define nv_txrx_stats_inc(member)
#define nv_txrx_stats_add(member, count)

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */

/* in dev: base, irq */
/* in dev: base, irq */
/* Driver private state (rings, locks, PHY info, stats, ...); fields
 * elided in this snapshot. */
struct fe_priv {};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work =;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {};
static int optimization_mode =;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval =;

/*
 * MSI interrupts
 */
enum {};
static int msi =;

/*
 * MSIX interrupts
 */
enum {};
static int msix =;

/*
 * DMA 64bit
 */
enum {};
static int dma_64bit =;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout =;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {};
static int phy_cross =;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

/* Fetch the driver private area from the net_device; body elided. */
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{}

/* Fetch the mapped register base for the device; body elided. */
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{}

/* NOTE(review): presumably reads a register back to flush posted PCI
 * writes; body elided — confirm against the full source. */
static inline void pci_push(u8 __iomem *base)
{}

/* Extract the packet length from a legacy / extended descriptor;
 * presumably applies the version-specific LEN_MASK_V1/V2. Bodies elided. */
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{}

/* True when the device uses the "optimized" (extended-descriptor) code
 * paths; body elided. */
static bool nv_optimized(struct fe_priv *np)
{}

/* Poll a register at `offset` until (value & mask) == target, waiting
 * `delay` usec per iteration up to `delaymax`; body elided. */
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{}

#define NV_SETUP_RX_RING
#define NV_SETUP_TX_RING

/* Split a dma_addr_t into the low/high 32-bit halves used to program
 * the ring base registers; bodies elided. */
static inline u32 dma_low(dma_addr_t addr)
{}

static inline u32 dma_high(dma_addr_t addr)
{}

/* Program the rx/tx ring addresses into the NIC (rxtx_flags selects
 * NV_SETUP_RX_RING / NV_SETUP_TX_RING); body elided. */
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{}

/* Free ring memory and associated bookkeeping; body elided. */
static void free_rings(struct net_device *dev)
{}

/* Nonzero when separate per-source (MSI-X) interrupts are in use;
 * body elided. */
static int using_multi_irqs(struct net_device *dev)
{}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{}

/* Enable/disable delivery of the device interrupt(s); bodies elided. */
static void nv_enable_irq(struct net_device *dev)
{}

static void nv_disable_irq(struct net_device *dev)
{}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{}

/* Enable/disable the NAPI context(s); bodies elided. */
static void nv_napi_enable(struct net_device *dev)
{}

static void nv_napi_disable(struct net_device *dev)
{}
#define MII_READ
/* mii_rw: read/write a register on the PHY.
 * (presumably MII_READ is passed as `value` to request a read —
 * confirm against the full source)
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{}

/* Reset the PHY, presumably seeding BMCR with `bmcr_setup`; body elided. */
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{}

/* Model-specific PHY initialization sequences (see the PHY_OUI_* and
 * PHY_*_INIT* defines above); bodies elided in this snapshot. */
static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{}

/* Top-level PHY bring-up; presumably dispatches to the model-specific
 * helpers above. Body elided. */
static int phy_init(struct net_device *dev)
{}

/* Start/stop the receiver and transmitter engines; bodies elided.
 * (The NV_RXSTOP_/NV_TXSTOP_ delay defines above presumably bound the
 * stop waits.) */
static void nv_start_rx(struct net_device *dev)
{}

static void nv_stop_rx(struct net_device *dev)
{}

static void nv_start_tx(struct net_device *dev)
{}

static void nv_stop_tx(struct net_device *dev)
{}

static void nv_start_rxtx(struct net_device *dev)
{}

static void nv_stop_rxtx(struct net_device *dev)
{}

/* Soft-reset the tx/rx logic and the whole MAC respectively; bodies
 * elided. */
static void nv_txrx_reset(struct net_device *dev)
{}

static void nv_mac_reset(struct net_device *dev)
{}

/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{}

/* Fold software counters for `cpu` into `storage`; body elided. */
static void nv_get_stats(int cpu, struct fe_priv *np,
			 struct rtnl_link_stats64 *storage)
{}

/*
 * nv_get_stats64: dev->ndo_get_stats64 function
 * Get latest stats value from the nic.
 * Called with rcu_read_lock() held -
 * only synchronized against unregister_netdevice.
 */
static void
nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{}

/* Extended-descriptor variant of nv_alloc_rx (presumably same return
 * convention); body elided. */
static int nv_alloc_rx_optimized(struct net_device *dev)
{}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(struct timer_list *t)
{}

/* Initialize the rx/tx rings to their empty state; bodies elided. */
static void nv_init_rx(struct net_device *dev)
{}

static void nv_init_tx(struct net_device *dev)
{}

static int nv_init_ring(struct net_device *dev)
{}

/* Undo the DMA mapping for one tx slot; body elided. */
static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{}

/* Unmap and release the skb for one tx slot; body elided. */
static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{}

/* Release every outstanding buffer on the tx/rx rings; bodies elided. */
static void nv_drain_tx(struct net_device *dev)
{}

static void nv_drain_rx(struct net_device *dev)
{}

static void nv_drain_rxtx(struct net_device *dev)
{}

/* Number of free tx descriptors remaining; body elided. */
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{}

/* Reseed the NIC's legacy collision-backoff generator; body elided. */
static void nv_legacybackoff_reseed(struct net_device *dev)
{}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS
#define BACKOFF_SEEDSET_LFSRS

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] =;

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] =;

/* Reseed the backoff LFSRs from the seed tables above (presumably used
 * when DEV_HAS_GEAR_MODE is set); body elided. */
static void nv_gear_backoff_reseed(struct net_device *dev)
{}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{}

/* Extended-descriptor variant of nv_start_xmit; body elided. */
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{}

static inline void nv_tx_flip_ownership(struct net_device *dev)
{}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{}

/* Extended-descriptor variant of nv_tx_done (presumably the same
 * locking rule applies); body elided. */
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{}

/* Account a frame the hardware flagged as missed; body elided. */
static void rx_missing_handler(u32 flags, struct fe_priv *np)
{}

/* Receive-path processing, up to `limit` packets per call; bodies
 * elided. */
static int nv_rx_process(struct net_device *dev, int limit)
{}

static int nv_rx_process_optimized(struct net_device *dev, int limit)
{}

/* Recompute the rx buffer size from the current MTU; body elided. */
static void set_bufsize(struct net_device *dev)
{}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with RTNL held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{}

/* Write the device MAC address into the NIC registers; body elided. */
static void nv_copy_mac_to_hw(struct net_device *dev)
{}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{}

/* Apply the NV_PAUSEFRAME_* flags to the hardware; body elided. */
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{}

/* Force the MAC to a fixed speed/duplex; body elided. */
static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
{}

/**
 * nv_update_linkspeed - Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{}

/* React to a link state change; body elided. */
static void nv_linkchange(struct net_device *dev)
{}

/* Handle the link-change interrupt source; body elided. */
static void nv_link_irq(struct net_device *dev)
{}

/* NOTE(review): presumably related to DEV_NEED_MSI_FIX (lost MSI
 * interrupts); body elided — confirm against the full source. */
static void nv_msi_workaround(struct fe_priv *np)
{}

/* Switch between throughput and CPU interrupt modes based on recent
 * work done (see optimization_mode above); body elided. */
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{}

/* Legacy-descriptor interrupt handler; body elided. */
static irqreturn_t nv_nic_irq(int foo, void *data)
{}

/* All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{}

/* Per-source handlers — presumably one per MSI-X vector (tx / rx /
 * other, matching NV_MSI_X_VECTOR_*); bodies elided. */
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{}

static int nv_napi_poll(struct napi_struct *napi, int budget)
{}

static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{}

static irqreturn_t nv_nic_irq_other(int foo, void *data)
{}

/* Handler presumably used by the ethtool interrupt self-test
 * (nv_interrupt_test below); body elided. */
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{}

/* Map an interrupt source onto an MSI-X vector; body elided. */
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{}

/* Request MSI-X/MSI/legacy interrupts as available; body elided. */
static int nv_request_irq(struct net_device *dev, int intr_test)
{}

static void nv_free_irq(struct net_device *dev)
{}

/* Timer-driven fallback poll of the NIC (see "Known bugs" in the file
 * header); body elided. */
static void nv_do_nic_poll(struct timer_list *t)
{}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{}
#endif

/* Periodic hardware statistics refresh (presumably at STATS_INTERVAL);
 * body elided. */
static void nv_do_stats_poll(struct timer_list *t)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{}

/* ethtool_ops callbacks; bodies elided in this snapshot. */
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{}

static int nv_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{}

static int nv_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{}

#define FORCEDETH_REGS_VER

static int nv_get_regs_len(struct net_device *dev)
{}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{}

static int nv_nway_reset(struct net_device *dev)
{}

static void nv_get_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{}

static int nv_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{}

static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{}

static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
{}

/* netdev feature negotiation hooks; bodies elided. */
static netdev_features_t nv_fix_features(struct net_device *dev,
	netdev_features_t features)
{}

static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{}

static int nv_set_features(struct net_device *dev, netdev_features_t features)
{}

static int nv_get_sset_count(struct net_device *dev, int sset)
{}

static void nv_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *estats, u64 *buffer)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{}

/* ethtool self-test cases (link / register / interrupt / loopback) and
 * the top-level nv_self_test dispatcher; bodies elided. */
static int nv_link_test(struct net_device *dev)
{}

static int nv_register_test(struct net_device *dev)
{}

static int nv_interrupt_test(struct net_device *dev)
{}

static int nv_loopback_test(struct net_device *dev)
{}

static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{}

/* ethtool_ops table; initializer elided in this snapshot. */
static const struct ethtool_ops ops =;

/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{}

static void nv_mgmt_release_sema(struct net_device *dev)
{}


static int nv_mgmt_get_version(struct net_device *dev)
{}

/* net_device open/stop callbacks; bodies elided. */
static int nv_open(struct net_device *dev)
{}

static int nv_close(struct net_device *dev)
{}

/* netdev op tables — presumably one for the legacy and one for the
 * extended-descriptor paths; initializers elided. */
static const struct net_device_ops nv_netdev_ops =;

static const struct net_device_ops nv_netdev_ops_optimized =;

/* PCI probe/remove and related teardown helpers; bodies elided. */
static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{}

static void nv_restore_phy(struct net_device *dev)
{}

static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{}

static void nv_remove(struct pci_dev *pci_dev)
{}

#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{}

static int nv_resume(struct device *device)
{}

static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
#define NV_PM_OPS

#else
/* NOTE(review): fallback value elided in this snapshot (presumably
 * NULL); confirm against the full source. */
#define NV_PM_OPS
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{}
#else
/* NOTE(review): fallback value elided in this snapshot. */
#define nv_shutdown
#endif /* CONFIG_PM */

/* Module parameters and module metadata; the MODULE_PARM_DESC /
 * MODULE_* argument strings are elided in this snapshot. */
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC();
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC();
module_param(poll_interval, int, 0);
MODULE_PARM_DESC();
module_param(msi, int, 0);
MODULE_PARM_DESC();
module_param(msix, int, 0);
MODULE_PARM_DESC();
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC();
module_param(phy_cross, int, 0);
MODULE_PARM_DESC();
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC();
module_param(debug_tx_timeout, bool, 0);
MODULE_PARM_DESC();

module_pci_driver();
MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_DEVICE_TABLE(pci, pci_tbl);