// SPDX-License-Identifier: GPL-2.0+
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page. (a sketch of this scheme follows this comment block.)
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing (sketched near cas_start_xmit() below). They
 * can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
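
/* Illustrative sketch (not compiled, and not part of the driver) of the
 * page-as-fragment scheme described above: copy a small header (the 64
 * bytes mentioned in the comment) into the skb's linear area, then attach
 * the remainder of the page as a fragment while holding an extra page
 * reference.  The function name and helper usage are assumptions of this
 * sketch; the real logic lives in cas_rx_process_pkt() below.
 */
#if 0
static struct sk_buff *cas_rx_page_to_skb_sketch(struct net_device *dev,
						 struct page *page, int off,
						 int len)
{
	const int hdr = min(len, 64);	/* stack expects the header in the skb */
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, hdr + NET_IP_ALIGN);
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);
	memcpy(skb_put(skb, hdr), page_address(page) + off, hdr);

	if (len > hdr) {
		/* the rest of the packet stays in the page: attach it as a
		 * fragment and take a reference so the page survives until
		 * the upper layers drop the skb.
		 */
		get_page(page);
		skb_add_rx_frag(skb, 0, page, off + hdr, len - hdr, len - hdr);
	}
	return skb;
}
#endif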

#define pr_fmt(fmt)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#define CAS_NCPUS

#define cas_skb_release(x)

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT
#define CAS_HP_ALT_FIRMWARE

#include "cassini.h"

#define USE_TX_COMPWB
#define USE_CSMA_CD_PROTO
#define USE_RX_BLANK
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER
#define RX_DONT_BATCH
#define RX_COPY_ALWAYS
#define RX_COPY_MIN
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME
#define DRV_MODULE_VERSION
#define DRV_MODULE_RELDATE

#define CAS_DEF_MSG_ENABLE

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT
#define CAS_LINK_TIMEOUT
#define CAS_LINK_FAST_TIMEOUT

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY
#define STOP_TRIES

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME
#define CAS_1000MB_MIN_FRAME
#define CAS_MIN_MTU
#define CAS_MAX_MTU

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU
#define CAS_RESET_ALL
#define CAS_RESET_SPARE
#endif
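
/* Sketch of the "separate atomic counters" direction suggested above: one
 * counter per pending-reset reason, so concurrent requesters cannot clobber
 * each other's flag bits.  The structure and field names are hypothetical,
 * not fields of struct cas.
 */
#if 0
struct cas_reset_counters_sketch {
	atomic_t pending;	/* any reset requested */
	atomic_t pending_all;	/* full chip reset requested */
	atomic_t pending_mtu;	/* reset requested by an MTU change */
	atomic_t pending_spare;	/* reset requested by spare buffer recovery */
};

static inline void cas_request_mtu_reset_sketch(struct cas_reset_counters_sketch *r)
{
	atomic_inc(&r->pending_mtu);	/* an increment can never be lost */
	atomic_inc(&r->pending);
}
#endif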

static char version[] =;

static int cassini_debug =;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_FIRMWARE();
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC();
module_param(link_mode, int, 0);
MODULE_PARM_DESC();

/*
 * Workaround for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout =;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC();

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;
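
/* Minimal sketch of how the tick value above is expected to be derived from
 * the user-supplied seconds at module init time (see cas_init() at the end
 * of this file); illustration only, not the driver's exact code.
 */
#if 0
static void cas_link_timeout_init_sketch(void)
{
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;	/* treated as "no timeout" */
}
#endif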



static u16 link_modes[] =;

static const struct pci_device_id cas_pci_tbl[] =;

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags)

static inline void cas_unlock_tx(struct cas *cp)
{}

#define cas_unlock_all_restore(cp, flags)
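
/* Sketch of the save/restore locking pattern the comment above refers to:
 * take the global lock with the irqsave variant, then the per-ring TX
 * locks, and release in the reverse order with irqrestore.  The real macro
 * bodies are elided in this listing; this is only an illustration.
 */
#if 0
#define cas_lock_all_save_sketch(cp, flags)		\
do {							\
	struct cas *xxxcp = (cp);			\
	spin_lock_irqsave(&xxxcp->lock, flags);		\
	cas_lock_tx(xxxcp);				\
} while (0)

#define cas_unlock_all_restore_sketch(cp, flags)	\
do {							\
	struct cas *xxxcp = (cp);			\
	cas_unlock_tx(xxxcp);				\
	spin_unlock_irqrestore(&xxxcp->lock, flags);	\
} while (0)
#endif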

static void cas_disable_irq(struct cas *cp, const int ring)
{}

static inline void cas_mask_intr(struct cas *cp)
{}

static void cas_enable_irq(struct cas *cp, const int ring)
{}

static inline void cas_unmask_intr(struct cas *cp)
{}

static inline void cas_entropy_gather(struct cas *cp)
{}

static inline void cas_entropy_reset(struct cas *cp)
{}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{}
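
/* Sketch of a frame-mode MIF read as described above: build a read frame
 * (start bits, read opcode, phy address, register, turnaround), write it to
 * the MIF frame register, then poll until the chip flips the turnaround LSB
 * and returns the data.  The register and bit names are assumed to come
 * from cassini.h; illustration only.
 */
#if 0
static u16 cas_phy_read_sketch(struct cas *cp, int reg)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF;	/* timed out */
}
#endif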

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{}

static void cas_phy_powerup(struct cas *cp)
{}

static void cas_phy_powerdown(struct cas *cp)
{}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD
#define RX_USED_SET
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{}
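
/* Sketch of the allocation the comment above calls for: a higher-order page
 * allocation gives both the size and the alignment, since an order-N block
 * is aligned to its own size (8K contiguous == order 1 on 4K pages).  The
 * cas_page_t layout (list/buffer/dma_addr/used) and the page_order/
 * page_size/pdev fields are assumptions of this sketch.
 */
#if 0
static cas_page_t *cas_page_alloc_sketch(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(*page), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	page->used = 0;
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto err_page;
	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cp->pdev->dev, page->dma_addr))
		goto err_buffer;
	return page;

err_buffer:
	__free_pages(page->buffer, cp->page_order);
err_page:
	kfree(page);
	return NULL;
}
#endif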

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{}


static void cas_mif_poll(struct cas *cp, const int enable)
{}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{}

static void cas_saturn_firmware_init(struct cas *cp)
{}

static void cas_saturn_firmware_load(struct cas *cp)
{}


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{}


static int cas_pcs_link_check(struct cas *cp)
{}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{}

static void cas_init_rx_dma(struct cas *cp)
{}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{}
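
/* Sketch of the swap-with-spare reclamation described above: if the page the
 * chip handed back is still referenced by the stack (elevated page count),
 * replace it with a page from the spare pool and let the last put_page()
 * free the old one; otherwise simply reuse it.  The rx_pages[][] layout is
 * an assumption of this sketch.
 */
#if 0
static cas_page_t *cas_page_swap_sketch(struct cas *cp, const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap to the spare slot only if the page is still in use upstream */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);

		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}
#endif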

static void cas_clean_rxds(struct cas *cp)
{}

static void cas_clean_rxcs(struct cas *cp)
{}

#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{}


/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{}


/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 *       determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)
#define CAS_ROUND_PAGE(x)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{}

static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{}

static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{}


static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{}


/* we can handle up to 64 rx flows at a time. we do the same thing
 * as nonreassm except that we batch up the buffers.
 * NOTE: we currently just treat each flow as a bunch of packets that
 *       we pass up. a better way would be to coalesce the packets
 *       into a jumbo packet. to do that, we need to do the following:
 *       1) the first packet will have a clean split between header and
 *          data. save both.
 *       2) each time the next flow packet comes in, extend the
 *          data length and merge the checksums.
 *       3) on flow release, fix up the header.
 *       4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{}
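
/* Minimal sketch of the per-flow batching described above: the flow id from
 * the completion word selects an skb queue, packets are queued as they
 * arrive, and the whole batch is released when the hardware signals the end
 * of the flow.  The flow-id extraction macros and the rx_flows[] array are
 * assumptions of this sketch.
 */
#if 0
static void cas_rx_flow_pkt_sketch(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{
	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);

	/* batch until the flow is released */
	__skb_queue_tail(&cp->rx_flows[flowid], skb);
}

static void cas_rx_flow_release_sketch(struct cas *cp, int flowid)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&cp->rx_flows[flowid])) != NULL)
		cas_skb_release(skb);	/* hand the batched packets upstream */
}
#endif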

/* put rx descriptor back on ring. if a buffer is in use by a higher
 * layer, this will need to put in a replacement.
 */
static void cas_post_page(struct cas *cp, const int ring, const int index)
{}


/* only when things are bad */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{}


/* process a completion ring. packets are set up in three basic ways:
 * small packets: should be copied header + data in single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data.
 *                data may be in multiple pages. data may be > 256
 *                bytes but in a single page.
 *
 * NOTE: RX page posting is done in this routine as well. while there's
 *       the capability of using multiple RX completion rings, it isn't
 *       really worthwhile due to the fact that the page posting will
 *       force serialization on the single descriptor ring.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{}


/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{}



/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

#ifdef USE_PCI_INTB
/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition. */
		cas_post_rxds_ringN(cp, 1, 0);
		spin_lock(&cp->stat_lock[1]);
		cp->net_stats[1].rx_dropped++;
		spin_unlock(&cp->stat_lock[1]);
	}

	if (status & INTR_RX_BUF_AE_1)
		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
				    RX_AE_FREEN_VAL(1));

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp->dev, cp, 1);
}

/* ring 2 handles a few more events than 3 and 4 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{}

static irqreturn_t cas_interrupt(int irq, void *dev_id)
{}


#ifdef USE_NAPI
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets. we spread the credits across all of the
	 * rxc rings
	 *
	 * to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RING times with a request of
	 * budget / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
			if (credits >= budget) {
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		napi_complete(napi);
		cas_unmask_intr(cp);
	}
	return credits;
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cas_netpoll(struct net_device *dev)
{}
#endif

static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{}

static inline int cas_intme(int ring, int entry)
{}


static void cas_write_txd(struct cas *cp, int ring, int entry,
			  dma_addr_t mapping, int len, u64 ctrl, int last)
{}

static inline void *tx_tiny_buf(struct cas *cp, const int ring,
				const int entry)
{}

static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
				     const int entry, const int tentry)
{}

static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{}

static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{}
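
/* Sketch of the round-robin ring selection described in the header comment
 * ("TX has 4 queues ... used in a round-robin fashion"): a rotating counter
 * picks the next ring for each packet.  The min_frame_size field is an
 * assumption of this sketch.
 */
#if 0
static netdev_tx_t cas_start_xmit_sketch(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	static int ring;	/* a load-balancing hint only; need not be SMP safe */

	/* pad short frames to dodge the fifo issues noted near CAS_MIN_FRAME */
	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;	/* skb already freed by skb_padto() */

	if (cas_xmit_tx_ringN(cp, ring++ & (N_TX_RINGS - 1), skb))
		return NETDEV_TX_BUSY;	/* ring full; the stack will retry */
	return NETDEV_TX_OK;
}
#endif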

static void cas_init_tx_dma(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static inline void cas_init_dma(struct cas *cp)
{}

static void cas_process_mc_list(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{}

/* must be invoked under cp->stat_lock[N_TX_RINGS] */
static void cas_clear_mac_err(struct cas *cp)
{}


static void cas_mac_reset(struct cas *cp)
{}


/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static void cas_init_pause_thresholds(struct cas *cp)
{}

static int cas_vpd_match(const void __iomem *p, const char *str)
{}


/* get the mac address by reading the vpd information in the rom.
 * also get the phy type and determine if there's an entropy generator.
 * NOTE: this is a bit convoluted for the following reasons:
 *  1) vpd info has order-dependent mac addresses for multinic cards
 *  2) the only way to determine the nic order is to use the slot
 *     number.
 *  3) fiber cards don't have bridges, so their slot numbers don't
 *     mean anything.
 *  4) we don't actually know we have a fiber card until after
 *     the mac addresses are parsed.
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{}

/* check pci invariants */
static void cas_check_pci_invariants(struct cas *cp)
{}


static int cas_check_invariants(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static inline void cas_start_dma(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{}

/* Must be invoked under cp->lock. */
static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under cp->lock.
 */
static void cas_set_link_modes(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static void cas_init_hw(struct cas *cp, int restart_link)
{}

/* Must be invoked under cp->lock. on earlier cassini boards,
 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
 * let it settle out, and then restore pci state.
 */
static void cas_hard_reset(struct cas *cp)
{}


static void cas_global_reset(struct cas *cp, int blkflag)
{}

static void cas_reset(struct cas *cp, int blkflag)
{}

/* Shut down the chip, must be called with pm_mutex held.  */
static void cas_shutdown(struct cas *cp)
{}

static int cas_change_mtu(struct net_device *dev, int new_mtu)
{}

static void cas_clean_txd(struct cas *cp, int ring)
{}

/* freed on close */
static inline void cas_free_rx_desc(struct cas *cp, int ring)
{}

static void cas_free_rxds(struct cas *cp)
{}

/* Must be invoked under cp->lock. */
static void cas_clean_rings(struct cas *cp)
{}

/* allocated on open */
static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
{}

static int cas_alloc_rxds(struct cas *cp)
{}

static void cas_reset_task(struct work_struct *work)
{}

static void cas_link_timer(struct timer_list *t)
{}

/* tiny buffers are used to avoid target abort issues with
 * older cassini chips.
 */
static void cas_tx_tiny_free(struct cas *cp)
{}

static int cas_tx_tiny_alloc(struct cas *cp)
{}
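
/* Sketch of the "tiny buffer" allocation mentioned above: one small
 * DMA-coherent block per TX ring, used to bounce fragments that would
 * otherwise trigger target aborts on older cassini revisions.  The
 * tx_tiny_bufs/tx_tiny_dvma fields and the TX_TINY_BUF_BLOCK size are
 * assumptions of this sketch.
 */
#if 0
static int cas_tx_tiny_alloc_sketch(struct cas *cp)
{
	struct device *d = &cp->pdev->dev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		cp->tx_tiny_bufs[i] =
			dma_alloc_coherent(d, TX_TINY_BUF_BLOCK,
					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
		if (!cp->tx_tiny_bufs[i]) {
			cas_tx_tiny_free(cp);	/* unwind any earlier rings */
			return -ENOMEM;
		}
	}
	return 0;
}
#endif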


static int cas_open(struct net_device *dev)
{}

static int cas_close(struct net_device *dev)
{}

static struct {} ethtool_cassini_statnames[] =;
#define CAS_NUM_STAT_KEYS

static struct {} ethtool_register_table[] =;
#define CAS_REG_LEN
#define CAS_MAX_REGS

static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{}

static struct net_device_stats *cas_get_stats(struct net_device *dev)
{}


static void cas_set_multicast(struct net_device *dev)
{}

static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{}

static int cas_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{}

static int cas_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{}

static int cas_nway_reset(struct net_device *dev)
{}

static u32 cas_get_link(struct net_device *dev)
{}

static u32 cas_get_msglevel(struct net_device *dev)
{}

static void cas_set_msglevel(struct net_device *dev, u32 value)
{}

static int cas_get_regs_len(struct net_device *dev)
{}

static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{}

static int cas_get_sset_count(struct net_device *dev, int sset)
{}

static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{}

static void cas_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *estats, u64 *data)
{}

static const struct ethtool_ops cas_ethtool_ops =;

static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{}

/* When this chip sits underneath an Intel 31154 bridge, it is the
 * only subordinate device and we can tweak the bridge settings to
 * reflect that fact.
 */
static void cas_program_bridge(struct pci_dev *cas_pdev)
{}

static const struct net_device_ops cas_netdev_ops =;

static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{}

static void cas_remove_one(struct pci_dev *pdev)
{}

static int __maybe_unused cas_suspend(struct device *dev_d)
{}

static int __maybe_unused cas_resume(struct device *dev_d)
{}

static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);

static struct pci_driver cas_driver =;

static int __init cas_init(void)
{}

static void __exit cas_cleanup(void)
{}

module_init(cas_init);
module_exit(cas_cleanup);