/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

        Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE
#define MIT_TABLE

static unsigned int mit_table[MIT_SIZE+1] =;
#endif
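
/*
 * Illustrative sketch, not part of the original driver: with
 * CONFIG_TULIP_NAPI_HW_MITIGATION the NAPI poll path is expected to pick an
 * entry from mit_table[] according to how busy the last poll was and program
 * it into the 21143's interrupt-mitigation register (CSR11).  The helper
 * name and the all-or-nothing policy below are assumptions for illustration
 * only, so the sketch is kept out of the build with #if 0.
 */
#if 0
static void tulip_rx_mitigation_sketch(struct tulip_private *tp,
				       int rx_seen_this_poll)
{
	/* Busy poll: strongest mitigation entry; idle poll: mitigation off. */
	unsigned int csr11 = rx_seen_this_poll ? mit_table[MIT_TABLE] : 0;

	iowrite32(csr11, tp->base_addr + CSR11);
}
#endif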


int tulip_refill_rx(struct net_device *dev)
{}

#ifdef CONFIG_TULIP_NAPI

void oom_timer(struct timer_list *t)
{}

int tulip_poll(struct napi_struct *napi, int budget)
{}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		  Omit the four octet CRC from the length.
		  (The length may not be valid until the status
		  has been checked for the RxLengthOver2047 bit.)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		  Maximum pkt_len is 1518 (1514 + VLAN header).
		  Anything higher than this is always invalid,
		  regardless of the RxLengthOver2047 bit.
		*/

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt))        != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
			     RxWholePkt))         != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				dev->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&tp->pdev->dev,
							tp->rx_buffers[entry].mapping,
							pkt_len,
							DMA_FROM_DEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb,
					     tp->rx_buffers[entry].skb->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&tp->pdev->dev,
							   tp->rx_buffers[entry].mapping,
							   pkt_len,
							   DMA_FROM_DEVICE);
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt (struct net_device *dev)
{}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{}
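
/*
 * Illustrative sketch, not a substitute for the driver's real handler (whose
 * body is omitted above): the usual shape for this chip is to read the
 * status register (CSR5), acknowledge the pending sources, and dispatch Rx
 * and Tx work in a loop bounded by tulip_max_interrupt_work.  Exact bit
 * handling and bookkeeping are assumptions here, so the sketch is kept out
 * of the build with #if 0.
 */
#if 0
static irqreturn_t tulip_interrupt_sketch(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned int work = tulip_max_interrupt_work;
	u32 csr5 = ioread32(ioaddr + CSR5);

	if ((csr5 & (NormalIntr | AbnormalIntr)) == 0)
		return IRQ_NONE;	/* Not our interrupt. */

	do {
		/* Acknowledge all currently pending interrupt sources. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			tulip_rx(dev);		/* non-NAPI receive path */
			tulip_refill_rx(dev);
		}

		/* Tx completion and abnormal-status handling would go here. */

		csr5 = ioread32(ioaddr + CSR5);
	} while ((csr5 & (NormalIntr | AbnormalIntr)) && --work);

	return IRQ_HANDLED;
}
#endif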