#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
/* Copy received frames smaller than this threshold into a freshly allocated skb. */
int tulip_rx_copybreak;
/* Upper bound on the amount of work handled in one call to the interrupt handler. */
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE …
#define MIT_TABLE …
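/*
 * Interrupt mitigation settings for the chip's CSR11 register, indexed by
 * the recent Rx packet rate.  Only Rx interrupts are mitigated this way.
 */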
static unsigned int mit_table[MIT_SIZE+1] = …;
#endif
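/*
 * Replenish the Rx ring with freshly allocated, DMA-mapped skbuffs.
 * Returns the number of entries that were refilled.
 */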
int tulip_refill_rx(struct net_device *dev)
{ … }
#ifdef CONFIG_TULIP_NAPI
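/*
 * Out-of-memory timer: reschedule NAPI polling so the Rx ring can be
 * refilled after an earlier skb allocation failure.
 */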
void oom_timer(struct timer_list *t)
{ … }
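/* NAPI poll handler: process up to @budget received packets and refill the Rx ring. */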
int tulip_poll(struct napi_struct *napi, int budget)
{ … }
#else
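/* Non-NAPI receive path, called from the interrupt handler. */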
static int tulip_rx(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int entry = tp->cur_rx % RX_RING_SIZE;
int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
int received = 0;
if (tulip_debug > 4)
netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
entry, tp->rx_ring[entry].status);
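	/* If we own the next entry, it is a new packet. Send it up. */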
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
entry, status);
if (--rx_work_limit < 0)
break;
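		/*
		 * Omit the four octet CRC from the length.
		 * (May not be considered valid until we have
		 * checked the status for the RxLengthOver2047 bit)
		 */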
pkt_len = ((status >> 16) & 0x7ff) - 4;
if ((status & (RxLengthOver2047 |
RxDescCRCError |
RxDescCollisionSeen |
RxDescRunt |
RxDescDescErr |
RxWholePkt)) != RxWholePkt ||
pkt_len > 1518) {
if ((status & (RxLengthOver2047 |
RxWholePkt)) != RxWholePkt) {
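				/* Ignore earlier buffers of the oversized frame. */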
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
netdev_warn(dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
dev->stats.rx_length_errors++;
}
} else {
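				/* There was a fatal error. */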
if (tulip_debug > 2)
netdev_dbg(dev, "Receive error, Rx status %08x\n",
status);
dev->stats.rx_errors++;
if (pkt_len > 1518 ||
(status & RxDescRunt))
dev->stats.rx_length_errors++;
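				/* Fold the individual descriptor error bits into the statistics. */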
if (status & 0x0004)
dev->stats.rx_frame_errors++;
if (status & 0x0002)
dev->stats.rx_crc_errors++;
if (status & 0x0001)
dev->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb;
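			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */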
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2);
dma_sync_single_for_cpu(&tp->pdev->dev,
tp->rx_buffers[entry].mapping,
pkt_len,
DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
skb_put(skb, pkt_len);
#else
skb_put_data(skb,
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
dma_sync_single_for_device(&tp->pdev->dev,
tp->rx_buffers[entry].mapping,
pkt_len,
DMA_FROM_DEVICE);
} else {
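				/* Pass up the skb already on the Rx ring. */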
				char *temp;

				skb = tp->rx_buffers[entry].skb;
				temp = skb_put(skb, pkt_len);
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
dev_err(&dev->dev,
"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
le32_to_cpu(tp->rx_ring[entry].buffer1),
(long long)tp->rx_buffers[entry].mapping,
skb->head, temp);
}
#endif
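				/* Detach the buffer from the ring and unmap it;
				   tulip_refill_rx() will attach a replacement. */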
dma_unmap_single(&tp->pdev->dev,
tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
received++;
entry = (++tp->cur_rx) % RX_RING_SIZE;
}
return received;
}
#endif
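/*
 * Check for and service a pending PHY (link change) interrupt, where the
 * platform supports one.  Returns nonzero if an interrupt was handled.
 */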
static inline unsigned int phy_interrupt (struct net_device *dev)
{ … }
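/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */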
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{ … }