[Click] error when compile click with kernel2.4.20
Eddie Kohler
kohler at icir.org
Fri Jul 11 17:43:28 EDT 2003
Hi Zheng,
Sorry, I misspoke; the README mentions 2.2 kernels, not the 2.4. I've
attached the patch currently in our anonymous CVS repository for 2.4.20.
Eddie
-------------- next part --------------
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/drivers/net/eepro100.c ./drivers/net/eepro100.c
--- ../linux-2.4.20-pure/drivers/net/eepro100.c 2002-11-28 15:53:13.000000000 -0800
+++ ./drivers/net/eepro100.c 2003-01-22 17:41:40.000000000 -0800
@@ -544,6 +544,16 @@
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
+/* device polling stuff */
+static int speedo_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static int speedo_tx_eob(struct net_device *dev);
+static int speedo_tx_start(struct net_device *dev);
+static int speedo_rx_refill(struct net_device *dev, struct sk_buff **);
+static struct sk_buff *speedo_tx_clean(struct net_device *dev);
+static struct sk_buff *speedo_rx_poll(struct net_device *dev, int *want);
+static int speedo_poll_on(struct net_device *dev);
+static int speedo_poll_off(struct net_device *dev);
+
#ifdef honor_default_port
@@ -874,6 +884,17 @@
dev->set_multicast_list = &set_rx_mode;
dev->do_ioctl = &speedo_ioctl;
+ /* Click: polling support */
+ dev->polling = 0;
+ dev->poll_on = &speedo_poll_on;
+ dev->poll_off = &speedo_poll_off;
+ dev->rx_poll = &speedo_rx_poll;
+ dev->rx_refill = &speedo_rx_refill;
+ dev->tx_queue = &speedo_tx_queue;
+ dev->tx_clean = &speedo_tx_clean;
+ dev->tx_start = &speedo_tx_start;
+ dev->tx_eob = &speedo_tx_eob;
+
return 0;
}
@@ -1125,7 +1146,8 @@
ioaddr + SCBPointer);
/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
remain masked --Dragan */
- outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+ outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl |
+ (dev->polling ? SCBMaskAll : 0), ioaddr + SCBCmd);
}
/*
@@ -1390,7 +1412,8 @@
dev->name);
outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
ioaddr + SCBPointer);
- outw(CUStart, ioaddr + SCBCmd);
+ outw(CUStart | (dev->polling ? SCBMaskAll : 0),
+ ioaddr + SCBCmd);
reset_mii(dev);
} else {
#else
@@ -1437,6 +1460,14 @@
/* Prevent interrupts from changing the Tx ring from underneath us. */
unsigned long flags;
+#if 0
+ if (dev->polling)
+ {
+ printk(KERN_ERR "%s: start_xmit while polling\n", dev->name);
+ return 1;
+ }
+#endif
+
spin_lock_irqsave(&sp->lock, flags);
/* Check if there are enough space. */
@@ -1494,7 +1525,6 @@
spin_unlock_irqrestore(&sp->lock, flags);
dev->trans_start = jiffies;
-
return 0;
}
@@ -1503,6 +1533,12 @@
unsigned int dirty_tx;
struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ if (dev->polling) {
+ printk(KERN_ERR "%s: speedo_tx_buffer_gc while polling\n",
+ dev->name);
+ return;
+ }
+
dirty_tx = sp->dirty_tx;
while ((int)(sp->cur_tx - dirty_tx) > 0) {
int entry = dirty_tx % TX_RING_SIZE;
@@ -1566,6 +1602,11 @@
long ioaddr, boguscnt = max_interrupt_work;
unsigned short status;
+#if 0
+ if (dev->polling)
+ printk(KERN_ERR "%s: interrupt while polling\n", dev->name);
+#endif
+
ioaddr = dev->base_addr;
sp = (struct speedo_private *)dev->priv;
@@ -1594,13 +1635,15 @@
break;
- if ((status & 0x5000) || /* Packet received, or Rx error. */
- (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+ if (!dev->polling &&
+ ((status & 0x5000) || /* Packet received, or Rx error. */
+ (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed))
/* Need to gather the postponed packet. */
speedo_rx(dev);
/* Always check if all rx buffers are allocated. --SAW */
- speedo_refill_rx_buffers(dev, 0);
+ if (!dev->polling)
+ speedo_refill_rx_buffers(dev, 0);
spin_lock(&sp->lock);
/*
@@ -1625,7 +1668,7 @@
/* User interrupt, Command/Tx unit interrupt or CU not active. */
- if (status & 0xA400) {
+ if (!dev->polling && (status & 0xA400)) {
speedo_tx_buffer_gc(dev);
if (sp->tx_full
&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
@@ -1743,6 +1786,12 @@
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ if (dev->polling) {
+ printk(KERN_ERR "%s: speedo_refill_rx_buffers called "
+ "while polling\n", dev->name);
+ return;
+ }
+
/* Refill the RX ring. */
while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
speedo_refill_rx_buf(dev, force) != -1);
@@ -1759,6 +1808,12 @@
if (netif_msg_intr(sp))
printk(KERN_DEBUG " In speedo_rx().\n");
+ if (dev->polling) {
+ printk(KERN_ERR "%s: In speedo_rx() while polling.\n",
+ dev->name);
+ return 0;
+ }
+
/* If we own the next entry, it's a new packet. Send it up. */
while (sp->rx_ringp[entry] != NULL) {
int status;
@@ -2449,3 +2504,368 @@
* tab-width: 4
* End:
*/
+
+/*
+ * Click: Polling extensions. Most of this code has been copied
+ * from various routines above with slight modifications.
+ */
+
+static int speedo_rx_refill(struct net_device *dev, struct sk_buff **skbs) {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ struct sk_buff *skb_list;
+ int dirty_rx = sp->dirty_rx;
+
+ /* If the list is empty, return the number of skb's we want */
+ if (skbs == 0)
+ return sp->cur_rx - sp->dirty_rx;
+
+ skb_list = *skbs;
+
+ /*
+ * Refill the RX ring with supplied skb's. Unlike
+ * speedo_refill_rx_buf routine, we don't have to
+ * worry about failed allocations.
+ */
+ while ((int)(sp->cur_rx - sp->dirty_rx) > 0 && skb_list) {
+ int entry;
+ struct RxFD *rxf;
+ struct sk_buff *skb;
+
+ entry = sp->dirty_rx % RX_RING_SIZE;
+ if (sp->rx_skbuff[entry] == NULL) {
+ skb = skb_list;
+ skb_list = skb->next;
+ skb->prev = skb->next = NULL;
+ skb->list = NULL;
+
+ sp->rx_skbuff[entry] = skb;
+ rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+ sp->rx_ring_dma[entry] = pci_map_single(sp->pdev, rxf,
+ PKT_BUF_SZ + sizeof(struct RxFD),
+ PCI_DMA_FROMDEVICE);
+
+ skb->dev = dev;
+ skb_reserve(skb, sizeof(struct RxFD));
+ rxf->rx_buf_addr = 0xffffffff;
+ pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD),
+ PCI_DMA_TODEVICE);
+ } else {
+ rxf = sp->rx_ringp[entry];
+ }
+ speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
+ sp->dirty_rx++;
+ }
+
+ /*
+ * Check if the RU is stopped -- restart it, if so.
+ */
+ if ((inw(dev->base_addr + SCBStatus) & 0x003c) == 0x0008) {
+ wait_for_cmd_done(dev);
+
+ /*
+ * If the RU stopped, it's because there aren't
+ * any DMA buffers left, so the first DMA buffer
+ * we've just refilled is where we should start
+ * receiving.
+ */
+ outl(virt_to_bus(sp->rx_ringp[dirty_rx % RX_RING_SIZE]),
+ dev->base_addr + SCBPointer);
+ outb(RxStart, dev->base_addr + SCBCmd);
+ }
+
+ /*
+ * Clear error flags on the RX ring, write back the remaining
+ * skb's that we haven't used, and return the number of dirty
+ * buffers remaining.
+ */
+ sp->rx_ring_state &= ~(RrNoMem|RrOOMReported);
+ *skbs = skb_list;
+ return sp->cur_rx - sp->dirty_rx;
+}
+
+static struct sk_buff *speedo_rx_poll(struct net_device *dev, int *want) {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int entry = sp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+ struct sk_buff *skb_head, *skb_last;
+ int got = 0;
+
+ skb_head = skb_last = NULL;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (sp->rx_ringp[entry] != NULL) {
+ int status;
+ int pkt_len;
+
+ pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+ status = le32_to_cpu(sp->rx_ringp[entry]->status);
+ pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+
+ if (!(status & RxComplete))
+ break;
+
+ if (--rx_work_limit < 0 || got == *want)
+ break;
+
+ /* Check for a rare out-of-memory case: the current buffer is
+ the last buffer allocated in the RX ring. --SAW */
+ if (sp->last_rxf == sp->rx_ringp[entry]) {
+ /*
+ * Postpone the packet. It'll be reaped next time
+ * when this packet is no longer the last packet
+ * in the ring.
+ */
+ if (netif_msg_rx_err(sp))
+ printk(KERN_DEBUG "%s: RX packet postponed!\n",
+ dev->name);
+ sp->rx_ring_state |= RrPostponed;
+ break;
+ }
+
+ if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+ if (status & RxErrTooBig) {
+ printk(KERN_ERR "%s: Ethernet frame overran "
+ "the Rx buffer, status %8.8x!\n",
+ dev->name, status);
+ } else if (! (status & RxOK)) {
+ /*
+ * There was a fatal error. This *should*
+ * be impossible.
+ */
+ sp->stats.rx_errors++;
+ printk(KERN_ERR "%s: Anomalous event in "
+ "speedo_rx_poll(), status %8.8x.\n",
+ dev->name, status);
+ }
+ } else {
+ struct sk_buff *skb = sp->rx_skbuff[entry];
+
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Inconsistent Rx "
+ "descriptor chain.\n", dev->name);
+ break;
+ }
+
+ /* Remove skbuff from RX ring. */
+ sp->rx_skbuff[entry] = NULL;
+ sp->rx_ringp[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
+ PKT_BUF_SZ + sizeof(struct RxFD),
+ PCI_DMA_FROMDEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ sp->stats.rx_packets++;
+ sp->stats.rx_bytes += pkt_len;
+
+ /* Append the skb to the received list */
+ if (got == 0) {
+ skb_head = skb_last = skb;
+ skb->next = skb->prev = NULL;
+ } else {
+ skb_last->next = skb;
+ skb->prev = skb_last;
+ skb->next = NULL;
+ skb_last = skb;
+ }
+
+ got++;
+ }
+
+ entry = (++sp->cur_rx) % RX_RING_SIZE;
+ sp->rx_ring_state &= ~RrPostponed;
+ }
+
+ if (got == 0 && (inw(dev->base_addr + SCBStatus) & 0x003c) == 0x0008) {
+ wait_for_cmd_done(dev);
+
+ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ dev->base_addr + SCBPointer);
+ outb(RxStart, dev->base_addr + SCBCmd);
+ }
+
+ sp->last_rx_time = jiffies;
+ *want = got;
+ return skb_head;
+}
+
+static int speedo_tx_queue(struct net_device *dev, struct sk_buff *skb) {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int entry;
+
+	unsigned long flags;
+ spin_lock_irqsave(&sp->lock, flags);
+
+ /* Check if there are enough space. */
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n",
+ dev->name);
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return 1;
+ }
+
+ /* Calculate the Tx descriptor entry. */
+ entry = sp->cur_tx++ % TX_RING_SIZE;
+
+ sp->tx_skbuff[entry] = skb;
+ sp->tx_ring[entry].status =
+ cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+ sp->tx_ring[entry].link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+ sp->tx_ring[entry].tx_desc_addr =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+
+ /* The data region is always in one buffer descriptor. */
+ sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+ sp->tx_ring[entry].tx_buf_addr0 =
+ cpu_to_le32(pci_map_single(sp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+
+ /* Clear the suspend bit on the last command */
+ clear_suspend(sp->last_cmd);
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ /* Leave room for set_rx_mode(). If there is no more space than
+ * reserved for multicast filter mark the ring as full.
+ */
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ }
+
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return 0;
+}
+
+static int speedo_tx_eob(struct net_device *dev)
+{
+ /* benjie: not sure what this is used for... */
+ // wait_for_cmd_done(dev);
+
+ /* benjie: i suspect this won't cause a race condition because eob
+ * is called right after the last tx_queue and also we batch a
+ * bunch of packets, so tx is probably not going to be as fast as
+ * we are. */
+ outb(CUResume, dev->base_addr + SCBCmd);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+static int speedo_tx_start(struct net_device *dev) {
+ printk("hard tx_start\n");
+ /* must have been suspended before the last queued DMA ring, so
+ * this mindless CUResume is probably okay */
+ outb(CUResume, dev->base_addr + SCBCmd);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+static struct sk_buff *speedo_tx_clean(struct net_device *dev) {
+ unsigned int dirty_tx;
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ struct sk_buff *skb_head, *skb_last;
+
+ skb_head = skb_last = NULL;
+ dirty_tx = sp->dirty_tx;
+ while ((int)(sp->cur_tx - dirty_tx) > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+ if ((status & StatusComplete) == 0)
+ break; /* It still hasn't been processed. */
+
+ if (status & TxUnderrun)
+ if (sp->tx_threshold < 0x01e08000) {
+ if (netif_msg_tx_err(sp))
+ printk(KERN_DEBUG "%s: TX underrun, "
+ "threshold adjusted.\n",
+ dev->name);
+ sp->tx_threshold += 0x00040000;
+ }
+
+ /* Put the original skb on the return list. */
+ if (sp->tx_skbuff[entry]) {
+ struct sk_buff *skb = sp->tx_skbuff[entry];
+
+ sp->stats.tx_packets++; /* Count only user packets. */
+ sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+ sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+ sp->tx_skbuff[entry] = 0;
+
+ if (skb_head == NULL) {
+ skb_head = skb_last = skb;
+ skb->next = skb->prev = NULL;
+ } else {
+ skb_last->next = skb;
+ skb->prev = skb_last;
+ skb->next = NULL;
+ skb_last = skb;
+ }
+ }
+ dirty_tx++;
+ }
+
+ if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
+ printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+ " full=%d.\n",
+ dirty_tx, sp->cur_tx, sp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+
+ while (sp->mc_setup_head != NULL
+ && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
+ struct speedo_mc_block *t;
+ if (netif_msg_tx_err(sp))
+ printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+ pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
+ sp->mc_setup_head->len, PCI_DMA_TODEVICE);
+ t = sp->mc_setup_head->next;
+ kfree(sp->mc_setup_head);
+ sp->mc_setup_head = t;
+ }
+ if (sp->mc_setup_head == NULL)
+ sp->mc_setup_tail = NULL;
+
+ sp->dirty_tx = dirty_tx;
+
+ if (sp->tx_full && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+ /* The ring is no longer full. */
+ sp->tx_full = 0;
+ netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
+ }
+ return skb_head;
+}
+
+static int speedo_poll_on(struct net_device *dev) {
+ long ioaddr = dev->base_addr;
+
+ if (dev->polling == 0) {
+ /* Mask all interrupts */
+ outw(SCBMaskAll, ioaddr + SCBCmd);
+
+ dev->polling = 2;
+ }
+
+ return 0;
+}
+
+static int speedo_poll_off(struct net_device *dev) {
+ long ioaddr = dev->base_addr;
+
+ if (dev->polling > 0) {
+ /* Enable interrupts */
+ outw(0, ioaddr + SCBCmd);
+
+ dev->polling = 0;
+ }
+
+ return 0;
+}
+
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/drivers/net/tg3.c ./drivers/net/tg3.c
--- ../linux-2.4.20-pure/drivers/net/tg3.c 2002-11-28 15:53:14.000000000 -0800
+++ ./drivers/net/tg3.c 2003-01-22 17:26:56.000000000 -0800
@@ -2016,7 +2016,7 @@
desc->err_vlan & RXD_VLAN_MASK);
} else
#endif
- netif_receive_skb(skb);
+ netif_receive_skb(skb, skb->protocol, 0);
tp->dev->last_rx = jiffies;
received++;
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/drivers/net/tulip/interrupt.c ./drivers/net/tulip/interrupt.c
--- ../linux-2.4.20-pure/drivers/net/tulip/interrupt.c 2002-11-28 15:53:14.000000000 -0800
+++ ./drivers/net/tulip/interrupt.c 2003-01-22 16:34:28.000000000 -0800
@@ -311,6 +311,10 @@
#endif
}
+/* Polling extensions -- interrupt stats */
+void (*tulip_interrupt_hook)(struct net_device *, unsigned);
+
+
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -320,7 +324,6 @@
long ioaddr = dev->base_addr;
int csr5;
int entry;
- int missed;
int rx = 0;
int tx = 0;
int oi = 0;
@@ -328,6 +331,7 @@
int maxtx = TX_RING_SIZE;
int maxoi = TX_RING_SIZE;
unsigned int work_count = tulip_max_interrupt_work;
+ int first_time = 1;
/* Let's see whether the interrupt really is for us */
csr5 = inl(ioaddr + CSR5);
@@ -341,14 +345,33 @@
tp->nir++;
do {
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
+ if (dev->polling > 0)
+ goto out;
+ if (first_time)
+ goto out;
+ else
+ break;
+ }
+ first_time = 0;
+
/* Acknowledge all of the current interrupt sources ASAP. */
outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+ /* Notify tulip_interrupt_hook */
+ if (tulip_interrupt_hook)
+			tulip_interrupt_hook(dev, csr5);
+
+ if (dev->polling > 0) {
+ if ((csr5 & (TxDied|TimerInt|AbnormalIntr)) == 0)
+ goto out;
+ }
+
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
dev->name, csr5, inl(dev->base_addr + CSR5));
- if (csr5 & (RxIntr | RxNoBuf)) {
+ if ((csr5 & (RxIntr | RxNoBuf)) && (dev->polling == 0)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
if ((!tp->fc_bit) ||
(!test_bit(tp->fc_bit, &netdev_fc_xoff)))
@@ -357,7 +380,13 @@
tulip_refill_rx(dev);
}
- if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+ if ((csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) &&
+ (dev->polling == 0)) {
+ /*
+ * part of the following code is duplicated at the end
+ * in tulip_tx_clean for the polling driver; changes
+ * here should propagate to there as well.
+ */
unsigned int dirty_tx;
spin_lock(&tp->lock);
@@ -425,16 +454,17 @@
netif_wake_queue(dev);
tp->dirty_tx = dirty_tx;
- if (csr5 & TxDied) {
- if (tulip_debug > 2)
- printk(KERN_WARNING "%s: The transmitter stopped."
- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
- dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
- tulip_restart_rxtx(tp);
- }
spin_unlock(&tp->lock);
}
+ if (csr5 & TxDied) { /* XXX move after loop? */
+ if (tulip_debug > 2)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+ tulip_restart_rxtx(tp);
+ }
+
/* Log errors. */
if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
if (csr5 == 0xffffffff)
@@ -456,7 +486,10 @@
}
}
if (csr5 & RxDied) { /* Missed a Rx frame. */
- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ unsigned csr8status = inl(ioaddr + CSR8);
+ unsigned fifostatus = csr8status >> 17;
+ tp->stats.rx_missed_errors += csr8status & 0xffff;
+ tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
#ifdef CONFIG_NET_HW_FLOWCONTROL
if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
tp->stats.rx_errors++;
@@ -547,7 +580,9 @@
csr5 = inl(ioaddr + CSR5);
} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
- tulip_refill_rx(dev);
+ if (dev->polling == 0) {
+ tulip_refill_rx(dev);
+ }
/* check if the card is in suspend mode */
entry = tp->dirty_rx % RX_RING_SIZE;
@@ -570,12 +605,230 @@
}
}
+#if 0
if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
}
+#endif
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
dev->name, inl(ioaddr + CSR5));
+out: ;
+}
+
+/* Click: polling support routines */
+
+int tulip_rx_refill(struct net_device *dev, struct sk_buff **skbs) {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct sk_buff *skb_list;
+
+ if (skbs == NULL)
+ return tp->cur_rx - tp->dirty_rx;
+
+ skb_list = *skbs;
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0 && skb_list; tp->dirty_rx++) {
+ int entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ /* Grab an skb from the list we were given */
+ skb = skb_list;
+ skb_list = skb_list->next;
+ skb->prev = NULL;
+ skb->next = NULL;
+ skb->list = NULL;
+
+ tp->rx_buffers[entry].skb = skb;
+
+ mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[entry].mapping = mapping;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+ }
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+ if(tp->chip_id == LC82C168) {
+ if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
+ /* Rx stopped due to out of buffers,
+ * restart it
+ */
+ outl(0x01, dev->base_addr + CSR2);
+ }
+ }
+
+ /* Return the unused skb's */
+ *skbs = skb_list;
+
+ return tp->cur_rx - tp->dirty_rx;
+}
+
+struct sk_buff *tulip_tx_clean(struct net_device *dev) {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct sk_buff *skb_head, *skb_last;
+ unsigned int dirty_tx;
+
+ skb_head = skb_last = 0;
+
+ spin_lock(&tp->lock);
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+ struct sk_buff *skb;
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ if (status & 0x8000) {
+ /* There was an major error, log it. */
+#ifndef final_version
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+ if (status & 0x0200) tp->stats.tx_window_errors++;
+ if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ tp->stats.tx_heartbeat_errors++;
+ } else {
+ tp->stats.tx_bytes +=
+ tp->tx_buffers[entry].skb->len;
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Remove from buffer list */
+ skb = tp->tx_buffers[entry].skb;
+
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+
+ /* Put the skb onto the return list */
+ if (skb_head == 0) {
+ skb_head = skb;
+ skb_last = skb;
+ skb_last->next = NULL;
+ skb_last->prev = NULL;
+ } else {
+ skb_last->next = skb;
+ skb->prev = skb_last;
+ skb->next = NULL;
+ skb_last = skb;
+ }
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+ dev->name, dirty_tx, tp->cur_tx);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+#if 0
+ if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+ netif_wake_queue(dev);
+#endif
+
+ tp->dirty_tx = dirty_tx;
+ spin_unlock(&tp->lock);
+
+ return skb_head;
+}
+
+struct sk_buff *tulip_rx_poll(struct net_device *dev, int *want) {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ struct sk_buff *skb_head, *skb_last;
+ int got = 0;
+
+ skb_head = skb_last = NULL;
+
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+ if (--rx_work_limit < 0 || got == *want) break;
+
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb = tp->rx_buffers[entry].skb;
+
+ pci_unmap_single(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ tp->stats.rx_packets++;
+ tp->stats.rx_bytes += pkt_len;
+
+ if (got == 0) {
+ skb_head = skb;
+ skb_last = skb;
+ skb->next = skb->prev = NULL;
+ } else {
+ skb_last->next = skb;
+ skb->prev = skb_last;
+ skb->next = NULL;
+ skb_last = skb;
+ }
+ got++;
+ }
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+
+ dev->last_rx = jiffies;
+ *want = got;
+ return skb_head;
}
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/drivers/net/tulip/tulip_core.c ./drivers/net/tulip/tulip_core.c
--- ../linux-2.4.20-pure/drivers/net/tulip/tulip_core.c 2002-11-28 15:53:14.000000000 -0800
+++ ./drivers/net/tulip/tulip_core.c 2003-01-22 16:34:28.000000000 -0800
@@ -264,6 +264,16 @@
static void set_rx_mode(struct net_device *dev);
+/* Click: polling support */
+static int tulip_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static int tulip_tx_eob(struct net_device *dev);
+static int tulip_tx_start(struct net_device *dev);
+int tulip_rx_refill(struct net_device *dev, struct sk_buff **);
+struct sk_buff *tulip_tx_clean(struct net_device *dev);
+struct sk_buff *tulip_rx_poll(struct net_device *dev, int *want);
+static int tulip_poll_on(struct net_device *dev);
+static int tulip_poll_off(struct net_device *dev);
+
static void tulip_set_power_state (struct tulip_private *tp,
int sleep, int snooze)
@@ -709,6 +719,17 @@
}
static int
+tulip_tx_start(struct net_device *dev) {
+ /* Trigger an immediate transmit demand unless polling */
+ if (dev->polling <= 0)
+ outl(0, dev->base_addr + CSR1);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tulip_private *tp = (struct tulip_private *)dev->priv;
@@ -744,13 +765,13 @@
tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
/* if we were using Transmit Automatic Polling, we would need a
* wmb() here. */
+ wmb();
tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
wmb();
tp->cur_tx++;
- /* Trigger an immediate transmit demand. */
- outl(0, dev->base_addr + CSR1);
+ tulip_tx_start(dev);
spin_unlock_irqrestore(&tp->lock, eflags);
@@ -759,6 +780,19 @@
return 0;
}
+static __inline__ unsigned long long
+tulip_get_cycles(void)
+{
+ unsigned long low, high;
+ unsigned long long x;
+
+ __asm__ __volatile__("rdtsc":"=a" (low), "=d" (high));
+ x = high;
+ x <<= 32;
+ x |= low;
+ return(x);
+}
+
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
unsigned int dirty_tx;
@@ -821,8 +855,12 @@
if (tp->chip_id == DC21040)
outl (0x00000004, ioaddr + CSR13);
- if (inl (ioaddr + CSR6) != 0xffffffff)
- tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;
+ if (inl (ioaddr + CSR6) != 0xffffffff) {
+ unsigned csr8status = inl(ioaddr + CSR8);
+ unsigned fifostatus = csr8status >> 17;
+ tp->stats.rx_missed_errors += csr8status & 0xffff;
+ tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
+ }
spin_unlock_irqrestore (&tp->lock, flags);
@@ -901,10 +939,14 @@
if (netif_running(dev)) {
unsigned long flags;
+ unsigned csr8status, fifostatus;
spin_lock_irqsave (&tp->lock, flags);
- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ csr8status = inl(ioaddr + CSR8);
+ fifostatus = csr8status >> 17;
+ tp->stats.rx_missed_errors += csr8status & 0xffff;
+ tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -1722,6 +1764,17 @@
dev->do_ioctl = private_ioctl;
dev->set_multicast_list = set_rx_mode;
+ /* Click polling for this device */
+ dev->polling = 0;
+ dev->rx_poll = tulip_rx_poll;
+ dev->rx_refill = tulip_rx_refill;
+ dev->tx_clean = tulip_tx_clean;
+ dev->tx_queue = tulip_tx_queue;
+ dev->tx_start = tulip_tx_start;
+ dev->tx_eob = tulip_tx_eob;
+ dev->poll_on = tulip_poll_on;
+ dev->poll_off = tulip_poll_off;
+
if (register_netdev(dev))
goto err_out_free_ring;
@@ -1930,3 +1983,113 @@
module_init(tulip_init);
module_exit(tulip_cleanup);
+
+/*
+ * Click polling extensions
+ */
+
+/* Demand polling - the TX DMA engine on some tulip cards can automatically
+ * poll the TX DMA ring for packets; with this feature the driver does not
+ * need to poke the TX DMA engine after packet transmission stopped. however
+ * it seems that on some cards this feature does not work, therefore by
+ * default it is disabled. the eob() function minimizes the number of such
+ * pokes already. */
+
+#define DEMAND_POLLTX 0
+
+static int
+tulip_poll_on(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int csr7;
+#if DEMAND_POLLTX
+ int csr0;
+#endif
+
+ if (dev->polling == 0) {
+ csr7 = inl(ioaddr + CSR7) & ~(NormalIntr|RxNoBuf|\
+ RxIntr|TxIntr|TxNoBuf);
+ outl(csr7, ioaddr + CSR7);
+
+#if DEMAND_POLLTX
+ csr0 = (inl(ioaddr + CSR0) & ~(7<<17)) | (4<<17);
+ outl(csr0, ioaddr + CSR0);
+#endif
+
+ dev->polling = 2;
+ }
+
+ return 0;
+}
+
+static int
+tulip_poll_off(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int csr7;
+#if DEMAND_POLLTX
+ int csr0;
+#endif
+
+ if (dev->polling > 0) {
+ csr7 = inl(ioaddr + CSR7) | (NormalIntr|RxNoBuf|\
+ RxIntr|TxIntr|TxNoBuf);
+ outl(csr7, ioaddr + CSR7);
+
+#if DEMAND_POLLTX
+ csr0 = inl(ioaddr + CSR0) & ~(7<<17);
+ outl(csr0, ioaddr + CSR0);
+#endif
+
+ dev->polling = 0;
+ }
+
+ return 0;
+}
+
+static int tulip_tx_queue(struct net_device *dev, struct sk_buff *skb) {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry;
+ u32 flag;
+ dma_addr_t mapping;
+
+ spin_lock_irq(&tp->lock);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_buffers[entry].skb = skb;
+ mapping = pci_map_single(tp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ tp->tx_buffers[entry].mapping = mapping;
+ tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+ flag = 0x60000000; /* No interrupt */
+
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ /* if we were using Transmit Automatic Polling, we would need a
+ * wmb() here. */
+ wmb();
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ wmb();
+
+ tp->cur_tx++;
+
+ /* If we've almost filled up the transmit ring, signal busy */
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static int tulip_tx_eob(struct net_device *dev) {
+ outl(0, dev->base_addr + CSR1);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/fs/proc/inode.c ./fs/proc/inode.c
--- ../linux-2.4.20-pure/fs/proc/inode.c 2001-11-17 11:24:32.000000000 -0800
+++ ./fs/proc/inode.c 2003-01-22 16:34:29.000000000 -0800
@@ -147,6 +147,11 @@
if (!inode)
goto out_fail;
+ /* Click change: don't double-increment de's use count if the inode
+ * existed already */
+ if (inode->u.generic_ip == (void *) de)
+ de_put(de);
+
inode->u.generic_ip = (void *) de;
if (de) {
if (de->mode) {
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-alpha/core_tsunami.h ./include/asm-alpha/core_tsunami.h
--- ../linux-2.4.20-pure/include/asm-alpha/core_tsunami.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-alpha/core_tsunami.h 2003-01-22 16:34:29.000000000 -0800
@@ -281,8 +281,7 @@
/*
* Data structure for handling TSUNAMI machine checks:
*/
-struct el_TSUNAMI_sysdata_mcheck {
-};
+EMPTY_STRUCT_DECL(el_TSUNAMI_sysdata_mcheck);
#ifdef __KERNEL__
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-arm/mmu.h ./include/asm-arm/mmu.h
--- ../linux-2.4.20-pure/include/asm-arm/mmu.h 2000-12-29 14:07:23.000000000 -0800
+++ ./include/asm-arm/mmu.h 2003-02-07 17:03:43.000000000 -0800
@@ -2,6 +2,6 @@
#define __ARM_MMU_H
/* The ARM doesn't have a mmu context */
-typedef struct { } mm_context_t;
+typedef EMPTY_STRUCT_DECL(/* unnamed */) mm_context_t;
#endif
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-cris/io.h ./include/asm-cris/io.h
--- ../linux-2.4.20-pure/include/asm-cris/io.h 2001-10-08 11:43:54.000000000 -0700
+++ ./include/asm-cris/io.h 2003-02-10 10:22:15.000000000 -0800
@@ -24,8 +24,8 @@
({ int _Foofoo; __asm__ volatile ("bmod [%0],%0" : "=r" (_Foofoo) : "0" \
(255)); _Foofoo; })
-#define TRACE_OFF() do { __asm__ volatile ("bmod [%0],%0" :: "r" (254)); } while (0)
-#define SIM_END() do { __asm__ volatile ("bmod [%0],%0" :: "r" (28)); } while (0)
+#define TRACE_OFF() do { __asm__ volatile ("bmod [%0],%0" : : "r" (254)); } while (0)
+#define SIM_END() do { __asm__ volatile ("bmod [%0],%0" : : "r" (28)); } while (0)
#define CRIS_CYCLES() __extension__ \
({ unsigned long c; asm ("bmod [%1],%0" : "=r" (c) : "r" (27)); c;})
#else /* ! defined CONFIG_SVINTO_SIM */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-i386/desc.h ./include/asm-i386/desc.h
--- ../linux-2.4.20-pure/include/asm-i386/desc.h 2001-07-26 13:40:32.000000000 -0700
+++ ./include/asm-i386/desc.h 2003-02-10 10:22:47.000000000 -0800
@@ -56,9 +56,9 @@
#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
#define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
-#define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3))
+#define load_TR(n) __asm__ __volatile__("ltr %%ax": :"a" (__TSS(n)<<3))
-#define __load_LDT(n) __asm__ __volatile__("lldt %%ax"::"a" (__LDT(n)<<3))
+#define __load_LDT(n) __asm__ __volatile__("lldt %%ax": :"a" (__LDT(n)<<3))
/*
* This is the ldt that every process will get unless we need
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-i386/highmem.h ./include/asm-i386/highmem.h
--- ../linux-2.4.20-pure/include/asm-i386/highmem.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-i386/highmem.h 2003-01-26 18:57:06.000000000 -0800
@@ -91,7 +91,7 @@
if (page < highmem_start_page)
return page_address(page);
- idx = type + KM_TYPE_NR*smp_processor_id();
+ idx = (enum fixed_addresses) (type + KM_TYPE_NR*smp_processor_id());
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
if (!pte_none(*(kmap_pte-idx)))
@@ -107,7 +107,8 @@
{
#if HIGHMEM_DEBUG
unsigned long vaddr = (unsigned long) kvaddr;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+ enum fixed_addresses idx =
+ (enum fixed_addresses) (type + KM_TYPE_NR*smp_processor_id());
if (vaddr < FIXADDR_START) // FIXME
return;
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-i386/pgtable.h ./include/asm-i386/pgtable.h
--- ../linux-2.4.20-pure/include/asm-i386/pgtable.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-i386/pgtable.h 2003-02-10 10:21:40.000000000 -0800
@@ -43,7 +43,7 @@
"movl %%cr3, %0; # flush TLB \n" \
"movl %0, %%cr3; \n" \
: "=r" (tmpreg) \
- :: "memory"); \
+ : : "memory"); \
} while (0)
/*
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-i386/rwlock.h ./include/asm-i386/rwlock.h
--- ../linux-2.4.20-pure/include/asm-i386/rwlock.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-i386/rwlock.h 2003-01-22 16:41:58.000000000 -0800
@@ -28,7 +28,7 @@
"2:\tcall " helper "\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- ::"a" (rw) : "memory")
+ : :"a" (rw) : "memory")
#define __build_read_lock_const(rw, helper) \
asm volatile(LOCK "subl $1,%0\n\t" \
@@ -58,7 +58,7 @@
"2:\tcall " helper "\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- ::"a" (rw) : "memory")
+ : :"a" (rw) : "memory")
#define __build_write_lock_const(rw, helper) \
asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-i386/string.h ./include/asm-i386/string.h
--- ../linux-2.4.20-pure/include/asm-i386/string.h 2001-11-22 11:46:18.000000000 -0800
+++ ./include/asm-i386/string.h 2003-01-22 17:35:06.000000000 -0800
@@ -29,6 +29,7 @@
* consider these trivial functions to be PD.
*/
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
#define __HAVE_ARCH_STRCPY
static inline char * strcpy(char * dest,const char *src)
{
@@ -42,6 +43,7 @@
:"0" (src),"1" (dest) : "memory");
return dest;
}
+#endif
#define __HAVE_ARCH_STRNCPY
static inline char * strncpy(char * dest,const char *src,size_t count)
@@ -102,6 +104,7 @@
return dest;
}
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char * cs,const char * ct)
{
@@ -122,6 +125,7 @@
:"1" (cs),"2" (ct));
return __res;
}
+#endif
#define __HAVE_ARCH_STRNCMP
static inline int strncmp(const char * cs,const char * ct,size_t count)
@@ -182,6 +186,7 @@
return __res;
}
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
#define __HAVE_ARCH_STRLEN
static inline size_t strlen(const char * s)
{
@@ -195,6 +200,7 @@
:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
return __res;
}
+#endif
static inline void * __memcpy(void * to, const void * from, size_t n)
{
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/delay.h ./include/asm-ia64/delay.h
--- ../linux-2.4.20-pure/include/asm-ia64/delay.h 2001-04-05 12:51:47.000000000 -0700
+++ ./include/asm-ia64/delay.h 2003-02-10 10:23:26.000000000 -0800
@@ -21,7 +21,7 @@
static __inline__ void
ia64_set_itm (unsigned long val)
{
- __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
+ __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" : : "r"(val) : "memory");
}
static __inline__ unsigned long
@@ -29,20 +29,20 @@
{
unsigned long result;
- __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
+ __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) : : "memory");
return result;
}
static __inline__ void
ia64_set_itv (unsigned long val)
{
- __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory");
+ __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" : : "r"(val) : "memory");
}
static __inline__ void
ia64_set_itc (unsigned long val)
{
- __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
+ __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" : : "r"(val) : "memory");
}
static __inline__ unsigned long
@@ -50,10 +50,10 @@
{
unsigned long result;
- __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+ __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) : : "memory");
#ifdef CONFIG_ITANIUM
while (__builtin_expect ((__s32) result == -1, 0))
- __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+ __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) : : "memory");
#endif
return result;
}
@@ -67,9 +67,9 @@
return;
__asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
- __asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1));
+ __asm__ __volatile__("mov ar.lc=%0;;" : : "r"(loops - 1));
__asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
- __asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
+ __asm__ __volatile__("mov ar.lc=%0" : : "r"(saved_ar_lc));
}
static __inline__ void
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/io.h ./include/asm-ia64/io.h
--- ../linux-2.4.20-pure/include/asm-ia64/io.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-ia64/io.h 2003-02-10 10:23:37.000000000 -0800
@@ -67,7 +67,7 @@
* Memory fence w/accept. This should never be used in code that is
* not IA-64 specific.
*/
-#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
+#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" : : : "memory")
static inline const unsigned long
__ia64_get_io_port_base (void)
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/pgalloc.h ./include/asm-ia64/pgalloc.h
--- ../linux-2.4.20-pure/include/asm-ia64/pgalloc.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-ia64/pgalloc.h 2003-02-10 10:23:43.000000000 -0800
@@ -194,7 +194,7 @@
flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
if (vma->vm_mm == current->active_mm)
- asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+ asm volatile ("ptc.l %0,%1" : : "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
else
vma->vm_mm->context = 0;
#endif
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/processor.h ./include/asm-ia64/processor.h
--- ../linux-2.4.20-pure/include/asm-ia64/processor.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-ia64/processor.h 2003-02-10 10:31:59.000000000 -0800
@@ -414,14 +414,14 @@
ia64_set_kr (unsigned long regnum, unsigned long r)
{
switch (regnum) {
- case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
- case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
- case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
- case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
- case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
- case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
- case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
- case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
+ case 0: asm volatile ("mov ar.k0=%0" : : "r"(r)); break;
+ case 1: asm volatile ("mov ar.k1=%0" : : "r"(r)); break;
+ case 2: asm volatile ("mov ar.k2=%0" : : "r"(r)); break;
+ case 3: asm volatile ("mov ar.k3=%0" : : "r"(r)); break;
+ case 4: asm volatile ("mov ar.k4=%0" : : "r"(r)); break;
+ case 5: asm volatile ("mov ar.k5=%0" : : "r"(r)); break;
+ case 6: asm volatile ("mov ar.k6=%0" : : "r"(r)); break;
+ case 7: asm volatile ("mov ar.k7=%0" : : "r"(r)); break;
}
}
@@ -448,8 +448,8 @@
extern void ia32_load_state (struct task_struct *task);
#endif
-#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
-#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
+#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" : : : "memory");
+#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" : : : "memory");
/* load fp 0.0 into fph */
static inline void
@@ -478,25 +478,25 @@
static inline void
ia64_fc (void *addr)
{
- asm volatile ("fc %0" :: "r"(addr) : "memory");
+ asm volatile ("fc %0" : : "r"(addr) : "memory");
}
static inline void
ia64_sync_i (void)
{
- asm volatile (";; sync.i" ::: "memory");
+ asm volatile (";; sync.i" : : : "memory");
}
static inline void
ia64_srlz_i (void)
{
- asm volatile (";; srlz.i ;;" ::: "memory");
+ asm volatile (";; srlz.i ;;" : : : "memory");
}
static inline void
ia64_srlz_d (void)
{
- asm volatile (";; srlz.d" ::: "memory");
+ asm volatile (";; srlz.d" : : : "memory");
}
static inline __u64
@@ -510,7 +510,7 @@
static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
- asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
+ asm volatile ("mov rr[%0]=%1" : : "r"(reg_bits), "r"(rr_val) : "memory");
}
static inline __u64
@@ -524,7 +524,7 @@
static inline void
ia64_set_dcr (__u64 val)
{
- asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
+ asm volatile ("mov cr.dcr=%0;;" : : "r"(val) : "memory");
ia64_srlz_d();
}
@@ -539,7 +539,7 @@
static inline void
ia64_invala (void)
{
- asm volatile ("invala" ::: "memory");
+ asm volatile ("invala" : : : "memory");
}
/*
@@ -551,7 +551,7 @@
ia64_clear_ic (void)
{
__u64 psr;
- asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
+ asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) : : "memory");
return psr;
}
@@ -561,7 +561,7 @@
static inline void
ia64_set_psr (__u64 psr)
{
- asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
+ asm volatile (";; mov psr.l=%0;; srlz.d" : : "r" (psr) : "memory");
}
/*
@@ -573,14 +573,14 @@
__u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
- asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
- asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ asm volatile ("mov cr.itir=%0" : : "r"(log_page_size << 2) : "memory");
+ asm volatile ("mov cr.ifa=%0;;" : : "r"(vmaddr) : "memory");
if (target_mask & 0x1)
asm volatile ("itr.i itr[%0]=%1"
- :: "r"(tr_num), "r"(pte) : "memory");
+ : : "r"(tr_num), "r"(pte) : "memory");
if (target_mask & 0x2)
asm volatile (";;itr.d dtr[%0]=%1"
- :: "r"(tr_num), "r"(pte) : "memory");
+ : : "r"(tr_num), "r"(pte) : "memory");
}
/*
@@ -591,13 +591,13 @@
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
- asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
- asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ asm volatile ("mov cr.itir=%0" : : "r"(log_page_size << 2) : "memory");
+ asm volatile ("mov cr.ifa=%0;;" : : "r"(vmaddr) : "memory");
/* as per EAS2.6, itc must be the last instruction in an instruction group */
if (target_mask & 0x1)
- asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
+ asm volatile ("itc.i %0;;" : : "r"(pte) : "memory");
if (target_mask & 0x2)
- asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
+ asm volatile (";;itc.d %0;;" : : "r"(pte) : "memory");
}
/*
@@ -608,16 +608,16 @@
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
if (target_mask & 0x1)
- asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+ asm volatile ("ptr.i %0,%1" : : "r"(vmaddr), "r"(log_size << 2));
if (target_mask & 0x2)
- asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+ asm volatile ("ptr.d %0,%1" : : "r"(vmaddr), "r"(log_size << 2));
}
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
- asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
+ asm volatile ("mov cr.iva=%0;; srlz.i;;" : : "r"(ivt_addr) : "memory");
}
/* Set the page table address and control bits. */
@@ -625,7 +625,7 @@
ia64_set_pta (__u64 pta)
{
/* Note: srlz.i implies srlz.d */
- asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
+ asm volatile ("mov cr.pta=%0;; srlz.i;;" : : "r"(pta) : "memory");
}
static inline __u64
@@ -640,13 +640,13 @@
static inline void
ia64_eoi (void)
{
- asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
+ asm ("mov cr.eoi=r0;; srlz.d;;" : : : "memory");
}
static inline void
ia64_set_lrr0 (unsigned long val)
{
- asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
+ asm volatile ("mov cr.lrr0=%0;; srlz.d" : : "r"(val) : "memory");
}
#define cpu_relax() do { } while (0)
@@ -655,13 +655,13 @@
static inline void
ia64_set_lrr1 (unsigned long val)
{
- asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
+ asm volatile ("mov cr.lrr1=%0;; srlz.d" : : "r"(val) : "memory");
}
static inline void
ia64_set_pmv (__u64 val)
{
- asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
+ asm volatile ("mov cr.pmv=%0" : : "r"(val) : "memory");
}
static inline __u64
@@ -676,7 +676,7 @@
static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
- asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
+ asm volatile ("mov pmc[%0]=%1" : : "r"(regnum), "r"(value));
}
static inline __u64
@@ -691,7 +691,7 @@
static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
- asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
+ asm volatile ("mov pmd[%0]=%1" : : "r"(regnum), "r"(value));
}
/*
@@ -759,7 +759,7 @@
static inline void
ia64_set_cmcv (__u64 val)
{
- asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
+ asm volatile ("mov cr.cmcv=%0" : : "r"(val) : "memory");
}
/*
@@ -770,7 +770,7 @@
{
__u64 val;
- asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
+ asm volatile ("mov %0=cr.cmcv" : "=r"(val) : : "memory");
return val;
}
@@ -785,7 +785,7 @@
static inline void
ia64_set_tpr (__u64 val)
{
- asm volatile ("mov cr.tpr=%0" :: "r"(val));
+ asm volatile ("mov cr.tpr=%0" : : "r"(val));
}
static inline __u64
@@ -799,7 +799,7 @@
static inline void
ia64_set_irr0 (__u64 val)
{
- asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory");
+ asm volatile("mov cr.irr0=%0;;" : : "r"(val) : "memory");
ia64_srlz_d();
}
@@ -816,7 +816,7 @@
static inline void
ia64_set_irr1 (__u64 val)
{
- asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory");
+ asm volatile("mov cr.irr1=%0;;" : : "r"(val) : "memory");
ia64_srlz_d();
}
@@ -833,7 +833,7 @@
static inline void
ia64_set_irr2 (__u64 val)
{
- asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory");
+ asm volatile("mov cr.irr2=%0;;" : : "r"(val) : "memory");
ia64_srlz_d();
}
@@ -850,7 +850,7 @@
static inline void
ia64_set_irr3 (__u64 val)
{
- asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory");
+ asm volatile("mov cr.irr3=%0;;" : : "r"(val) : "memory");
ia64_srlz_d();
}
@@ -876,13 +876,13 @@
static inline void
ia64_set_ibr (__u64 regnum, __u64 value)
{
- asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
+ asm volatile ("mov ibr[%0]=%1" : : "r"(regnum), "r"(value));
}
static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
- asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
+ asm volatile ("mov dbr[%0]=%1" : : "r"(regnum), "r"(value));
#ifdef CONFIG_ITANIUM
asm volatile (";; srlz.d");
#endif
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/spinlock.h ./include/asm-ia64/spinlock.h
--- ../linux-2.4.20-pure/include/asm-ia64/spinlock.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-ia64/spinlock.h 2003-02-10 10:32:08.000000000 -0800
@@ -45,7 +45,7 @@
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
";;\n" \
"1:\n" /* force a new bundle */ \
- :: "r"(addr) \
+ : : "r"(addr) \
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}
@@ -93,7 +93,7 @@
"cmp4.eq p0,p7 = r0, r2\n" \
"(p7) br.cond.spnt.few 1b\n" \
";;\n" \
- :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
+ : : "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
@@ -156,7 +156,7 @@
"cmp4.eq p0,p7 = r0, r2\n" \
"(p7) br.cond.spnt.few 1b\n" \
";;\n" \
- :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
+ : : "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
} while(0)
#define write_unlock(x) \
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-ia64/system.h ./include/asm-ia64/system.h
--- ../linux-2.4.20-pure/include/asm-ia64/system.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-ia64/system.h 2003-02-10 10:32:17.000000000 -0800
@@ -57,7 +57,7 @@
static inline void
ia64_insn_group_barrier (void)
{
- __asm__ __volatile__ (";;" ::: "memory");
+ __asm__ __volatile__ (";;" : : : "memory");
}
/*
@@ -82,7 +82,7 @@
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
-#define mb() __asm__ __volatile__ ("mf" ::: "memory")
+#define mb() __asm__ __volatile__ ("mf" : : : "memory")
#define rmb() mb()
#define wmb() mb()
@@ -121,7 +121,7 @@
do { \
unsigned long ip, psr; \
\
- __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
+ __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) : : "memory"); \
if (psr & (1UL << 14)) { \
__asm__ ("mov %0=ip" : "=r"(ip)); \
last_cli_ip = ip; \
@@ -133,7 +133,7 @@
do { \
unsigned long ip, psr; \
\
- __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
+ __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) : : "memory"); \
if (psr & (1UL << 14)) { \
__asm__ ("mov %0=ip" : "=r"(ip)); \
last_cli_ip = ip; \
@@ -160,21 +160,21 @@
#else /* !CONFIG_IA64_DEBUG_IRQ */
/* clearing of psr.i is implicitly serialized (visible by next insn) */
# define local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" \
- : "=r" (x) :: "memory")
-# define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
+ : "=r" (x) : : "memory")
+# define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" : : : "memory")
/* (potentially) setting psr.i requires data serialization: */
# define local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"srlz.d" \
- :: "r"((x) & IA64_PSR_I) \
+ : : "r"((x) & IA64_PSR_I) \
: "p6", "p7", "memory")
#endif /* !CONFIG_IA64_DEBUG_IRQ */
-#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
+#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" : : : "memory")
#define __cli() local_irq_disable ()
-#define __save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
+#define __save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) : : "memory")
#define __save_and_cli(flags) local_irq_save(flags)
#define save_and_cli(flags) __save_and_cli(flags)
#define __sti() local_irq_enable ()
@@ -307,7 +307,7 @@
case 8: _o_ = (__u64) (long) (old); break; \
default: break; \
} \
- __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" : : "rO"(_o_)); \
switch (size) { \
case 1: \
__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-mips/processor.h ./include/asm-mips/processor.h
--- ../linux-2.4.20-pure/include/asm-mips/processor.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-mips/processor.h 2003-02-10 10:32:16.000000000 -0800
@@ -256,6 +256,6 @@
* overhead of a function call by forcing the compiler to save the return
* address register on the stack.
*/
-#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
+#define return_address() ({__asm__ __volatile__("": : :"$31");__builtin_return_address(0);})
#endif /* _ASM_PROCESSOR_H */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-mips64/processor.h ./include/asm-mips64/processor.h
--- ../linux-2.4.20-pure/include/asm-mips64/processor.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-mips64/processor.h 2003-02-10 10:32:16.000000000 -0800
@@ -307,6 +307,6 @@
* functions. We avoid the overhead of a function call by forcing the
* compiler to save the return address register on the stack.
*/
-#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
+#define return_address() ({__asm__ __volatile__("": : :"$31");__builtin_return_address(0);})
#endif /* _ASM_PROCESSOR_H */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-parisc/system.h ./include/asm-parisc/system.h
--- ../linux-2.4.20-pure/include/asm-parisc/system.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-parisc/system.h 2003-02-10 10:32:16.000000000 -0800
@@ -145,7 +145,7 @@
** The __asm__ op below simple prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
-#define mb() __asm__ __volatile__("":::"memory"); /* barrier() */
+#define mb() __asm__ __volatile__("": : :"memory"); /* barrier() */
#define rmb() mb()
#define wmb() mb()
#define smp_mb() mb()
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-s390/uaccess.h ./include/asm-s390/uaccess.h
--- ../linux-2.4.20-pure/include/asm-s390/uaccess.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-s390/uaccess.h 2003-02-10 10:32:15.000000000 -0800
@@ -36,7 +36,7 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
-#define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4)); \
+#define set_fs(x) ({asm volatile("sar 4,%0": :"a" ((x).ar4)); \
current->addr_limit = (x);})
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-s390x/uaccess.h ./include/asm-s390x/uaccess.h
--- ../linux-2.4.20-pure/include/asm-s390x/uaccess.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-s390x/uaccess.h 2003-02-10 10:32:15.000000000 -0800
@@ -36,7 +36,7 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
-#define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4));\
+#define set_fs(x) ({asm volatile("sar 4,%0": :"a" ((x).ar4));\
current->addr_limit = (x);})
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-sparc/system.h ./include/asm-sparc/system.h
--- ../linux-2.4.20-pure/include/asm-sparc/system.h 2001-10-30 15:08:11.000000000 -0800
+++ ./include/asm-sparc/system.h 2003-02-10 10:32:14.000000000 -0800
@@ -280,9 +280,9 @@
#define wmb() mb()
#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value) set_mb(__var, __value)
-#define smp_mb() __asm__ __volatile__("":::"memory");
-#define smp_rmb() __asm__ __volatile__("":::"memory");
-#define smp_wmb() __asm__ __volatile__("":::"memory");
+#define smp_mb() __asm__ __volatile__("": : :"memory");
+#define smp_rmb() __asm__ __volatile__("": : :"memory");
+#define smp_wmb() __asm__ __volatile__("": : :"memory");
#define nop() __asm__ __volatile__ ("nop");
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-sparc64/system.h ./include/asm-sparc64/system.h
--- ../linux-2.4.20-pure/include/asm-sparc64/system.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/asm-sparc64/system.h 2003-02-10 10:32:14.000000000 -0800
@@ -110,9 +110,9 @@
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
-#define smp_mb() __asm__ __volatile__("":::"memory");
-#define smp_rmb() __asm__ __volatile__("":::"memory");
-#define smp_wmb() __asm__ __volatile__("":::"memory");
+#define smp_mb() __asm__ __volatile__("": : :"memory");
+#define smp_rmb() __asm__ __volatile__("": : :"memory");
+#define smp_wmb() __asm__ __volatile__("": : :"memory");
#endif
#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/desc.h ./include/asm-x86_64/desc.h
--- ../linux-2.4.20-pure/include/asm-x86_64/desc.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/desc.h 2003-02-10 10:32:13.000000000 -0800
@@ -73,9 +73,9 @@
#define __CPU_DESC_INDEX(x,field) \
((x) * sizeof(struct per_cpu_gdt) + offsetof(struct per_cpu_gdt, field) + __GDT_HEAD_SIZE)
-#define load_TR(cpu) asm volatile("ltr %w0"::"r" (__CPU_DESC_INDEX(cpu, tss)));
-#define __load_LDT(cpu) asm volatile("lldt %w0"::"r" (__CPU_DESC_INDEX(cpu, ldt)));
-#define clear_LDT(n) asm volatile("lldt %w0"::"r" (0))
+#define load_TR(cpu) asm volatile("ltr %w0": :"r" (__CPU_DESC_INDEX(cpu, tss)));
+#define __load_LDT(cpu) asm volatile("lldt %w0": :"r" (__CPU_DESC_INDEX(cpu, ldt)));
+#define clear_LDT(n) asm volatile("lldt %w0": :"r" (0))
extern struct gate_struct idt_table[];
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/page.h ./include/asm-x86_64/page.h
--- ../linux-2.4.20-pure/include/asm-x86_64/page.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/page.h 2003-02-10 10:32:13.000000000 -0800
@@ -85,9 +85,9 @@
char *filename; /* should use 32bit offset instead, but the assembler doesn't like it */
unsigned short line;
} __attribute__((packed));
-#define BUG() asm volatile("ud2 ; .quad %c1 ; .short %c0" :: "i"(__LINE__), \
+#define BUG() asm volatile("ud2 ; .quad %c1 ; .short %c0" : : "i"(__LINE__), \
"i" (__stringify(KBUILD_BASENAME)))
-#define HEADER_BUG() asm volatile("ud2 ; .quad %c1 ; .short %c0" :: "i"(__LINE__), \
+#define HEADER_BUG() asm volatile("ud2 ; .quad %c1 ; .short %c0" : : "i"(__LINE__), \
"i" (__stringify(__FILE__)))
#define PAGE_BUG(page) BUG()
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/pda.h ./include/asm-x86_64/pda.h
--- ../linux-2.4.20-pure/include/asm-x86_64/pda.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/pda.h 2003-02-10 10:32:13.000000000 -0800
@@ -47,9 +47,9 @@
#define pda_to_op(op,field,val) do { \
switch (sizeof_field(struct x8664_pda, field)) { \
- case 2: asm volatile(op "w %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \
- case 4: asm volatile(op "l %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \
- case 8: asm volatile(op "q %0,%%gs:" __STR2(pda_ ## field) ::"r" (val):"memory"); break; \
+ case 2: asm volatile(op "w %0,%%gs:" __STR2(pda_ ## field) : :"r" (val):"memory"); break; \
+ case 4: asm volatile(op "l %0,%%gs:" __STR2(pda_ ## field) : :"r" (val):"memory"); break; \
+ case 8: asm volatile(op "q %0,%%gs:" __STR2(pda_ ## field) : :"r" (val):"memory"); break; \
default: __bad_pda_field(); \
} \
} while (0)
@@ -58,9 +58,9 @@
#define pda_from_op(op,field) ({ \
typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
switch (sizeof_field(struct x8664_pda, field)) { \
- case 2: asm volatile(op "w %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \
- case 4: asm volatile(op "l %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \
- case 8: asm volatile(op "q %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__)::"memory"); break; \
+ case 2: asm volatile(op "w %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__): :"memory"); break; \
+ case 4: asm volatile(op "l %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__): :"memory"); break; \
+ case 8: asm volatile(op "q %%gs:" __STR2(pda_ ## field) ",%0":"=r" (ret__): :"memory"); break; \
default: __bad_pda_field(); \
} \
ret__; })
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/pgtable.h ./include/asm-x86_64/pgtable.h
--- ../linux-2.4.20-pure/include/asm-x86_64/pgtable.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/pgtable.h 2003-02-10 10:32:12.000000000 -0800
@@ -49,7 +49,7 @@
"movq %%cr3, %0; # flush TLB \n" \
"movq %0, %%cr3; \n" \
: "=r" (tmpreg) \
- :: "memory"); \
+ : : "memory"); \
} while (0)
/*
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/rwlock.h ./include/asm-x86_64/rwlock.h
--- ../linux-2.4.20-pure/include/asm-x86_64/rwlock.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/rwlock.h 2003-02-10 10:32:11.000000000 -0800
@@ -31,7 +31,7 @@
"2:\tcall " helper "\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- ::"a" (rw) : "memory")
+ : :"a" (rw) : "memory")
#define __build_read_lock_const(rw, helper) \
asm volatile(LOCK "subl $1,%0\n\t" \
@@ -44,7 +44,7 @@
"popq %%rax\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- :"=m" (*((volatile int *)rw))::"memory")
+ :"=m" (*((volatile int *)rw)): :"memory")
#define __build_read_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
@@ -61,7 +61,7 @@
"2:\tcall " helper "\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- ::"a" (rw) : "memory")
+ : :"a" (rw) : "memory")
#define __build_write_lock_const(rw, helper) \
asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
@@ -74,7 +74,7 @@
"popq %%rax\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END \
- :"=m" (*((volatile long *)rw))::"memory")
+ :"=m" (*((volatile long *)rw)): :"memory")
#define __build_write_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/asm-x86_64/system.h ./include/asm-x86_64/system.h
--- ../linux-2.4.20-pure/include/asm-x86_64/system.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/asm-x86_64/system.h 2003-02-10 10:32:10.000000000 -0800
@@ -230,9 +230,9 @@
* but I'd also expect them to finally get their act together
* and add some real memory barriers if so.
*/
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence":::"memory")
+#define mb() asm volatile("mfence": : :"memory")
+#define rmb() asm volatile("lfence": : :"memory")
+#define wmb() asm volatile("sfence": : :"memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
@@ -273,7 +273,7 @@
#endif
/* Default simics "magic" breakpoint */
-#define icebp() asm volatile("xchg %%bx,%%bx" ::: "ebx")
+#define icebp() asm volatile("xchg %%bx,%%bx" : : : "ebx")
/*
* disable hlt during certain critical i/o operations
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/atalk.h ./include/linux/atalk.h
--- ../linux-2.4.20-pure/include/linux/atalk.h 2001-11-22 11:47:11.000000000 -0800
+++ ./include/linux/atalk.h 2003-01-27 15:57:46.000000000 -0800
@@ -163,7 +163,7 @@
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
- return dev->atalk_ptr;
+ return (struct atalk_iface *) dev->atalk_ptr;
}
extern struct at_addr *atalk_find_dev_addr(struct net_device *dev);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/highmem.h ./include/linux/highmem.h
--- ../linux-2.4.20-pure/include/linux/highmem.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/highmem.h 2003-02-10 10:45:29.000000000 -0800
@@ -17,7 +17,7 @@
static inline char *bh_kmap(struct buffer_head *bh)
{
- return kmap(bh->b_page) + bh_offset(bh);
+ return (char *)kmap(bh->b_page) + bh_offset(bh);
}
static inline void bh_kunmap(struct buffer_head *bh)
@@ -102,7 +102,7 @@
if (offset + size > PAGE_SIZE)
out_of_line_bug();
- kaddr = kmap(page);
+ kaddr = (char *) kmap(page);
memset(kaddr + offset, 0, size);
flush_dcache_page(page);
flush_page_to_ram(page);
@@ -113,8 +113,8 @@
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from, KM_USER0);
- vto = kmap_atomic(to, KM_USER1);
+ vfrom = (char *) kmap_atomic(from, KM_USER0);
+ vto = (char *) kmap_atomic(to, KM_USER1);
copy_user_page(vto, vfrom, vaddr);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/if_vlan.h ./include/linux/if_vlan.h
--- ../linux-2.4.20-pure/include/linux/if_vlan.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/if_vlan.h 2003-01-26 19:01:47.000000000 -0800
@@ -183,7 +183,7 @@
break;
};
- return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+ return (polling ? netif_receive_skb(skb, skb->protocol, 0) : netif_rx(skb));
}
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/inetdevice.h ./include/linux/inetdevice.h
--- ../linux-2.4.20-pure/include/linux/inetdevice.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/linux/inetdevice.h 2003-01-22 16:34:29.000000000 -0800
@@ -124,7 +124,7 @@
struct in_device *in_dev;
read_lock(&inetdev_lock);
- in_dev = dev->ip_ptr;
+ in_dev = (struct in_device *) dev->ip_ptr;
if (in_dev)
atomic_inc(&in_dev->refcnt);
read_unlock(&inetdev_lock);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/intermezzo_kml.h ./include/linux/intermezzo_kml.h
--- ../linux-2.4.20-pure/include/linux/intermezzo_kml.h 2001-11-11 10:20:21.000000000 -0800
+++ ./include/linux/intermezzo_kml.h 2003-02-07 17:16:44.000000000 -0800
@@ -70,8 +70,7 @@
int gid;
};
-struct kml_open {
-};
+EMPTY_STRUCT_DECL(kml_open);
struct kml_mkdir {
char *path;
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/list.h ./include/linux/list.h
--- ../linux-2.4.20-pure/include/linux/list.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/list.h 2003-02-10 10:45:29.000000000 -0800
@@ -91,8 +91,8 @@
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
- entry->next = (void *) 0;
- entry->prev = (void *) 0;
+ entry->next = (struct list_head *) 0;
+ entry->prev = (struct list_head *) 0;
}
/**
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/mm.h ./include/linux/mm.h
--- ../linux-2.4.20-pure/include/linux/mm.h 2002-08-02 17:39:45.000000000 -0700
+++ ./include/linux/mm.h 2003-02-10 10:45:29.000000000 -0800
@@ -576,7 +576,6 @@
return 0;
}
-struct zone_t;
/* filemap.c */
extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/netdevice.h ./include/linux/netdevice.h
--- ../linux-2.4.20-pure/include/linux/netdevice.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/netdevice.h 2003-01-26 18:58:26.000000000 -0800
@@ -438,6 +438,46 @@
/* this will get initialized at each interface type init routine */
struct divert_blk *divert;
#endif /* CONFIG_NET_DIVERT */
+
+ /* Click polling support */
+ /*
+ * polling is < 0 if the device does not support polling, == 0 if the
+ * device supports polling but interrupts are on, and > 0 if polling
+ * is on.
+ */
+ int polling;
+ int (*poll_on)(struct net_device *);
+ int (*poll_off)(struct net_device *);
+ /*
+ * rx_poll returns to the caller a linked list of sk_buff objects received
+ * by the device. on call, the want argument specifies the number of
+ * packets wanted. on return, the want argument specifies the number
+ * of packets actually returned.
+ */
+ struct sk_buff * (*rx_poll)(struct net_device*, int *want);
+ /* refill rx dma ring using the given sk_buff list. returns 0 if
+ * successful, or if there are more entries that need to be cleaned,
+ * returns the number of dirty entries. the ptr to the sk_buff list is
+ * updated by the driver to point to any unused skbs.
+ */
+ int (*rx_refill)(struct net_device*, struct sk_buff**);
+ /*
+ * place sk_buff on the transmit ring. returns 0 if successful, 1
+ * otherwise
+ */
+ int (*tx_queue)(struct net_device *, struct sk_buff *);
+ /*
+ * clean tx dma ring. returns the list of skb objects cleaned
+ */
+ struct sk_buff* (*tx_clean)(struct net_device *);
+ /*
+ * start transmission. returns 0 if successful, 1 otherwise
+ */
+ int (*tx_start)(struct net_device *);
+ /*
+ * tell device the end of a batch of packets
+ */
+ int (*tx_eob)(struct net_device *);
};
@@ -476,6 +516,9 @@
extern int unregister_netdevice(struct net_device *dev);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
+extern int register_net_in(struct notifier_block *nb); /* Click */
+extern int unregister_net_in(struct notifier_block *nb); /* Click */
+extern int ptype_dispatch(struct sk_buff *skb, unsigned short type); /* Click */
extern int dev_new_index(void);
extern struct net_device *dev_get_by_index(int ifindex);
extern struct net_device *__dev_get_by_index(int ifindex);
@@ -589,7 +632,7 @@
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
-extern int netif_receive_skb(struct sk_buff *skb);
+extern int netif_receive_skb(struct sk_buff *skb, unsigned short, int ignore_notifiers);
extern int dev_ioctl(unsigned int cmd, void *);
extern int dev_change_flags(struct net_device *, unsigned);
extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/reiserfs_fs_sb.h ./include/linux/reiserfs_fs_sb.h
--- ../linux-2.4.20-pure/include/linux/reiserfs_fs_sb.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/reiserfs_fs_sb.h 2003-02-10 10:45:29.000000000 -0800
@@ -405,8 +405,7 @@
} journal;
} reiserfs_proc_info_data_t;
#else
-typedef struct reiserfs_proc_info_data
-{} reiserfs_proc_info_data_t;
+typedef EMPTY_STRUCT_DECL(reiserfs_proc_info_data) reiserfs_proc_info_data_t;
#endif
/* reiserfs union of in-core super block data */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/skbuff.h ./include/linux/skbuff.h
--- ../linux-2.4.20-pure/include/linux/skbuff.h 2002-08-02 17:39:46.000000000 -0700
+++ ./include/linux/skbuff.h 2003-01-26 18:57:28.000000000 -0800
@@ -126,15 +126,31 @@
skb_frag_t frags[MAX_SKB_FRAGS];
};
+/* Click: overload sk_buff.pkt_type to contain information about whether
+ a packet is clean. Clean packets have the following fields zero:
+ dst, destructor, pkt_bridged, prev, list, sk, security, priority. */
+#define PACKET_CLEAN 128 /* Is packet clean? */
+#define PACKET_TYPE_MASK 127 /* Actual packet type */
+
+/* Click: change sk_buff structure so all fields used for router are grouped
+ * together on one cache line, we hope */
struct sk_buff {
/* These two members must be first. */
struct sk_buff * next; /* Next buffer in list */
struct sk_buff * prev; /* Previous buffer in list */
- struct sk_buff_head * list; /* List we are on */
- struct sock *sk; /* Socket we are owned by */
- struct timeval stamp; /* Time we arrived */
+ unsigned int len; /* Length of actual data */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
struct net_device *dev; /* Device we arrived on/are leaving by */
+ unsigned char __unused, /* Dead field, may be reused */
+ cloned, /* head may be cloned (check refcnt to be sure). */
+ pkt_type, /* Packet class */
+ ip_summed; /* Driver fed us an IP checksum */
+ atomic_t users; /* User count - see datagram.c,tcp.c */
+ unsigned int truesize; /* Buffer size */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *end; /* End pointer */
/* Transport layer header */
union
@@ -165,8 +181,6 @@
unsigned char *raw;
} mac;
- struct dst_entry *dst;
-
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
@@ -175,23 +189,17 @@
*/
char cb[48];
- unsigned int len; /* Length of actual data */
+ struct dst_entry *dst;
+
+ struct sk_buff_head * list; /* List we are on */
+ struct sock *sk; /* Socket we are owned by */
+ struct timeval stamp; /* Time we arrived */
+
unsigned int data_len;
unsigned int csum; /* Checksum */
- unsigned char __unused, /* Dead field, may be reused */
- cloned, /* head may be cloned (check refcnt to be sure). */
- pkt_type, /* Packet class */
- ip_summed; /* Driver fed us an IP checksum */
__u32 priority; /* Packet queueing priority */
- atomic_t users; /* User count - see datagram.c,tcp.c */
unsigned short protocol; /* Packet protocol from driver. */
unsigned short security; /* Security level of packet */
- unsigned int truesize; /* Buffer size */
-
- unsigned char *head; /* Head of buffer */
- unsigned char *data; /* Data head pointer */
- unsigned char *tail; /* Tail pointer */
- unsigned char *end; /* End pointer */
void (*destructor)(struct sk_buff *); /* Destruct function */
#ifdef CONFIG_NETFILTER
@@ -233,6 +241,8 @@
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
+extern void skb_recycled_init(struct sk_buff *buf);
+extern struct sk_buff * skb_recycle(struct sk_buff *buf);
extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
@@ -822,7 +832,7 @@
return skb->data;
}
-static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len-=len;
if (skb->len < skb->data_len)
@@ -850,7 +860,7 @@
extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
__pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/spinlock.h ./include/linux/spinlock.h
--- ../linux-2.4.20-pure/include/linux/spinlock.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/spinlock.h 2003-02-10 10:45:29.000000000 -0800
@@ -2,6 +2,7 @@
#define __LINUX_SPINLOCK_H
#include <linux/config.h>
+#include <linux/types.h>
/*
* These are the generic versions of the spinlocks and read-write
@@ -71,13 +72,8 @@
* Some older gcc versions had a nasty bug with empty initializers.
* (XXX: could someone please confirm whether egcs 1.1 still has this bug?)
*/
-#if (__GNUC__ > 2 || __GNUC_MINOR__ > 95)
- typedef struct { } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
-#else
- typedef struct { int gcc_is_buggy; } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-#endif
+typedef EMPTY_STRUCT_DECL(/* unnamed */) spinlock_t;
+#define SPIN_LOCK_UNLOCKED EMPTY_STRUCT_INIT(spinlock_t)
#define spin_lock_init(lock) do { } while(0)
#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
@@ -135,13 +131,8 @@
* Some older gcc versions had a nasty bug with empty initializers.
* (XXX: could someone please confirm whether egcs 1.1 still has this bug?)
*/
-#if (__GNUC__ > 2 || __GNUC_MINOR__ > 91)
- typedef struct { } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { }
-#else
- typedef struct { int gcc_is_buggy; } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#endif
+typedef EMPTY_STRUCT_DECL(/* unnamed */) rwlock_t;
+#define RW_LOCK_UNLOCKED EMPTY_STRUCT_INIT(rwlock_t)
#define rwlock_init(lock) do { } while(0)
#define read_lock(lock) (void)(lock) /* Not "unused variable". */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/swap.h ./include/linux/swap.h
--- ../linux-2.4.20-pure/include/linux/swap.h 2002-11-28 15:53:15.000000000 -0800
+++ ./include/linux/swap.h 2003-02-10 10:45:29.000000000 -0800
@@ -100,7 +100,7 @@
struct vm_area_struct;
struct sysinfo;
-struct zone_t;
+struct zone_struct;
/* linux/mm/swap.c */
extern void FASTCALL(lru_cache_add(struct page *));
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/linux/types.h ./include/linux/types.h
--- ../linux-2.4.20-pure/include/linux/types.h 2002-08-02 17:39:46.000000000 -0700
+++ ./include/linux/types.h 2003-02-10 10:08:04.000000000 -0800
@@ -127,4 +127,23 @@
char f_fpack[6];
};
+/*
+ * Click: Macros for defining empty structures. Needed because GCC's C and C++
+ * compilers have different ABIs for empty structures.
+ */
+
+#if 1
+# define EMPTY_STRUCT_DECL(s) struct s { int gcc_is_buggy; }
+# define EMPTY_STRUCT_INIT(s) (s) { 0 }
+#else
+/* This code remains in case GCC ever gets an option to give empty structures
+ * zero size. */
+# define EMPTY_STRUCT_DECL(s) struct s { }
+# ifdef __cplusplus
+# define EMPTY_STRUCT_INIT(s) s()
+# else
+# define EMPTY_STRUCT_INIT(s) (s) { }
+# endif
+#endif
+
#endif /* _LINUX_TYPES_H */
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/include/net/route.h ./include/net/route.h
--- ../linux-2.4.20-pure/include/net/route.h 2002-08-02 17:39:46.000000000 -0700
+++ ./include/net/route.h 2003-01-26 18:59:42.000000000 -0800
@@ -140,7 +140,13 @@
static inline int ip_route_output(struct rtable **rp,
u32 daddr, u32 saddr, u32 tos, int oif)
{
+#ifdef __cplusplus
+ struct rt_key key = { daddr, saddr };
+ key.oif = oif;
+ key.tos = tos;
+#else
struct rt_key key = { dst:daddr, src:saddr, oif:oif, tos:tos };
+#endif
return ip_route_output_key(rp, &key);
}
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/kernel/ksyms.c ./kernel/ksyms.c
--- ../linux-2.4.20-pure/kernel/ksyms.c 2002-11-28 15:53:15.000000000 -0800
+++ ./kernel/ksyms.c 2003-01-22 16:45:29.000000000 -0800
@@ -533,6 +533,8 @@
EXPORT_SYMBOL(event);
EXPORT_SYMBOL(brw_page);
EXPORT_SYMBOL(__inode_dir_notify);
+EXPORT_SYMBOL(super_blocks);
+EXPORT_SYMBOL(sb_lock);
#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/net/core/dev.c ./net/core/dev.c
--- ../linux-2.4.20-pure/net/core/dev.c 2002-11-28 15:53:15.000000000 -0800
+++ ./net/core/dev.c 2003-01-22 17:51:37.000000000 -0800
@@ -182,6 +182,9 @@
static struct notifier_block *netdev_chain=NULL;
+/* Click: input packet handlers, might steal packets from net_rx_action. */
+static struct notifier_block *net_in_chain = 0;
+
/*
* Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler.
@@ -1379,6 +1382,22 @@
}
+/*
+ * Click: Allow Click to ask to intercept input packets.
+ */
+int
+register_net_in(struct notifier_block *nb)
+{
+ return notifier_chain_register(&net_in_chain, nb);
+}
+
+int
+unregister_net_in(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&net_in_chain, nb);
+}
+
+
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
#endif
@@ -1412,11 +1431,10 @@
}
#endif /* CONFIG_NET_DIVERT */
-int netif_receive_skb(struct sk_buff *skb)
+int netif_receive_skb(struct sk_buff *skb, unsigned short type, int notifier_data)
{
struct packet_type *ptype, *pt_prev;
int ret = NET_RX_DROP;
- unsigned short type = skb->protocol;
if (skb->stamp.tv_sec == 0)
do_gettimeofday(&skb->stamp);
@@ -1434,6 +1452,14 @@
skb->h.raw = skb->nh.raw = skb->data;
+ /* Click: may want to steal the packet */
+ if (notifier_data >= 0
+ && notifier_call_chain(&net_in_chain,
+ notifier_data,
+ skb) & NOTIFY_STOP_MASK) {
+ return ret;
+ }
+
pt_prev = NULL;
for (ptype = ptype_all; ptype; ptype = ptype->next) {
if (!ptype->dev || ptype->dev == skb->dev) {
@@ -1513,7 +1539,7 @@
dev = skb->dev;
- netif_receive_skb(skb);
+ netif_receive_skb(skb, skb->protocol, skb_queue_len(&queue->input_pkt_queue));
dev_put(dev);
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/net/core/skbuff.c ./net/core/skbuff.c
--- ../linux-2.4.20-pure/net/core/skbuff.c 2002-08-02 17:39:46.000000000 -0700
+++ ./net/core/skbuff.c 2003-01-22 16:34:29.000000000 -0800
@@ -443,6 +443,65 @@
#endif
}
+/* Click: attempts to recycle a sk_buff. if it can be recycled, return it
+ * without reinitializing any bits */
+struct sk_buff *skb_recycle(struct sk_buff *skb)
+{
+ if (atomic_dec_and_test(&skb->users)) {
+
+ if (skb->list) {
+ printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+ "on a list (from %p).\n", NET_CALLER(skb));
+ BUG();
+ }
+
+ dst_release(skb->dst);
+ if(skb->destructor) {
+ if (in_irq()) {
+ printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
+ NET_CALLER(skb));
+ }
+ skb->destructor(skb);
+ }
+#ifdef CONFIG_NETFILTER
+ nf_conntrack_put(skb->nfct);
+#endif
+ skb_headerinit(skb, NULL, 0);
+
+ if (!skb->cloned ||
+ atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ put_page(skb_shinfo(skb)->frags[i].page);
+ }
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_drop_fraglist(skb);
+
+ /* Load the data pointers. */
+ skb->data = skb->head;
+ skb->tail = skb->data;
+ /* end and truesize should have never changed */
+ /* skb->end = skb->data + skb->truesize; */
+
+ /* set up other state */
+ skb->len = 0;
+ skb->cloned = 0;
+
+ atomic_set(&skb->users, 1);
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+
+ return skb;
+ }
+
+ skb_head_to_pool(skb);
+ }
+
+ return 0;
+}
+
+
/**
* skb_copy - create private copy of an sk_buff
* @skb: buffer to copy
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/net/ipv4/arp.c ./net/ipv4/arp.c
--- ../linux-2.4.20-pure/net/ipv4/arp.c 2002-11-28 15:53:15.000000000 -0800
+++ ./net/ipv4/arp.c 2003-01-22 16:34:29.000000000 -0800
@@ -318,6 +318,7 @@
{
u32 saddr;
u8 *dst_ha = NULL;
+ u8 dst_ha_buf[MAX_ADDR_LEN+sizeof(unsigned long)];
struct net_device *dev = neigh->dev;
u32 target = *(u32*)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
@@ -330,8 +331,8 @@
if ((probes -= neigh->parms->ucast_probes) < 0) {
if (!(neigh->nud_state&NUD_VALID))
printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
- dst_ha = neigh->ha;
- read_lock_bh(&neigh->lock);
+ memcpy(dst_ha_buf, neigh->ha, sizeof(neigh->ha));
+ dst_ha = dst_ha_buf;
} else if ((probes -= neigh->parms->app_probes) < 0) {
#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
@@ -341,8 +342,6 @@
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
dst_ha, dev->dev_addr, NULL);
- if (dst_ha)
- read_unlock_bh(&neigh->lock);
}
static int arp_filter(__u32 sip, __u32 tip, struct net_device *dev)
diff -ru --exclude=.depend --exclude='*.o' --exclude='*.orig' --exclude='*.flags' ../linux-2.4.20-pure/net/netsyms.c ./net/netsyms.c
--- ../linux-2.4.20-pure/net/netsyms.c 2002-11-28 15:53:16.000000000 -0800
+++ ./net/netsyms.c 2003-02-09 22:25:12.000000000 -0800
@@ -277,6 +277,11 @@
EXPORT_SYMBOL(register_inetaddr_notifier);
EXPORT_SYMBOL(unregister_inetaddr_notifier);
+/* Click */
+EXPORT_SYMBOL(register_net_in);
+EXPORT_SYMBOL(unregister_net_in);
+EXPORT_SYMBOL(skb_recycle);
+
/* needed for ip_gre -cw */
EXPORT_SYMBOL(ip_statistics);
More information about the click
mailing list