Click for Linux 2.4.20

Bart Samwel bsamwel at liacs.nl
Mon Feb 3 19:24:39 EST 2003


Hi guys,

I've been needing to have Click running on Linux 2.4.20 (because I want 
it to interoperate with another project that only runs on 2.4.18+) and I 
thought, why not try it? So, here are the patches. I haven't tested them 
yet (I haven't had the chance), but at least everything builds 
correctly. I've had to fix some extra things in the kernel headers, 
most notably the appearance of "struct namespace", which was introduced 
somewhere between 2.4.9 and 2.4.20. Since "namespace" is a C++ keyword, 
headers that use it break as soon as they're included from Click's C++ 
code, so the kernel patch renames it to "name_space". The patch to 
Click itself is against version 1.2.4; the only thing it does is 
replace get_fast_time with do_gettimeofday if the Linux version is 
2.4.18 or greater.
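
For the curious, the Click-side change boils down to something like the 
sketch below (click_gettime is just an illustrative name; the real 
patch edits Click's timestamp calls in place):

    #include <linux/version.h>
    #include <linux/time.h>

    /* Use do_gettimeofday() on 2.4.18+ kernels, where get_fast_time()
     * is no longer available (per the version check described above). */
    static inline void click_gettime(struct timeval *tv)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18)
        do_gettimeofday(tv);
    #else
        get_fast_time(tv);
    #endif
    }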

The code consists of 3 parts:
1) kernel patch replacement
2) small Click 1.2.4 patch
3) C++ ABI replacement (to make things work with GCC 3.2, which 
expects __cxa_virtual_function() to be declared somewhere)

(1) and (2) are easy to do; (3) is a bit harder: install cxxabi.cc in 
click/lib and modify linuxmodule/Makefile to include cxxabi.o as a 
generic_obj. I guess you don't need (3) as long as you only support 
GCC 2.95, but if you ever want to move to a newer compiler, this is a 
useful thing to do.
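
In case it helps, here is a minimal sketch of what such a cxxabi.cc 
stub can look like. I'm using the standard __cxa_pure_virtual hook as 
the example; substitute whatever ABI symbols your linker actually 
complains about:

    /* click/lib/cxxabi.cc (sketch): the kernel doesn't link against
     * libstdc++, so provide dummy definitions for the C++ ABI symbols
     * that GCC 3.2 emits references to. */
    extern "C" {

    void __cxa_pure_virtual()
    {
        /* a pure virtual function was called; this should never happen */
    }

    }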

I have NOT tested this at the moment; I have to go home and get some 
dinner. However, I expect things to work relatively okay: I HAVE been 
able to install the module into the kernel (it's installed as I'm 
typing this), but I don't know enough about Click to make it tick 
immediately, so that'll be my next bit of experimenting.

Can you use this? Please let me know what you think.

Regards,
Bart

-- 

Leiden Institute of Advanced Computer Science (http://www.liacs.nl)
E-mail: bsamwel at liacs.nl    Telephone: +31-71-5277037
Homepage: http://www.liacs.nl/~bsamwel
Opinions stated in this e-mail are mine and not necessarily my employer's.

-------------- next part --------------
diff -Naur kernel-source-2.4.20-orig/drivers/net/eepro100.c kernel-source-2.4.20/drivers/net/eepro100.c
--- kernel-source-2.4.20-orig/drivers/net/eepro100.c	2002-11-29 00:53:13.000000000 +0100
+++ kernel-source-2.4.20/drivers/net/eepro100.c	2003-02-03 18:33:17.000000000 +0100
@@ -544,6 +544,16 @@
 static void set_rx_mode(struct net_device *dev);
 static void speedo_show_state(struct net_device *dev);
 
+/* device polling stuff */
+static int speedo_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static int speedo_tx_eob(struct net_device *dev);
+static int speedo_tx_start(struct net_device *dev);
+static int speedo_rx_refill(struct net_device *dev, struct sk_buff **);
+static struct sk_buff *speedo_tx_clean(struct net_device *dev);
+static struct sk_buff *speedo_rx_poll(struct net_device *dev, int *want);
+static int speedo_poll_on(struct net_device *dev);
+static int speedo_poll_off(struct net_device *dev);
+
 
 
 #ifdef honor_default_port
@@ -874,6 +884,17 @@
 	dev->set_multicast_list = &set_rx_mode;
 	dev->do_ioctl = &speedo_ioctl;
 
+	/* Click: polling support */
+	dev->polling = 0;
+	dev->poll_on = &speedo_poll_on;
+	dev->poll_off = &speedo_poll_off;
+	dev->rx_poll = &speedo_rx_poll;
+	dev->rx_refill = &speedo_rx_refill;
+	dev->tx_queue = &speedo_tx_queue;
+	dev->tx_clean = &speedo_tx_clean;
+	dev->tx_start = &speedo_tx_start;
+	dev->tx_eob = &speedo_tx_eob;
+
 	return 0;
 }
 
@@ -1125,7 +1146,8 @@
 		 ioaddr + SCBPointer);
 	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
 	   remain masked --Dragan */
-	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl |
+	     (dev->polling ? SCBMaskAll : 0), ioaddr + SCBCmd);
 }
 
 /*
@@ -1390,7 +1412,8 @@
 			   dev->name);
 		outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
 			 ioaddr + SCBPointer);
-		outw(CUStart, ioaddr + SCBCmd);
+		outw(CUStart | (dev->polling ? SCBMaskAll : 0),
+		     ioaddr + SCBCmd);
 		reset_mii(dev);
 	} else {
 #else
@@ -1437,6 +1460,14 @@
 	/* Prevent interrupts from changing the Tx ring from underneath us. */
 	unsigned long flags;
 
+#if 0
+	if (dev->polling)
+	{
+		printk(KERN_ERR "%s: start_xmit while polling\n", dev->name);
+		return 1;
+	}
+#endif
+
 	spin_lock_irqsave(&sp->lock, flags);
 
 	/* Check if there are enough space. */
@@ -1494,7 +1525,6 @@
 	spin_unlock_irqrestore(&sp->lock, flags);
 
 	dev->trans_start = jiffies;
-
 	return 0;
 }
 
@@ -1503,6 +1533,12 @@
 	unsigned int dirty_tx;
 	struct speedo_private *sp = (struct speedo_private *)dev->priv;
 
+	if (dev->polling) {
+		printk(KERN_ERR "%s: speedo_tx_buffer_gc while polling\n",
+		       dev->name);
+		return;
+	}
+
 	dirty_tx = sp->dirty_tx;
 	while ((int)(sp->cur_tx - dirty_tx) > 0) {
 		int entry = dirty_tx % TX_RING_SIZE;
@@ -1566,6 +1602,11 @@
 	long ioaddr, boguscnt = max_interrupt_work;
 	unsigned short status;
 
+#if 0
+	if (dev->polling)
+		printk(KERN_ERR "%s: interrupt while polling\n", dev->name);
+#endif
+
 	ioaddr = dev->base_addr;
 	sp = (struct speedo_private *)dev->priv;
 
@@ -1594,13 +1635,15 @@
 			break;
 
 
-		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
-			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+		if (!dev->polling &&
+		    ((status & 0x5000) ||	/* Packet received, or Rx error. */
+			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed))
 									/* Need to gather the postponed packet. */
 			speedo_rx(dev);
 
 		/* Always check if all rx buffers are allocated.  --SAW */
-		speedo_refill_rx_buffers(dev, 0);
+		if (!dev->polling)
+		  speedo_refill_rx_buffers(dev, 0);
 		
 		spin_lock(&sp->lock);
 		/*
@@ -1625,7 +1668,7 @@
 		
 		
 		/* User interrupt, Command/Tx unit interrupt or CU not active. */
-		if (status & 0xA400) {
+		if (!dev->polling && (status & 0xA400)) {
 			speedo_tx_buffer_gc(dev);
 			if (sp->tx_full
 				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
@@ -1743,6 +1786,12 @@
 {
 	struct speedo_private *sp = (struct speedo_private *)dev->priv;
 
+	if (dev->polling) {
+		printk(KERN_ERR "%s: speedo_refill_rx_buffers called "
+		       "while polling\n", dev->name);
+		return;
+	}
+
 	/* Refill the RX ring. */
 	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
 			speedo_refill_rx_buf(dev, force) != -1);
@@ -1757,6 +1806,12 @@
 	int alloc_ok = 1;
 	int npkts = 0;
 
+ 	if (dev->polling) {
+ 		printk(KERN_ERR "%s: in speedo_rx() while polling\n",
+ 		       dev->name);
+ 		return 0;
+ 	}
+
 	if (netif_msg_intr(sp))
 		printk(KERN_DEBUG " In speedo_rx().\n");
 	/* If we own the next entry, it's a new packet. Send it up. */
@@ -2449,3 +2504,368 @@
  *  tab-width: 4
  * End:
  */
+
+/*
+ * Click: Polling extensions.  Most of this code has been copied
+ * from various routines above with slight modifications.
+ */
+
+static int speedo_rx_refill(struct net_device *dev, struct sk_buff **skbs) {
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	struct sk_buff *skb_list;
+	int dirty_rx = sp->dirty_rx;
+
+	/* If the list is empty, return the number of skb's we want */
+	if (skbs == 0)
+		return sp->cur_rx - sp->dirty_rx;
+
+	skb_list = *skbs;
+
+	/*
+	 * Refill the RX ring with supplied skb's.  Unlike
+	 * speedo_refill_rx_buf routine, we don't have to
+	 * worry about failed allocations.
+	 */
+	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 && skb_list) {
+		int entry;
+		struct RxFD *rxf;
+		struct sk_buff *skb;
+
+		entry = sp->dirty_rx % RX_RING_SIZE;
+		if (sp->rx_skbuff[entry] == NULL) {
+			skb = skb_list;
+			skb_list = skb->next;
+			skb->prev = skb->next = NULL;
+			skb->list = NULL;
+
+			sp->rx_skbuff[entry] = skb;
+			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+			sp->rx_ring_dma[entry] = pci_map_single(sp->pdev, rxf,
+				PKT_BUF_SZ + sizeof(struct RxFD),
+				PCI_DMA_FROMDEVICE);
+
+			skb->dev = dev;
+			skb_reserve(skb, sizeof(struct RxFD));
+			rxf->rx_buf_addr = 0xffffffff;
+			pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
+					    sizeof(struct RxFD),
+					    PCI_DMA_TODEVICE);
+		} else {
+			rxf = sp->rx_ringp[entry];
+		}
+		speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
+		sp->dirty_rx++;
+	}
+
+	/*
+	 * Check if the RU is stopped -- restart it, if so.
+	 */
+	if ((inw(dev->base_addr + SCBStatus) & 0x003c) == 0x0008) {
+		wait_for_cmd_done(dev->base_addr + SCBCmd);
+
+		/*
+		 * If the RU stopped, it's because there aren't
+		 * any DMA buffers left, so the first DMA buffer
+		 * we've just refilled is where we should start
+		 * receiving.
+		 */
+		outl(virt_to_bus(sp->rx_ringp[dirty_rx % RX_RING_SIZE]),
+		     dev->base_addr + SCBPointer);
+		outb(RxStart, dev->base_addr + SCBCmd);
+	}
+
+	/*
+	 * Clear error flags on the RX ring, write back the remaining
+	 * skb's that we haven't used, and return the number of dirty
+	 * buffers remaining.
+	 */
+	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported);
+	*skbs = skb_list;
+	return sp->cur_rx - sp->dirty_rx;
+}
+
+static struct sk_buff *speedo_rx_poll(struct net_device *dev, int *want) {
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	int entry = sp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+	struct sk_buff *skb_head, *skb_last;
+	int got = 0;
+
+	skb_head = skb_last = NULL;
+
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while (sp->rx_ringp[entry] != NULL) {
+		int status;
+		int pkt_len;
+
+		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
+			sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+		status = le32_to_cpu(sp->rx_ringp[entry]->status);
+		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+
+		if (!(status & RxComplete))
+			break;
+
+		if (--rx_work_limit < 0 || got == *want)
+			break;
+
+		/* Check for a rare out-of-memory case: the current buffer is
+		   the last buffer allocated in the RX ring.  --SAW */
+		if (sp->last_rxf == sp->rx_ringp[entry]) {
+			/*
+			 * Postpone the packet.  It'll be reaped next time
+			 * when this packet is no longer the last packet
+			 * in the ring.
+			 */
+			if (debug > 2)
+				printk(KERN_DEBUG "%s: RX packet postponed!\n",
+					   dev->name);
+			sp->rx_ring_state |= RrPostponed;
+			break;
+		}
+
+		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+			if (status & RxErrTooBig) {
+				printk(KERN_ERR "%s: Ethernet frame overran "
+				       "the Rx buffer, status %8.8x!\n",
+				       dev->name, status);
+			} else if (! (status & RxOK)) {
+				/*
+				 * There was a fatal error.  This *should*
+				 * be impossible.
+				 */
+				sp->stats.rx_errors++;
+				printk(KERN_ERR "%s: Anomalous event in "
+				       "speedo_rx_poll(), status %8.8x.\n",
+				       dev->name, status);
+			}
+		} else {
+			struct sk_buff *skb = sp->rx_skbuff[entry];
+
+			if (skb == NULL) {
+				printk(KERN_ERR "%s: Inconsistent Rx "
+				       "descriptor chain.\n", dev->name);
+				break;
+			}
+
+			/* Remove skbuff from RX ring. */
+			sp->rx_skbuff[entry] = NULL;
+			sp->rx_ringp[entry] = NULL;
+			skb_put(skb, pkt_len);
+			pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
+				PKT_BUF_SZ + sizeof(struct RxFD),
+				PCI_DMA_FROMDEVICE);
+
+			skb->protocol = eth_type_trans(skb, dev);
+			sp->stats.rx_packets++;
+			sp->stats.rx_bytes += pkt_len;
+
+			/* Append the skb to the received list */
+			if (got == 0) {
+				skb_head = skb_last = skb;
+				skb->next = skb->prev = NULL;
+			} else {
+				skb_last->next = skb;
+				skb->prev = skb_last;
+				skb->next = NULL;
+				skb_last = skb;
+			}
+
+			got++;
+		}
+
+		entry = (++sp->cur_rx) % RX_RING_SIZE;
+		sp->rx_ring_state &= ~RrPostponed;
+	}
+
+	if (got == 0 && (inw(dev->base_addr + SCBStatus) & 0x003c) == 0x0008) {
+		wait_for_cmd_done(dev->base_addr + SCBCmd);
+
+		outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+		     dev->base_addr + SCBPointer);
+		outb(RxStart, dev->base_addr + SCBCmd);
+	}
+
+	sp->last_rx_time = jiffies;
+	*want = got;
+	return skb_head;
+}
+
+static int speedo_tx_queue(struct net_device *dev, struct sk_buff *skb) {
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	int entry;
+   
+	unsigned flags;
+	spin_lock_irqsave(&sp->lock, flags);
+
+	/* Check if there is enough space. */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		sp->tx_full = 1;
+		spin_unlock_irqrestore(&sp->lock, flags);
+		return 1;
+	}
+
+	/* Calculate the Tx descriptor entry. */
+	entry = sp->cur_tx++ % TX_RING_SIZE;
+
+	sp->tx_skbuff[entry] = skb;
+	sp->tx_ring[entry].status =
+		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+	sp->tx_ring[entry].link =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+	sp->tx_ring[entry].tx_desc_addr =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+
+	/* The data region is always in one buffer descriptor. */
+	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+	sp->tx_ring[entry].tx_buf_addr0 =
+		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
+					   skb->len, PCI_DMA_TODEVICE));
+	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+
+	/* Clear the suspend bit on the last command */
+	clear_suspend(sp->last_cmd);
+	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+	/* Leave room for set_rx_mode(). If there is no more space than
+	 * reserved for multicast filter mark the ring as full.
+	 */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		netif_stop_queue(dev);
+		sp->tx_full = 1;
+	}
+
+	spin_unlock_irqrestore(&sp->lock, flags);
+	return 0;
+}
+
+static int speedo_tx_eob(struct net_device *dev)
+{
+	/* benjie: not sure what this is used for... */
+	// wait_for_cmd_done(dev->base_addr + SCBCmd);
+
+	/* benjie: i suspect this won't cause a race condition because eob
+	 * is called right after the last tx_queue and also we batch a
+	 * bunch of packets, so tx is probably not going to be as fast as
+	 * we are. */
+	outb(CUResume, dev->base_addr + SCBCmd);
+	dev->trans_start = jiffies;
+	return 0;
+}
+
+static int speedo_tx_start(struct net_device *dev) {
+	printk("hard tx_start\n");
+	/* must have been suspended before the last queued DMA ring, so
+	 * this mindless CUResume is probably okay */
+	outb(CUResume, dev->base_addr + SCBCmd);
+	dev->trans_start = jiffies;
+	return 0;
+}
+
+static struct sk_buff *speedo_tx_clean(struct net_device *dev) {
+	unsigned int dirty_tx;
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	struct sk_buff *skb_head, *skb_last;
+
+	skb_head = skb_last = NULL;
+	dirty_tx = sp->dirty_tx;
+	while ((int)(sp->cur_tx - dirty_tx) > 0) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+		if ((status & StatusComplete) == 0)
+			break;		/* It still hasn't been processed. */
+
+		if (status & TxUnderrun)
+			if (sp->tx_threshold < 0x01e08000) {
+				if (debug > 2)
+					printk(KERN_DEBUG "%s: TX underrun, "
+					       "threshold adjusted.\n",
+					       dev->name);
+				sp->tx_threshold += 0x00040000;
+			}
+
+		/* Put the original skb on the return list. */
+		if (sp->tx_skbuff[entry]) {
+			struct sk_buff *skb = sp->tx_skbuff[entry];
+
+			sp->stats.tx_packets++;	/* Count only user packets. */
+			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+			pci_unmap_single(sp->pdev,
+				le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+				sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+			sp->tx_skbuff[entry] = 0;
+
+			if (skb_head == NULL) {
+				skb_head = skb_last = skb;
+				skb->next = skb->prev = NULL;
+			} else {
+				skb_last->next = skb;
+				skb->prev = skb_last;
+				skb->next = NULL;
+				skb_last = skb;
+			}
+		}
+		dirty_tx++;
+	}
+
+	if (debug && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
+		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+				" full=%d.\n",
+				dirty_tx, sp->cur_tx, sp->tx_full);
+		dirty_tx += TX_RING_SIZE;
+	}
+
+	while (sp->mc_setup_head != NULL
+		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
+		struct speedo_mc_block *t;
+		if (debug > 1)
+			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
+				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
+		t = sp->mc_setup_head->next;
+		kfree(sp->mc_setup_head);
+		sp->mc_setup_head = t;
+	}
+	if (sp->mc_setup_head == NULL)
+		sp->mc_setup_tail = NULL;
+
+	sp->dirty_tx = dirty_tx;
+
+	if (sp->tx_full && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+		/* The ring is no longer full. */
+		sp->tx_full = 0;
+		netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
+	}
+	return skb_head;
+}
+
+static int speedo_poll_on(struct net_device *dev) {
+	long ioaddr = dev->base_addr;
+
+	if (dev->polling == 0) {
+		/* Mask all interrupts */
+		outw(SCBMaskAll, ioaddr + SCBCmd);
+
+		dev->polling = 2;
+	}
+
+	return 0;
+}
+
+static int speedo_poll_off(struct net_device *dev) {
+	long ioaddr = dev->base_addr;
+
+	if (dev->polling > 0) {
+		/* Enable interrupts */
+		outw(0, ioaddr + SCBCmd);
+
+		dev->polling = 0;
+	}
+
+	return 0;
+}
+
diff -Naur kernel-source-2.4.20-orig/drivers/net/macsonic.c kernel-source-2.4.20/drivers/net/macsonic.c
--- kernel-source-2.4.20-orig/drivers/net/macsonic.c	2002-11-29 00:53:14.000000000 +0100
+++ kernel-source-2.4.20/drivers/net/macsonic.c	2003-02-03 18:34:10.000000000 +0100
@@ -635,7 +635,7 @@
 }
 
 #ifdef MODULE
-static char namespace[16] = "";
+static char name_space[16] = "";
 static struct net_device dev_macsonic;
 
 MODULE_PARM(sonic_debug, "i");
@@ -647,7 +647,7 @@
 int
 init_module(void)
 {
-        dev_macsonic.name = namespace;
+        dev_macsonic.name = name_space;
         dev_macsonic.init = macsonic_probe;
 
         if (register_netdev(&dev_macsonic) != 0) {
diff -Naur kernel-source-2.4.20-orig/drivers/net/tulip/interrupt.c kernel-source-2.4.20/drivers/net/tulip/interrupt.c
--- kernel-source-2.4.20-orig/drivers/net/tulip/interrupt.c	2002-11-29 00:53:14.000000000 +0100
+++ kernel-source-2.4.20/drivers/net/tulip/interrupt.c	2003-02-03 18:33:17.000000000 +0100
@@ -311,6 +311,10 @@
 #endif
 }
 
+/* Polling extensions -- interrupt stats */
+void (*tulip_interrupt_hook)(struct net_device *, unsigned);
+
+
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -320,7 +324,6 @@
 	long ioaddr = dev->base_addr;
 	int csr5;
 	int entry;
-	int missed;
 	int rx = 0;
 	int tx = 0;
 	int oi = 0;
@@ -328,6 +331,7 @@
 	int maxtx = TX_RING_SIZE;
 	int maxoi = TX_RING_SIZE;
 	unsigned int work_count = tulip_max_interrupt_work;
+	int first_time = 1;
 
 	/* Let's see whether the interrupt really is for us */
 	csr5 = inl(ioaddr + CSR5);
@@ -341,14 +345,33 @@
 	tp->nir++;
 
 	do {
+		if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
+			if (dev->polling > 0)
+				goto out;
+			if (first_time)
+				goto out;
+			else
+				break;
+		}
+		first_time = 0;
+
 		/* Acknowledge all of the current interrupt sources ASAP. */
 		outl(csr5 & 0x0001ffff, ioaddr + CSR5);
 
+		/* Notify tulip_interrupt_hook */
+		if (tulip_interrupt_hook)
+			tulip_interrupt_hook(dev, CSR5);
+
+		if (dev->polling > 0) {
+			if ((csr5 & (TxDied|TimerInt|AbnormalIntr)) == 0)
+				goto out;
+		}
+
 		if (tulip_debug > 4)
 			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
 				   dev->name, csr5, inl(dev->base_addr + CSR5));
 
-		if (csr5 & (RxIntr | RxNoBuf)) {
+		if ((csr5 & (RxIntr | RxNoBuf)) && (dev->polling == 0)) {
 #ifdef CONFIG_NET_HW_FLOWCONTROL
                         if ((!tp->fc_bit) ||
 			    (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
@@ -357,7 +380,13 @@
 			tulip_refill_rx(dev);
 		}
 
-		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+		if ((csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) &&
+		    (dev->polling == 0)) {
+			/*
+			 * part of the following code is duplicated at the end
+			 * in tulip_tx_clean for the polling driver; changes
+			 * here should propagate to there as well.
+			 */
 			unsigned int dirty_tx;
 
 			spin_lock(&tp->lock);
@@ -425,16 +454,17 @@
 				netif_wake_queue(dev);
 
 			tp->dirty_tx = dirty_tx;
-			if (csr5 & TxDied) {
-				if (tulip_debug > 2)
-					printk(KERN_WARNING "%s: The transmitter stopped."
-						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
-						   dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
-				tulip_restart_rxtx(tp);
-			}
 			spin_unlock(&tp->lock);
 		}
 
+		if (csr5 & TxDied) { /* XXX move after loop? */
+			if (tulip_debug > 2)
+				printk(KERN_WARNING "%s: The transmitter stopped."
+				       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+				       dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+			tulip_restart_rxtx(tp);
+		}
+
 		/* Log errors. */
 		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
 			if (csr5 == 0xffffffff)
@@ -456,7 +486,10 @@
 				}
 			}
 			if (csr5 & RxDied) {		/* Missed a Rx frame. */
-                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+				unsigned csr8status = inl(ioaddr + CSR8);
+				unsigned fifostatus = csr8status >> 17;
+                                tp->stats.rx_missed_errors += csr8status & 0xffff;
+				tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
 #ifdef CONFIG_NET_HW_FLOWCONTROL
 				if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
 					tp->stats.rx_errors++;
@@ -547,7 +580,9 @@
 		csr5 = inl(ioaddr + CSR5);
 	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
 
-	tulip_refill_rx(dev);
+	if (dev->polling == 0) {
+		tulip_refill_rx(dev);
+	}
 
 	/* check if the card is in suspend mode */
 	entry = tp->dirty_rx % RX_RING_SIZE;
@@ -570,12 +605,230 @@
 		}
 	}
 
+#if 0
 	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
 		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
 	}
+#endif
 
 	if (tulip_debug > 4)
 		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
 			   dev->name, inl(ioaddr + CSR5));
 
+out:
+}
+
+/* Click: polling support routines */
+
+int tulip_rx_refill(struct net_device *dev, struct sk_buff **skbs) {
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	struct sk_buff *skb_list;
+
+	if (skbs == NULL)
+		return tp->cur_rx - tp->dirty_rx;
+
+	skb_list = *skbs;
+
+	/* Refill the Rx ring buffers. */
+	for (; tp->cur_rx - tp->dirty_rx > 0 && skb_list; tp->dirty_rx++) {
+		int entry = tp->dirty_rx % RX_RING_SIZE;
+		if (tp->rx_buffers[entry].skb == NULL) {
+			struct sk_buff *skb;
+			dma_addr_t mapping;
+
+			/* Grab an skb from the list we were given */
+			skb = skb_list;
+			skb_list = skb_list->next;
+			skb->prev = NULL;
+			skb->next = NULL;
+			skb->list = NULL;
+
+			tp->rx_buffers[entry].skb = skb;
+
+			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+						 PCI_DMA_FROMDEVICE);
+			tp->rx_buffers[entry].mapping = mapping;
+
+			skb->dev = dev;	/* Mark as being used by this device. */
+			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+		}
+		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+	}
+	if(tp->chip_id == LC82C168) {
+		if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
+			/* Rx stopped due to out of buffers,
+			 * restart it
+			 */
+			outl(0x01, dev->base_addr + CSR2);
+		}
+	}
+
+	/* Return the unused skb's */
+	*skbs = skb_list;
+
+	return tp->cur_rx - tp->dirty_rx;
+}
+
+struct sk_buff *tulip_tx_clean(struct net_device *dev) {
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	struct sk_buff *skb_head, *skb_last;
+	unsigned int dirty_tx;
+
+	skb_head = skb_last = 0;
+
+	spin_lock(&tp->lock);
+
+	for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(tp->tx_ring[entry].status);
+		struct sk_buff *skb;
+
+		if (status < 0)
+			break;			/* It still has not been Txed */
+
+		/* Check for Rx filter setup frames. */
+		if (tp->tx_buffers[entry].skb == NULL) {
+			/* test because dummy frames not mapped */
+			if (tp->tx_buffers[entry].mapping)
+				pci_unmap_single(tp->pdev,
+					 tp->tx_buffers[entry].mapping,
+					 sizeof(tp->setup_frame),
+					 PCI_DMA_TODEVICE);
+			continue;
+		}
+
+		if (status & 0x8000) {
+			/* There was an major error, log it. */
+#ifndef final_version
+			if (tulip_debug > 1)
+				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+					   dev->name, status);
+#endif
+			tp->stats.tx_errors++;
+			if (status & 0x4104) tp->stats.tx_aborted_errors++;
+			if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+			if (status & 0x0200) tp->stats.tx_window_errors++;
+			if (status & 0x0002) tp->stats.tx_fifo_errors++;
+			if ((status & 0x0080) && tp->full_duplex == 0)
+				tp->stats.tx_heartbeat_errors++;
+		} else {
+			tp->stats.tx_bytes +=
+				tp->tx_buffers[entry].skb->len;
+			tp->stats.collisions += (status >> 3) & 15;
+			tp->stats.tx_packets++;
+		}
+
+		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+				 tp->tx_buffers[entry].skb->len,
+				 PCI_DMA_TODEVICE);
+
+		/* Remove from buffer list */
+		skb = tp->tx_buffers[entry].skb;
+
+		tp->tx_buffers[entry].skb = NULL;
+		tp->tx_buffers[entry].mapping = 0;
+
+		/* Put the skb onto the return list */
+		if (skb_head == 0) {
+			skb_head = skb;
+			skb_last = skb;
+			skb_last->next = NULL;
+			skb_last->prev = NULL;
+		} else {
+			skb_last->next = skb;
+			skb->prev = skb_last;
+			skb->next = NULL;
+			skb_last = skb;
+		}
+	}
+
+#ifndef final_version
+	if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+		printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+			   dev->name, dirty_tx, tp->cur_tx);
+		dirty_tx += TX_RING_SIZE;
+	}
+#endif
+
+#if 0
+	if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+		netif_wake_queue(dev);
+#endif
+
+	tp->dirty_tx = dirty_tx;
+	spin_unlock(&tp->lock);
+
+	return skb_head;
+}
+
+struct sk_buff *tulip_rx_poll(struct net_device *dev, int *want) {
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int entry = tp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+	struct sk_buff *skb_head, *skb_last;
+	int got = 0;
+
+	skb_head = skb_last = NULL;
+
+	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+		if (--rx_work_limit < 0 || got == *want) break;
+
+		if ((status & 0x38008300) != 0x0300) {
+			if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers. */
+				if ((status & 0xffff) != 0x7fff) {
+					if (tulip_debug > 1)
+						printk(KERN_WARNING "%s: Oversized Ethernet frame "
+							   "spanned multiple buffers, status %8.8x!\n",
+							   dev->name, status);
+					tp->stats.rx_length_errors++;
+				}
+			} else if (status & RxDescFatalErr) {
+				/* There was a fatal error. */
+				if (tulip_debug > 2)
+					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+						   dev->name, status);
+				tp->stats.rx_errors++; /* end of a packet.*/
+				if (status & 0x0890) tp->stats.rx_length_errors++;
+				if (status & 0x0004) tp->stats.rx_frame_errors++;
+				if (status & 0x0002) tp->stats.rx_crc_errors++;
+				if (status & 0x0001) tp->stats.rx_fifo_errors++;
+			}
+		} else {
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = ((status >> 16) & 0x7ff) - 4;
+			struct sk_buff *skb = tp->rx_buffers[entry].skb;
+
+			pci_unmap_single(tp->pdev,
+					 tp->rx_buffers[entry].mapping,
+					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+			tp->rx_buffers[entry].skb = NULL;
+			tp->rx_buffers[entry].mapping = 0;
+
+			skb_put(skb, pkt_len);
+			skb->protocol = eth_type_trans(skb, dev);
+			tp->stats.rx_packets++;
+			tp->stats.rx_bytes += pkt_len;
+
+			if (got == 0) {
+				skb_head = skb;
+				skb_last = skb;
+				skb->next = skb->prev = NULL;
+			} else {
+				skb_last->next = skb;
+				skb->prev = skb_last;
+				skb->next = NULL;
+				skb_last = skb;
+			}
+			got++;
+		}
+		entry = (++tp->cur_rx) % RX_RING_SIZE;
+	}
+
+	dev->last_rx = jiffies;
+	*want = got;
+	return skb_head;
 }
diff -Naur kernel-source-2.4.20-orig/drivers/net/tulip/tulip_core.c kernel-source-2.4.20/drivers/net/tulip/tulip_core.c
--- kernel-source-2.4.20-orig/drivers/net/tulip/tulip_core.c	2002-11-29 00:53:14.000000000 +0100
+++ kernel-source-2.4.20/drivers/net/tulip/tulip_core.c	2003-02-03 18:33:17.000000000 +0100
@@ -264,6 +264,16 @@
 static void set_rx_mode(struct net_device *dev);
 
 
+/* Click: polling support */
+static int tulip_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static int tulip_tx_eob(struct net_device *dev);
+static int tulip_tx_start(struct net_device *dev);
+int tulip_rx_refill(struct net_device *dev, struct sk_buff **);
+struct sk_buff *tulip_tx_clean(struct net_device *dev);
+struct sk_buff *tulip_rx_poll(struct net_device *dev, int *want);
+static int tulip_poll_on(struct net_device *dev);
+static int tulip_poll_off(struct net_device *dev);
+
 
 static void tulip_set_power_state (struct tulip_private *tp,
 				   int sleep, int snooze)
@@ -709,6 +719,17 @@
 }
 
 static int
+tulip_tx_start(struct net_device *dev) {
+	/* Trigger an immediate transmit demand unless polling */
+	if (dev->polling <= 0)
+		outl(0, dev->base_addr + CSR1);
+
+	dev->trans_start = jiffies;
+
+	return 0;
+}
+
+static int
 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tulip_private *tp = (struct tulip_private *)dev->priv;
@@ -744,13 +765,13 @@
 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
 	/* if we were using Transmit Automatic Polling, we would need a
 	 * wmb() here. */
+	wmb();
 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
 	wmb();
 
 	tp->cur_tx++;
 
-	/* Trigger an immediate transmit demand. */
-	outl(0, dev->base_addr + CSR1);
+	tulip_tx_start(dev);
 
 	spin_unlock_irqrestore(&tp->lock, eflags);
 
@@ -759,6 +780,19 @@
 	return 0;
 }
 
+static __inline__ unsigned long long
+tulip_get_cycles(void)
+{
+	unsigned long low, high;
+	unsigned long long x;
+
+	__asm__ __volatile__("rdtsc":"=a" (low), "=d" (high));
+	x = high;
+	x <<= 32;
+	x |= low;
+	return(x);
+}
+
 static void tulip_clean_tx_ring(struct tulip_private *tp)
 {
 	unsigned int dirty_tx;
@@ -821,8 +855,12 @@
 	if (tp->chip_id == DC21040)
 		outl (0x00000004, ioaddr + CSR13);
 
-	if (inl (ioaddr + CSR6) != 0xffffffff)
-		tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;
+	if (inl (ioaddr + CSR6) != 0xffffffff) {
+		unsigned csr8status = inl(ioaddr + CSR8);
+		unsigned fifostatus = csr8status >> 17;
+		tp->stats.rx_missed_errors += csr8status & 0xffff;
+		tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
+	}
 
 	spin_unlock_irqrestore (&tp->lock, flags);
 
@@ -901,10 +939,14 @@
 
 	if (netif_running(dev)) {
 		unsigned long flags;
+		unsigned csr8status, fifostatus;
 
 		spin_lock_irqsave (&tp->lock, flags);
 
-		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+		csr8status = inl(ioaddr + CSR8);
+		fifostatus = csr8status >> 17;
+		tp->stats.rx_missed_errors += csr8status & 0xffff;
+		tp->stats.rx_fifo_errors += fifostatus & 0x7ff;
 
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
@@ -1722,6 +1764,17 @@
 	dev->do_ioctl = private_ioctl;
 	dev->set_multicast_list = set_rx_mode;
 
+	/* Click polling for this device */
+	dev->polling = 0;
+	dev->rx_poll = tulip_rx_poll;
+	dev->rx_refill = tulip_rx_refill;
+	dev->tx_clean = tulip_tx_clean;
+	dev->tx_queue = tulip_tx_queue;
+	dev->tx_start = tulip_tx_start;
+	dev->tx_eob = tulip_tx_eob;
+	dev->poll_on = tulip_poll_on;
+	dev->poll_off = tulip_poll_off;
+
 	if (register_netdev(dev))
 		goto err_out_free_ring;
 
@@ -1930,3 +1983,113 @@
 
 module_init(tulip_init);
 module_exit(tulip_cleanup);
+
+/*
+ * Click polling extensions
+ */
+
+/* Demand polling - the TX DMA engine on some tulip cards can automatically
+ * poll the TX DMA ring for packets; with this feature the driver does not
+ * need to poke the TX DMA engine after packet transmission stopped. however
+ * it seems that on some cards this feature does not work, therefore by
+ * default it is disabled. the eob() function minimizes the number of such
+ * pokes already. */
+
+#define	DEMAND_POLLTX	0
+
+static int
+tulip_poll_on(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	int csr7;
+#if DEMAND_POLLTX
+	int csr0;
+#endif
+
+	if (dev->polling == 0) {
+		csr7 = inl(ioaddr + CSR7) & ~(NormalIntr|RxNoBuf|\
+					      RxIntr|TxIntr|TxNoBuf);
+		outl(csr7, ioaddr + CSR7);
+
+#if DEMAND_POLLTX
+		csr0 = (inl(ioaddr + CSR0) & ~(7<<17)) | (4<<17);
+		outl(csr0, ioaddr + CSR0);
+#endif
+
+		dev->polling = 2;
+	}
+
+	return 0;
+}
+
+static int
+tulip_poll_off(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	int csr7;
+#if DEMAND_POLLTX
+	int csr0;
+#endif
+
+	if (dev->polling > 0) {
+		csr7 = inl(ioaddr + CSR7) | (NormalIntr|RxNoBuf|\
+					     RxIntr|TxIntr|TxNoBuf);
+		outl(csr7, ioaddr + CSR7);
+
+#if DEMAND_POLLTX
+		csr0 = inl(ioaddr + CSR0) & ~(7<<17);
+		outl(csr0, ioaddr + CSR0);
+#endif
+
+		dev->polling = 0;
+	}
+
+	return 0;
+}
+
+static int tulip_tx_queue(struct net_device *dev, struct sk_buff *skb) {
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int entry;
+	u32 flag;
+	dma_addr_t mapping;
+
+	spin_lock_irq(&tp->lock);
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % TX_RING_SIZE;
+
+	tp->tx_buffers[entry].skb = skb;
+	mapping = pci_map_single(tp->pdev, skb->data,
+				 skb->len, PCI_DMA_TODEVICE);
+	tp->tx_buffers[entry].mapping = mapping;
+	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+	flag = 0x60000000; /* No interrupt */
+
+	if (entry == TX_RING_SIZE-1)
+		flag = 0xe0000000 | DESC_RING_WRAP;
+
+	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+	/* if we were using Transmit Automatic Polling, we would need a
+	 * wmb() here. */
+	wmb();
+	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+	wmb();
+
+	tp->cur_tx++;
+
+	/* If we've almost filled up the transmit ring, signal busy */
+	if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&tp->lock);
+
+	return 0;
+}
+
+static int tulip_tx_eob(struct net_device *dev) {
+	outl(0, dev->base_addr + CSR1);
+	dev->trans_start = jiffies;
+	return 0;
+}
+
diff -Naur kernel-source-2.4.20-orig/fs/namespace.c kernel-source-2.4.20/fs/namespace.c
--- kernel-source-2.4.20-orig/fs/namespace.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/fs/namespace.c	2003-02-03 18:34:10.000000000 +0100
@@ -92,7 +92,7 @@
 	while (mnt->mnt_parent != mnt)
 		mnt = mnt->mnt_parent;
 	spin_unlock(&dcache_lock);
-	return mnt == current->namespace->root;
+	return mnt == current->name_space->root;
 }
 
 static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
@@ -159,7 +159,7 @@
 /* iterator */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-	struct namespace *n = m->private;
+	struct name_space *n = m->private;
 	struct list_head *p;
 	loff_t l = *pos;
 
@@ -172,7 +172,7 @@
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct namespace *n = m->private;
+	struct name_space *n = m->private;
 	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
 	(*pos)++;
 	return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
@@ -180,7 +180,7 @@
 
 static void m_stop(struct seq_file *m, void *v)
 {
-	struct namespace *n = m->private;
+	struct name_space *n = m->private;
 	up_read(&n->sem);
 }
 
@@ -327,7 +327,7 @@
 		return retval;
 	}
 
-	down_write(&current->namespace->sem);
+	down_write(&current->name_space->sem);
 	spin_lock(&dcache_lock);
 
 	if (atomic_read(&sb->s_active) == 1) {
@@ -346,7 +346,7 @@
 		retval = 0;
 	}
 	spin_unlock(&dcache_lock);
-	up_write(&current->namespace->sem);
+	up_write(&current->name_space->sem);
 	return retval;
 }
 
@@ -468,7 +468,7 @@
 		struct list_head head;
 		attach_mnt(mnt, nd);
 		list_add_tail(&head, &mnt->mnt_list);
-		list_splice(&head, current->namespace->list.prev);
+		list_splice(&head, current->name_space->list.prev);
 		mntget(mnt);
 		err = 0;
 	}
@@ -494,7 +494,7 @@
 	if (err)
 		return err;
 
-	down_write(&current->namespace->sem);
+	down_write(&current->name_space->sem);
 	err = -EINVAL;
 	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
 		err = -ENOMEM;
@@ -514,7 +514,7 @@
 			mntput(mnt);
 	}
 
-	up_write(&current->namespace->sem);
+	up_write(&current->name_space->sem);
 	path_release(&old_nd);
 	return err;
 }
@@ -560,7 +560,7 @@
 	if (err)
 		return err;
 
-	down_write(&current->namespace->sem);
+	down_write(&current->name_space->sem);
 	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
 		;
 	err = -EINVAL;
@@ -600,7 +600,7 @@
 out1:
 	up(&nd->dentry->d_inode->i_zombie);
 out:
-	up_write(&current->namespace->sem);
+	up_write(&current->name_space->sem);
 	if (!err)
 		path_release(&parent_nd);
 	path_release(&old_nd);
@@ -625,7 +625,7 @@
 	if (IS_ERR(mnt))
 		goto out;
 
-	down_write(&current->namespace->sem);
+	down_write(&current->name_space->sem);
 	/* Something was mounted here while we slept */
 	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
 		;
@@ -641,7 +641,7 @@
 	mnt->mnt_flags = mnt_flags;
 	err = graft_tree(mnt, nd);
 unlock:
-	up_write(&current->namespace->sem);
+	up_write(&current->name_space->sem);
 	mntput(mnt);
 out:
 	return err;
@@ -740,27 +740,27 @@
 	return retval;
 }
 
-int copy_namespace(int flags, struct task_struct *tsk)
+int copy_name_space(int flags, struct task_struct *tsk)
 {
-	struct namespace *namespace = tsk->namespace;
-	struct namespace *new_ns;
+	struct name_space *name_space = tsk->name_space;
+	struct name_space *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
 	struct fs_struct *fs = tsk->fs;
 
-	if (!namespace)
+	if (!name_space)
 		return 0;
 
-	get_namespace(namespace);
+	get_name_space(name_space);
 
 	if (! (flags & CLONE_NEWNS))
 		return 0;
 
 	if (!capable(CAP_SYS_ADMIN)) {
-		put_namespace(namespace);
+		put_name_space(name_space);
 		return -EPERM;
 	}
 
-	new_ns = kmalloc(sizeof(struct namespace *), GFP_KERNEL);
+	new_ns = kmalloc(sizeof(struct name_space *), GFP_KERNEL);
 	if (!new_ns)
 		goto out;
 
@@ -769,9 +769,9 @@
 	new_ns->root = NULL;
 	INIT_LIST_HEAD(&new_ns->list);
 
-	down_write(&tsk->namespace->sem);
+	down_write(&tsk->name_space->sem);
 	/* First pass: copy the tree topology */
-	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
+	new_ns->root = copy_tree(name_space->root, name_space->root->mnt_root);
 	spin_lock(&dcache_lock);
 	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
 	spin_unlock(&dcache_lock);
@@ -781,7 +781,7 @@
 		struct vfsmount *p, *q;
 		write_lock(&fs->lock);
 
-		p = namespace->root;
+		p = name_space->root;
 		q = new_ns->root;
 		while (p) {
 			if (p == fs->rootmnt) {
@@ -796,14 +796,14 @@
 				altrootmnt = p;
 				fs->altrootmnt = mntget(q);
 			}
-			p = next_mnt(p, namespace->root);
+			p = next_mnt(p, name_space->root);
 			q = next_mnt(q, new_ns->root);
 		}
 		write_unlock(&fs->lock);
 	}
-	up_write(&tsk->namespace->sem);
+	up_write(&tsk->name_space->sem);
 
-	tsk->namespace = new_ns;
+	tsk->name_space = new_ns;
 
 	if (rootmnt)
 		mntput(rootmnt);
@@ -812,11 +812,11 @@
 	if (altrootmnt)
 		mntput(altrootmnt);
 
-	put_namespace(namespace);
+	put_name_space(name_space);
 	return 0;
 
 out:
-	put_namespace(namespace);
+	put_name_space(name_space);
 	return -ENOMEM;
 }
 
@@ -923,7 +923,7 @@
 	user_nd.mnt = mntget(current->fs->rootmnt);
 	user_nd.dentry = dget(current->fs->root);
 	read_unlock(&current->fs->lock);
-	down_write(&current->namespace->sem);
+	down_write(&current->name_space->sem);
 	down(&old_nd.dentry->d_inode->i_zombie);
 	error = -EINVAL;
 	if (!check_mnt(user_nd.mnt))
@@ -968,7 +968,7 @@
 	path_release(&parent_nd);
 out2:
 	up(&old_nd.dentry->d_inode->i_zombie);
-	up_write(&current->namespace->sem);
+	up_write(&current->name_space->sem);
 	path_release(&user_nd);
 	path_release(&old_nd);
 out1:
@@ -984,31 +984,31 @@
 static void __init init_mount_tree(void)
 {
 	struct vfsmount *mnt;
-	struct namespace *namespace;
+	struct name_space *name_space;
 	struct task_struct *p;
 
 	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
 	if (IS_ERR(mnt))
 		panic("Can't create rootfs");
-	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
-	if (!namespace)
-		panic("Can't allocate initial namespace");
-	atomic_set(&namespace->count, 1);
-	INIT_LIST_HEAD(&namespace->list);
-	init_rwsem(&namespace->sem);
-	list_add(&mnt->mnt_list, &namespace->list);
-	namespace->root = mnt;
+	name_space = kmalloc(sizeof(*name_space), GFP_KERNEL);
+	if (!name_space)
+		panic("Can't allocate initial name_space");
+	atomic_set(&name_space->count, 1);
+	INIT_LIST_HEAD(&name_space->list);
+	init_rwsem(&name_space->sem);
+	list_add(&mnt->mnt_list, &name_space->list);
+	name_space->root = mnt;
 
-	init_task.namespace = namespace;
+	init_task.name_space = name_space;
 	read_lock(&tasklist_lock);
 	for_each_task(p) {
-		get_namespace(namespace);
-		p->namespace = namespace;
+		get_name_space(name_space);
+		p->name_space = name_space;
 	}
 	read_unlock(&tasklist_lock);
 
-	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
-	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
+	set_fs_pwd(current->fs, name_space->root, name_space->root->mnt_root);
+	set_fs_root(current->fs, name_space->root, name_space->root->mnt_root);
 }
 
 void __init mnt_init(unsigned long mempages)
diff -Naur kernel-source-2.4.20-orig/fs/ncpfs/ioctl.c kernel-source-2.4.20/fs/ncpfs/ioctl.c
--- kernel-source-2.4.20-orig/fs/ncpfs/ioctl.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/fs/ncpfs/ioctl.c	2003-02-03 18:34:10.000000000 +0100
@@ -183,14 +183,14 @@
 					if (inode) {
 						sr.volNumber = NCP_FINFO(inode)->volNumber;
 						sr.dirEntNum = NCP_FINFO(inode)->dirEntNum;
-						sr.namespace = server->name_space[sr.volNumber];
+						sr.name_space = server->name_space[sr.volNumber];
 					} else
 						DPRINTK("ncpfs: s_root->d_inode==NULL\n");
 				} else
 					DPRINTK("ncpfs: s_root==NULL\n");
 			} else {
 				sr.volNumber = -1;
-				sr.namespace = 0;
+				sr.name_space = 0;
 				sr.dirEntNum = 0;
 			}
 			if (copy_to_user((struct ncp_setroot_ioctl*)arg, 
@@ -221,7 +221,7 @@
 				return -EINVAL;
 			} else
 				if (ncp_mount_subdir(server, &i, sr.volNumber,
-						sr.namespace, sr.dirEntNum))
+						sr.name_space, sr.dirEntNum))
 					return -ENOENT;
 
 			dentry = inode->i_sb->s_root;
diff -Naur kernel-source-2.4.20-orig/fs/ncpfs/ncplib_kernel.c kernel-source-2.4.20/fs/ncpfs/ncplib_kernel.c
--- kernel-source-2.4.20-orig/fs/ncpfs/ncplib_kernel.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/fs/ncpfs/ncplib_kernel.c	2003-02-03 18:34:10.000000000 +0100
@@ -325,7 +325,7 @@
 {
 #if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS)
 	int result;
-	__u8 *namespace;
+	__u8 *name_space;
 	__u16 no_namespaces;
 
 	ncp_init_request(server);
@@ -340,25 +340,25 @@
 
 	result = NW_NS_DOS;
 	no_namespaces = le16_to_cpu(ncp_reply_word(server, 0));
-	namespace = ncp_reply_data(server, 2);
+	name_space = ncp_reply_data(server, 2);
 
 	while (no_namespaces > 0) {
-		DPRINTK("get_namespaces: found %d on %d\n", *namespace, volume);
+		DPRINTK("get_namespaces: found %d on %d\n", *name_space, volume);
 
 #ifdef CONFIG_NCPFS_NFS_NS
-		if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) 
+		if ((*name_space == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) 
 		{
 			result = NW_NS_NFS;
 			break;
 		}
 #endif	/* CONFIG_NCPFS_NFS_NS */
 #ifdef CONFIG_NCPFS_OS2_NS
-		if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2))
+		if ((*name_space == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2))
 		{
 			result = NW_NS_OS2;
 		}
 #endif	/* CONFIG_NCPFS_OS2_NS */
-		namespace += 1;
+		name_space += 1;
 		no_namespaces -= 1;
 	}
 	ncp_unlock_server(server);
diff -Naur kernel-source-2.4.20-orig/fs/proc/base.c kernel-source-2.4.20/fs/proc/base.c
--- kernel-source-2.4.20-orig/fs/proc/base.c	2002-08-03 02:39:45.000000000 +0200
+++ kernel-source-2.4.20/fs/proc/base.c	2003-02-03 18:34:10.000000000 +0100
@@ -256,15 +256,15 @@
 
 	if (!ret) {
 		struct seq_file *m = file->private_data;
-		struct namespace *namespace;
+		struct name_space *name_space;
 		task_lock(task);
-		namespace = task->namespace;
-		if (namespace)
-			get_namespace(namespace);
+		name_space = task->name_space;
+		if (name_space)
+			get_name_space(name_space);
 		task_unlock(task);
 
-		if (namespace)
-			m->private = namespace;
+		if (name_space)
+			m->private = name_space;
 		else {
 			seq_release(inode, file);
 			ret = -EINVAL;
@@ -276,8 +276,8 @@
 static int mounts_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
-	struct namespace *namespace = m->private;
-	put_namespace(namespace);
+	struct name_space *name_space = m->private;
+	put_name_space(name_space);
 	return seq_release(inode, file);
 }
 
diff -Naur kernel-source-2.4.20-orig/fs/proc/inode.c kernel-source-2.4.20/fs/proc/inode.c
--- kernel-source-2.4.20-orig/fs/proc/inode.c	2001-11-17 20:24:32.000000000 +0100
+++ kernel-source-2.4.20/fs/proc/inode.c	2003-02-03 18:33:17.000000000 +0100
@@ -147,6 +147,11 @@
 	if (!inode)
 		goto out_fail;
 	
+	/* Click change: don't double-increment de's use count if the inode
+	 * existed already */
+	if (inode->u.generic_ip == (void *) de)
+		de_put(de);
+
 	inode->u.generic_ip = (void *) de;
 	if (de) {
 		if (de->mode) {
diff -Naur kernel-source-2.4.20-orig/include/asm-i386/highmem.h kernel-source-2.4.20/include/asm-i386/highmem.h
--- kernel-source-2.4.20-orig/include/asm-i386/highmem.h	2002-08-03 02:39:45.000000000 +0200
+++ kernel-source-2.4.20/include/asm-i386/highmem.h	2003-02-03 18:33:17.000000000 +0100
@@ -91,7 +91,7 @@
 	if (page < highmem_start_page)
 		return page_address(page);
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
+	idx = (enum fixed_addresses) (type + KM_TYPE_NR*smp_processor_id());
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
 	if (!pte_none(*(kmap_pte-idx)))
@@ -107,7 +107,8 @@
 {
 #if HIGHMEM_DEBUG
 	unsigned long vaddr = (unsigned long) kvaddr;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	enum fixed_addresses idx =
+		(enum fixed_addresses) (type + KM_TYPE_NR*smp_processor_id());
 
 	if (vaddr < FIXADDR_START) // FIXME
 		return;
diff -Naur kernel-source-2.4.20-orig/include/asm-i386/rwlock.h kernel-source-2.4.20/include/asm-i386/rwlock.h
--- kernel-source-2.4.20-orig/include/asm-i386/rwlock.h	2002-08-03 02:39:45.000000000 +0200
+++ kernel-source-2.4.20/include/asm-i386/rwlock.h	2003-02-03 18:33:17.000000000 +0100
@@ -28,7 +28,7 @@
 		     "2:\tcall " helper "\n\t" \
 		     "jmp 1b\n" \
 		     LOCK_SECTION_END \
-		     ::"a" (rw) : "memory")
+		     : : "a" (rw) : "memory")
 
 #define __build_read_lock_const(rw, helper)   \
 	asm volatile(LOCK "subl $1,%0\n\t" \
@@ -58,7 +58,7 @@
 		     "2:\tcall " helper "\n\t" \
 		     "jmp 1b\n" \
 		     LOCK_SECTION_END \
-		     ::"a" (rw) : "memory")
+		     : : "a" (rw) : "memory")
 
 #define __build_write_lock_const(rw, helper) \
 	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
diff -Naur kernel-source-2.4.20-orig/include/asm-i386/string.h kernel-source-2.4.20/include/asm-i386/string.h
--- kernel-source-2.4.20-orig/include/asm-i386/string.h	2001-08-11 03:13:47.000000000 +0200
+++ kernel-source-2.4.20/include/asm-i386/string.h	2003-02-03 18:33:17.000000000 +0100
@@ -29,6 +29,7 @@
  *		consider these trivial functions to be PD.
  */
 
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
 #define __HAVE_ARCH_STRCPY
 static inline char * strcpy(char * dest,const char *src)
 {
@@ -42,6 +43,7 @@
 	:"0" (src),"1" (dest) : "memory");
 return dest;
 }
+#endif
 
 #define __HAVE_ARCH_STRNCPY
 static inline char * strncpy(char * dest,const char *src,size_t count)
@@ -102,6 +104,7 @@
 return dest;
 }
 
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char * cs,const char * ct)
 {
@@ -122,6 +125,7 @@
 		     :"1" (cs),"2" (ct));
 return __res;
 }
+#endif
 
 #define __HAVE_ARCH_STRNCMP
 static inline int strncmp(const char * cs,const char * ct,size_t count)
@@ -182,6 +186,7 @@
 return __res;
 }
 
+#if __GNUC__ > 2 || __GNUC_MINOR__ != 96 || !defined(CLICK_LINUXMODULE)
 #define __HAVE_ARCH_STRLEN
 static inline size_t strlen(const char * s)
 {
@@ -195,6 +200,7 @@
 	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
 return __res;
 }
+#endif
 
 static inline void * __memcpy(void * to, const void * from, size_t n)
 {
diff -Naur kernel-source-2.4.20-orig/include/linux/highmem.h kernel-source-2.4.20/include/linux/highmem.h
--- kernel-source-2.4.20-orig/include/linux/highmem.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/highmem.h	2003-02-03 18:33:17.000000000 +0100
@@ -17,7 +17,7 @@
 
 static inline char *bh_kmap(struct buffer_head *bh)
 {
-	return kmap(bh->b_page) + bh_offset(bh);
+	return (char *)kmap(bh->b_page) + bh_offset(bh);
 }
 
 static inline void bh_kunmap(struct buffer_head *bh)
@@ -102,7 +102,7 @@
 
 	if (offset + size > PAGE_SIZE)
 		out_of_line_bug();
-	kaddr = kmap(page);
+	kaddr = (char*)kmap(page);
 	memset(kaddr + offset, 0, size);
 	flush_dcache_page(page);
 	flush_page_to_ram(page);
@@ -113,8 +113,8 @@
 {
 	char *vfrom, *vto;
 
-	vfrom = kmap_atomic(from, KM_USER0);
-	vto = kmap_atomic(to, KM_USER1);
+	vfrom = (char*)kmap_atomic(from, KM_USER0);
+	vto = (char*)kmap_atomic(to, KM_USER1);
 	copy_user_page(vto, vfrom, vaddr);
 	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
diff -Naur kernel-source-2.4.20-orig/include/linux/inetdevice.h kernel-source-2.4.20/include/linux/inetdevice.h
--- kernel-source-2.4.20-orig/include/linux/inetdevice.h	2002-08-03 02:39:45.000000000 +0200
+++ kernel-source-2.4.20/include/linux/inetdevice.h	2003-02-03 18:33:17.000000000 +0100
@@ -124,7 +124,7 @@
 	struct in_device *in_dev;
 
 	read_lock(&inetdev_lock);
-	in_dev = dev->ip_ptr;
+	in_dev = (struct in_device *) dev->ip_ptr;
 	if (in_dev)
 		atomic_inc(&in_dev->refcnt);
 	read_unlock(&inetdev_lock);
diff -Naur kernel-source-2.4.20-orig/include/linux/list.h kernel-source-2.4.20/include/linux/list.h
--- kernel-source-2.4.20-orig/include/linux/list.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/list.h	2003-02-03 18:35:05.000000000 +0100
@@ -91,8 +91,8 @@
 static inline void list_del(struct list_head *entry)
 {
 	__list_del(entry->prev, entry->next);
-	entry->next = (void *) 0;
-	entry->prev = (void *) 0;
+	entry->next = (struct list_head *) 0;
+	entry->prev = (struct list_head *) 0;
 }
 
 /**
diff -Naur kernel-source-2.4.20-orig/include/linux/mm.h kernel-source-2.4.20/include/linux/mm.h
--- kernel-source-2.4.20-orig/include/linux/mm.h	2002-08-03 02:39:45.000000000 +0200
+++ kernel-source-2.4.20/include/linux/mm.h	2003-02-03 18:33:17.000000000 +0100
@@ -576,7 +576,6 @@
 		return 0;
 }
 
-struct zone_t;
 /* filemap.c */
 extern void remove_inode_page(struct page *);
 extern unsigned long page_unuse(struct page *);
diff -Naur kernel-source-2.4.20-orig/include/linux/namespace.h kernel-source-2.4.20/include/linux/namespace.h
--- kernel-source-2.4.20-orig/include/linux/namespace.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/namespace.h	2003-02-03 18:34:10.000000000 +0100
@@ -2,7 +2,7 @@
 #define _NAMESPACE_H_
 #ifdef __KERNEL__
 
-struct namespace {
+struct name_space {
 	atomic_t		count;
 	struct vfsmount *	root;
 	struct list_head	list;
@@ -11,33 +11,33 @@
 
 extern void umount_tree(struct vfsmount *);
 
-static inline void put_namespace(struct namespace *namespace)
+static inline void put_name_space(struct name_space *name_space)
 {
-	if (atomic_dec_and_test(&namespace->count)) {
-		down_write(&namespace->sem);
+	if (atomic_dec_and_test(&name_space->count)) {
+		down_write(&name_space->sem);
 		spin_lock(&dcache_lock);
-		umount_tree(namespace->root);
+		umount_tree(name_space->root);
 		spin_unlock(&dcache_lock);
-		up_write(&namespace->sem);
-		kfree(namespace);
+		up_write(&name_space->sem);
+		kfree(name_space);
 	}
 }
 
-static inline void exit_namespace(struct task_struct *p)
+static inline void exit_name_space(struct task_struct *p)
 {
-	struct namespace *namespace = p->namespace;
-	if (namespace) {
+	struct name_space *name_space = p->name_space;
+	if (name_space) {
 		task_lock(p);
-		p->namespace = NULL;
+		p->name_space = NULL;
 		task_unlock(p);
-		put_namespace(namespace);
+		put_name_space(name_space);
 	}
 }
-extern int copy_namespace(int, struct task_struct *);
+extern int copy_name_space(int, struct task_struct *);
 
-static inline void get_namespace(struct namespace *namespace)
+static inline void get_name_space(struct name_space *name_space)
 {
-	atomic_inc(&namespace->count);
+	atomic_inc(&name_space->count);
 }
 
 #endif
diff -Naur kernel-source-2.4.20-orig/include/linux/ncp_fs.h kernel-source-2.4.20/include/linux/ncp_fs.h
--- kernel-source-2.4.20-orig/include/linux/ncp_fs.h	2001-02-09 20:29:44.000000000 +0100
+++ kernel-source-2.4.20/include/linux/ncp_fs.h	2003-02-03 18:34:10.000000000 +0100
@@ -75,7 +75,7 @@
 struct ncp_setroot_ioctl
 {
 	int		volNumber;
-	int		namespace;
+	int		name_space;
 	__u32		dirEntNum;
 };
 
diff -Naur kernel-source-2.4.20-orig/include/linux/netdevice.h kernel-source-2.4.20/include/linux/netdevice.h
--- kernel-source-2.4.20-orig/include/linux/netdevice.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/netdevice.h	2003-02-03 18:33:17.000000000 +0100
@@ -438,6 +438,46 @@
 	/* this will get initialized at each interface type init routine */
 	struct divert_blk	*divert;
 #endif /* CONFIG_NET_DIVERT */
+
+	/* Click polling support */
+	/*
+	 * polling is < 0 if the device does not support polling, == 0 if the
+	 * device supports polling but interrupts are on, and > 0 if polling
+	 * is on.
+	 */
+	int			polling;
+	int			(*poll_on)(struct net_device *);
+	int			(*poll_off)(struct net_device *);
+	/*
+	 * rx_poll returns to caller a linked list of sk_buff objects received
+	 * by the device. on call, the want argument specifies the number of
+	 * packets wanted. on return, the want argument specifies the number
+	 * of packets actually returned.
+	 */
+	struct sk_buff *	(*rx_poll)(struct net_device*, int *want);
+	/* refill rx dma ring using the given sk_buff list. returns 0 if
+	 * successful, or if there are more entries need to be cleaned,
+	 * returns the number of dirty entries. the ptr to the sk_buff list is
+	 * updated by the driver to point to any unused skbs.
+	 */
+	int			(*rx_refill)(struct net_device*, struct sk_buff**);
+	/*
+	 * place sk_buff on the transmit ring. returns 0 if successful, 1
+	 * otherwise
+	 */
+	int			(*tx_queue)(struct net_device *, struct sk_buff *);
+	/*
+	 * clean tx dma ring. returns the list of skb objects cleaned
+	 */
+	struct sk_buff*		(*tx_clean)(struct net_device *);
+	/*
+	 * start transmission. returns 0 if successful, 1 otherwise
+	 */
+	int			(*tx_start)(struct net_device *);
+	/*
+	 * tell device the end of a batch of packets
+	 */
+	int			(*tx_eob)(struct net_device *);
 };
 
 
@@ -476,6 +516,9 @@
 extern int		unregister_netdevice(struct net_device *dev);
 extern int 		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
+extern int		register_net_in(struct notifier_block *nb); /* Click */
+extern int		unregister_net_in(struct notifier_block *nb); /* Click */
+extern int		ptype_dispatch(struct sk_buff *skb, unsigned short type); /* Click */
 extern int		dev_new_index(void);
 extern struct net_device	*dev_get_by_index(int ifindex);
 extern struct net_device	*__dev_get_by_index(int ifindex);
diff -Naur kernel-source-2.4.20-orig/include/linux/sched.h kernel-source-2.4.20/include/linux/sched.h
--- kernel-source-2.4.20-orig/include/linux/sched.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/sched.h	2003-02-03 18:34:10.000000000 +0100
@@ -169,7 +169,7 @@
  */
 #define NR_OPEN_DEFAULT BITS_PER_LONG
 
-struct namespace;
+struct name_space;
 /*
  * Open file table structure
  */
@@ -395,8 +395,8 @@
 	struct fs_struct *fs;
 /* open file information */
 	struct files_struct *files;
-/* namespace */
-	struct namespace *namespace;
+/* name_space */
+	struct name_space *name_space;
 /* signal handlers */
 	spinlock_t sigmask_lock;	/* Protects signal and blocked */
 	struct signal_struct *sig;
diff -Naur kernel-source-2.4.20-orig/include/linux/skbuff.h kernel-source-2.4.20/include/linux/skbuff.h
--- kernel-source-2.4.20-orig/include/linux/skbuff.h	2003-01-12 21:54:43.000000000 +0100
+++ kernel-source-2.4.20/include/linux/skbuff.h	2003-02-03 18:33:17.000000000 +0100
@@ -126,15 +126,31 @@
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
+/* Click: overload sk_buff.pkt_type to contain information about whether
+   a packet is clean. Clean packets have the following fields zero:
+   dst, destructor, pkt_bridged, prev, list, sk, security, priority. */
+#define PACKET_CLEAN		128		/* Is packet clean? */
+#define PACKET_TYPE_MASK	127		/* Actual packet type */
+
+/* Click: change the sk_buff structure so that all fields used by the router
+ * are grouped together on one cache line, we hope */
 struct sk_buff {
 	/* These two members must be first. */
 	struct sk_buff	* next;			/* Next buffer in list 				*/
 	struct sk_buff	* prev;			/* Previous buffer in list 			*/
 
-	struct sk_buff_head * list;		/* List we are on				*/
-	struct sock	*sk;			/* Socket we are owned by 			*/
-	struct timeval	stamp;			/* Time we arrived				*/
+	unsigned int 	len;			/* Length of actual data			*/
+	unsigned char	*data;			/* Data head pointer				*/
+	unsigned char	*tail;			/* Tail pointer					*/
 	struct net_device	*dev;		/* Device we arrived on/are leaving by		*/
+	unsigned char 	__unused,		/* Dead field, may be reused			*/
+			cloned, 		/* head may be cloned (check refcnt to be sure). */
+  			pkt_type,		/* Packet class					*/
+  			ip_summed;		/* Driver fed us an IP checksum			*/
+	atomic_t	users;			/* User count - see datagram.c,tcp.c 		*/
+	unsigned int	truesize;		/* Buffer size 					*/
+	unsigned char	*head;			/* Head of buffer 				*/
+	unsigned char 	*end;			/* End pointer					*/
 
 	/* Transport layer header */
 	union
@@ -165,8 +181,6 @@
 	  	unsigned char 	*raw;
 	} mac;
 
-	struct  dst_entry *dst;
-
 	/* 
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
@@ -175,23 +189,17 @@
 	 */ 
 	char		cb[48];	 
 
-	unsigned int 	len;			/* Length of actual data			*/
+	struct  dst_entry *dst;
+
+	struct sk_buff_head * list;		/* List we are on				*/
+	struct sock	*sk;			/* Socket we are owned by 			*/
+	struct timeval	stamp;			/* Time we arrived				*/
+
  	unsigned int 	data_len;
 	unsigned int	csum;			/* Checksum 					*/
-	unsigned char 	__unused,		/* Dead field, may be reused			*/
-			cloned, 		/* head may be cloned (check refcnt to be sure). */
-  			pkt_type,		/* Packet class					*/
-  			ip_summed;		/* Driver fed us an IP checksum			*/
 	__u32		priority;		/* Packet queueing priority			*/
-	atomic_t	users;			/* User count - see datagram.c,tcp.c 		*/
 	unsigned short	protocol;		/* Packet protocol from driver. 		*/
 	unsigned short	security;		/* Security level of packet			*/
-	unsigned int	truesize;		/* Buffer size 					*/
-
-	unsigned char	*head;			/* Head of buffer 				*/
-	unsigned char	*data;			/* Data head pointer				*/
-	unsigned char	*tail;			/* Tail pointer					*/
-	unsigned char 	*end;			/* End pointer					*/
 
 	void 		(*destructor)(struct sk_buff *);	/* Destruct function		*/
 #ifdef CONFIG_NETFILTER
@@ -233,6 +241,8 @@
 extern void			kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *		skb_clone(struct sk_buff *skb, int priority);
 extern struct sk_buff *		skb_copy(const struct sk_buff *skb, int priority);
+extern void			skb_recycled_init(struct sk_buff *buf);
+extern struct sk_buff *		skb_recycle(struct sk_buff *buf);
 extern struct sk_buff *		pskb_copy(struct sk_buff *skb, int gfp_mask);
 extern int			pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
 extern struct sk_buff *		skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
@@ -823,7 +833,7 @@
 	return skb->data;
 }
 
-static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len-=len;
 	if (skb->len < skb->data_len)
@@ -851,7 +861,7 @@
 
 extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
 
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
 	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
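
One note on the PACKET_CLEAN overload near the top of this file: since
pkt_type now carries both the packet class and the clean bit, anything that
reads it directly should mask first. Hypothetical helpers (not in the patch)
that make the encoding explicit:

/* Hypothetical helpers for the overloaded pkt_type field; not in the patch. */
static inline int skb_is_clean(const struct sk_buff *skb)
{
	return (skb->pkt_type & PACKET_CLEAN) != 0;
}

static inline unsigned char skb_packet_class(const struct sk_buff *skb)
{
	return skb->pkt_type & PACKET_TYPE_MASK;
}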
diff -Naur kernel-source-2.4.20-orig/include/linux/spinlock.h kernel-source-2.4.20/include/linux/spinlock.h
--- kernel-source-2.4.20-orig/include/linux/spinlock.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/spinlock.h	2003-02-03 18:34:33.000000000 +0100
@@ -71,7 +71,7 @@
  * Some older gcc versions had a nasty bug with empty initializers.
  * (XXX: could someone please confirm whether egcs 1.1 still has this bug?)
  */
-#if (__GNUC__ > 2 || __GNUC_MINOR__ > 95)
+#if 0 /* g++ cannot handle this. */
   typedef struct { } spinlock_t;
   #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
 #else
diff -Naur kernel-source-2.4.20-orig/include/linux/swap.h kernel-source-2.4.20/include/linux/swap.h
--- kernel-source-2.4.20-orig/include/linux/swap.h	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/include/linux/swap.h	2003-02-03 18:33:17.000000000 +0100
@@ -100,7 +100,7 @@
 struct vm_area_struct;
 struct sysinfo;
 
-struct zone_t;
+struct zone_struct;
 
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
diff -Naur kernel-source-2.4.20-orig/include/net/route.h kernel-source-2.4.20/include/net/route.h
--- kernel-source-2.4.20-orig/include/net/route.h	2002-08-03 02:39:46.000000000 +0200
+++ kernel-source-2.4.20/include/net/route.h	2003-02-03 18:33:17.000000000 +0100
@@ -140,7 +140,13 @@
 static inline int ip_route_output(struct rtable **rp,
 				      u32 daddr, u32 saddr, u32 tos, int oif)
 {
+#ifdef __cplusplus
+	struct rt_key key = { daddr, saddr };
+	key.oif = oif;
+	key.tos = tos;
+#else
 	struct rt_key key = { dst:daddr, src:saddr, oif:oif, tos:tos };
+#endif
 
 	return ip_route_output_key(rp, &key);
 }
diff -Naur kernel-source-2.4.20-orig/kernel/exit.c kernel-source-2.4.20/kernel/exit.c
--- kernel-source-2.4.20-orig/kernel/exit.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/kernel/exit.c	2003-02-03 18:34:10.000000000 +0100
@@ -445,7 +445,7 @@
 	sem_exit();
 	__exit_files(tsk);
 	__exit_fs(tsk);
-	exit_namespace(tsk);
+	exit_name_space(tsk);
 	exit_sighand(tsk);
 	exit_thread();
 
diff -Naur kernel-source-2.4.20-orig/kernel/fork.c kernel-source-2.4.20/kernel/fork.c
--- kernel-source-2.4.20-orig/kernel/fork.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/kernel/fork.c	2003-02-03 18:34:10.000000000 +0100
@@ -688,7 +688,7 @@
 		goto bad_fork_cleanup_fs;
 	if (copy_mm(clone_flags, p))
 		goto bad_fork_cleanup_sighand;
-	if (copy_namespace(clone_flags, p))
+	if (copy_name_space(clone_flags, p))
 		goto bad_fork_cleanup_mm;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
@@ -760,7 +760,7 @@
 	return retval;
 
 bad_fork_cleanup_namespace:
-	exit_namespace(p);
+	exit_name_space(p);
 bad_fork_cleanup_mm:
 	exit_mm(p);
 bad_fork_cleanup_sighand:
diff -Naur kernel-source-2.4.20-orig/kernel/kmod.c kernel-source-2.4.20/kernel/kmod.c
--- kernel-source-2.4.20-orig/kernel/kmod.c	2002-08-03 02:39:46.000000000 +0200
+++ kernel-source-2.4.20/kernel/kmod.c	2003-02-03 18:34:10.000000000 +0100
@@ -38,7 +38,7 @@
 	struct fs_struct *our_fs, *init_fs;
 	struct dentry *root, *pwd;
 	struct vfsmount *rootmnt, *pwdmnt;
-	struct namespace *our_ns, *init_ns;
+	struct name_space *our_ns, *init_ns;
 
 	/*
 	 * Make modprobe's fs context be a copy of init's.
@@ -58,11 +58,11 @@
 	 */
 
 	init_fs = init_task.fs;
-	init_ns = init_task.namespace;
-	get_namespace(init_ns);
-	our_ns = current->namespace;
-	current->namespace = init_ns;
-	put_namespace(our_ns);
+	init_ns = init_task.name_space;
+	get_name_space(init_ns);
+	our_ns = current->name_space;
+	current->name_space = init_ns;
+	put_name_space(our_ns);
 	read_lock(&init_fs->lock);
 	rootmnt = mntget(init_fs->rootmnt);
 	root = dget(init_fs->root);
diff -Naur kernel-source-2.4.20-orig/net/core/dev.c kernel-source-2.4.20/net/core/dev.c
--- kernel-source-2.4.20-orig/net/core/dev.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/net/core/dev.c	2003-02-03 18:33:17.000000000 +0100
@@ -182,6 +182,9 @@
  
 static struct notifier_block *netdev_chain=NULL;
 
+/* Click: input packet handlers, might steal packets from net_rx_action. */
+static struct notifier_block *net_in_chain = 0;
+
 /*
  *	Device drivers call our routines to queue packets here. We empty the
  *	queue in the local softnet handler.
@@ -1379,6 +1382,22 @@
 }
 
 
+/*
+ * Click: Allow Click to ask to intercept input packets.
+ */
+int
+register_net_in(struct notifier_block *nb)
+{
+  return notifier_chain_register(&net_in_chain, nb);
+}
+
+int
+unregister_net_in(struct notifier_block *nb)
+{
+  return notifier_chain_unregister(&net_in_chain, nb);
+}
+
+
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
 #endif
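
To show how I understand this hook is meant to be used: a module registers a
notifier_block whose handler receives the backlog queue length and the
sk_buff (per the notifier_call_chain call added further down), and returns
NOTIFY_STOP_MASK to steal the packet. An untested sketch; claimed_dev and
the queueing step are placeholders of mine:

/* Untested sketch of a Click-side packet thief; claimed_dev is a placeholder. */
static struct net_device *claimed_dev;

static int click_net_in(struct notifier_block *self, unsigned long backlog_len,
			void *v)
{
	struct sk_buff *skb = (struct sk_buff *)v;

	if (skb->dev != claimed_dev)
		return NOTIFY_DONE;		/* not ours, Linux keeps it */
	/* ... queue skb into the router ... */
	return NOTIFY_STOP_MASK;		/* we consumed the skb */
}

static struct notifier_block click_nb = { click_net_in, NULL, 0 };
/* register_net_in(&click_nb) in init_module(),
 * unregister_net_in(&click_nb) in cleanup_module(). */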
@@ -1434,62 +1453,6 @@
 
 	skb->h.raw = skb->nh.raw = skb->data;
 
-	pt_prev = NULL;
-	for (ptype = ptype_all; ptype; ptype = ptype->next) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
-			if (pt_prev) {
-				if (!pt_prev->data) {
-					ret = deliver_to_old_ones(pt_prev, skb, 0);
-				} else {
-					atomic_inc(&skb->users);
-					ret = pt_prev->func(skb, skb->dev, pt_prev);
-				}
-			}
-			pt_prev = ptype;
-		}
-	}
-
-#ifdef CONFIG_NET_DIVERT
-	if (skb->dev->divert && skb->dev->divert->divert)
-		ret = handle_diverter(skb);
-#endif /* CONFIG_NET_DIVERT */
-			
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-	if (skb->dev->br_port != NULL &&
-	    br_handle_frame_hook != NULL) {
-		return handle_bridge(skb, pt_prev);
-	}
-#endif
-
-	for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) {
-		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
-			if (pt_prev) {
-				if (!pt_prev->data) {
-					ret = deliver_to_old_ones(pt_prev, skb, 0);
-				} else {
-					atomic_inc(&skb->users);
-					ret = pt_prev->func(skb, skb->dev, pt_prev);
-				}
-			}
-			pt_prev = ptype;
-		}
-	}
-
-	if (pt_prev) {
-		if (!pt_prev->data) {
-			ret = deliver_to_old_ones(pt_prev, skb, 1);
-		} else {
-			ret = pt_prev->func(skb, skb->dev, pt_prev);
-		}
-	} else {
-		kfree_skb(skb);
-		/* Jamal, now you will not able to escape explaining
-		 * me how you were going to use this. :-)
-		 */
-		ret = NET_RX_DROP;
-	}
-
 	return ret;
 }
 
@@ -1515,6 +1478,20 @@
 
 		netif_receive_skb(skb);
 
+		/* Click: may want to steal the packet */
+		if (notifier_call_chain(&net_in_chain,
+					skb_queue_len(&queue->input_pkt_queue),
+					skb) & NOTIFY_STOP_MASK) {
+		  dev_put(dev);
+		  continue;
+		}
+	
+		/* Click: dispatch based on protocol type */
+		if (ptype_dispatch(skb, skb->protocol)) {
+		  dev_put(dev);
+		  continue;
+		}
+
 		dev_put(dev);
 
 		work++;
@@ -1555,6 +1532,86 @@
 	return 0;
 }
 
+/*
+ * Click: Hand a packet to the ordinary Linux protocol stack.
+ * Broke this out of netif_receive_skb so that Click can call it.
+ * Always frees the skb one way or another.
+ *
+ * skb->pkt_type needs to be set to PACKET_{BROADCAST,MULTICAST,OTHERHOST}.
+ * skb->mac.raw probably must point to the ether header.
+ * skb->protocol must be set to an htons(ETHERTYPE_*) value.
+ * skb->data must point to the ethernet payload (e.g. the IP header).
+ * skb->nh.raw must point to the ethernet payload as well.
+ *
+ * Returns 1 if the caller should skip budget, etc.
+ */
+int ptype_dispatch(struct sk_buff *skb, unsigned short type)
+{
+	struct packet_type *ptype, *pt_prev;
+
+	pt_prev = NULL;
+	for (ptype = ptype_all; ptype; ptype = ptype->next) {
+		if (!ptype->dev || ptype->dev == skb->dev) {
+			if (pt_prev) {
+				if (!pt_prev->data) {
+					deliver_to_old_ones(pt_prev, skb, 0);
+				} else {
+					atomic_inc(&skb->users);
+					pt_prev->func(skb,
+						      skb->dev,
+						      pt_prev);
+				}
+			}
+			pt_prev = ptype;
+		}
+	}
+
+	/* Click: exit if sniffers only */
+	if (type == 0xFFFF)
+		goto done;
+	
+#ifdef CONFIG_NET_DIVERT
+	if (skb->dev->divert && skb->dev->divert->divert)
+		handle_diverter(skb);
+#endif /* CONFIG_NET_DIVERT */
+
+			
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	if (skb->dev->br_port != NULL &&
+	    br_handle_frame_hook != NULL) {
+		handle_bridge(skb, pt_prev);
+		return 1;
+	}
+#endif
+
+	for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) {
+		if (ptype->type == type &&
+		    (!ptype->dev || ptype->dev == skb->dev)) {
+			if (pt_prev) {
+				if (!pt_prev->data)
+					deliver_to_old_ones(pt_prev, skb, 0);
+				else {
+					atomic_inc(&skb->users);
+					pt_prev->func(skb,
+						      skb->dev,
+						      pt_prev);
+				}
+			}
+			pt_prev = ptype;
+		}
+	}
+
+done:
+	if (pt_prev) {
+		if (!pt_prev->data)
+			deliver_to_old_ones(pt_prev, skb, 1);
+		else
+			pt_prev->func(skb, skb->dev, pt_prev);
+	} else
+		kfree_skb(skb);
+	return 0;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
 	int this_cpu = smp_processor_id();
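
For the reverse direction, handing a packet from Click back to Linux would
look something like the sketch below, following the field contract documented
above ptype_dispatch(). Untested; hand_to_linux() is made up, and a real
driver would normally get these fields from eth_type_trans():

/* Untested sketch: give an ethernet frame back to the Linux stack.
 * The field setup follows the comment above ptype_dispatch(). */
static void hand_to_linux(struct sk_buff *skb)
{
	skb->pkt_type = PACKET_HOST;	/* or BROADCAST/MULTICAST/OTHERHOST */
	skb->mac.raw = skb->data;	/* ether header */
	skb->protocol = ((struct ethhdr *)skb->data)->h_proto; /* network order */
	skb_pull(skb, ETH_HLEN);	/* data -> ethernet payload */
	skb->nh.raw = skb->data;
	ptype_dispatch(skb, skb->protocol); /* frees the skb one way or another */
}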
diff -Naur kernel-source-2.4.20-orig/net/core/skbuff.c kernel-source-2.4.20/net/core/skbuff.c
--- kernel-source-2.4.20-orig/net/core/skbuff.c	2003-01-12 21:54:43.000000000 +0100
+++ kernel-source-2.4.20/net/core/skbuff.c	2003-02-03 18:33:17.000000000 +0100
@@ -443,6 +443,65 @@
 #endif
 }
 
+/* Click: attempt to recycle an sk_buff. If it can be recycled, return it
+ * without reinitializing any bits. */
+struct sk_buff *skb_recycle(struct sk_buff *skb)
+{
+	if (atomic_dec_and_test(&skb->users)) { 
+
+		if (skb->list) {
+		 	printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+			       "on a list (from %p).\n", NET_CALLER(skb));
+			BUG();
+		}
+
+		dst_release(skb->dst); 
+		if(skb->destructor) {
+			if (in_irq()) {
+				printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
+					NET_CALLER(skb));
+			}
+			skb->destructor(skb);
+		}
+#ifdef CONFIG_NETFILTER
+		nf_conntrack_put(skb->nfct);
+#endif
+		skb_headerinit(skb, NULL, 0);
+
+		if (!skb->cloned ||
+		    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+			if (skb_shinfo(skb)->nr_frags) {
+				int i;
+				for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+					put_page(skb_shinfo(skb)->frags[i].page);
+			}
+
+			if (skb_shinfo(skb)->frag_list)
+				skb_drop_fraglist(skb);
+
+			/* Load the data pointers. */
+			skb->data = skb->head;
+			skb->tail = skb->data;
+			/* end and truesize should never have changed */
+			/* skb->end = skb->data + skb->truesize; */
+
+			/* set up other state */
+			skb->len = 0;
+			skb->cloned = 0;
+
+			atomic_set(&skb->users, 1);
+			atomic_set(&(skb_shinfo(skb)->dataref), 1);
+
+			return skb;
+		}
+
+		skb_head_to_pool(skb);
+	}
+
+	return 0;
+}
+
+
 /**
  *	skb_copy	-	create private copy of an sk_buff
  *	@skb: buffer to copy
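
The point of skb_recycle(), as I read it, is the fast path where a
just-transmitted buffer goes straight back into the rx ring without a trip
through the allocator. Untested sketch; recycle_tx_buffers() is mine:

/* Untested sketch: feed skbs cleaned off the tx ring back to rx. */
static void recycle_tx_buffers(struct net_device *dev)
{
	struct sk_buff *done = dev->tx_clean(dev);	/* sent skbs */
	struct sk_buff *fresh = NULL, *skb, *next;

	for (skb = done; skb; skb = next) {
		next = skb->next;
		skb = skb_recycle(skb);		/* NULL if not reusable */
		if (skb) {
			skb->next = fresh;
			fresh = skb;
		}
	}
	dev->rx_refill(dev, &fresh);		/* survivors refill the ring */
}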
diff -Naur kernel-source-2.4.20-orig/net/ipv4/arp.c kernel-source-2.4.20/net/ipv4/arp.c
--- kernel-source-2.4.20-orig/net/ipv4/arp.c	2002-11-29 00:53:15.000000000 +0100
+++ kernel-source-2.4.20/net/ipv4/arp.c	2003-02-03 18:33:17.000000000 +0100
@@ -318,6 +318,7 @@
 {
 	u32 saddr;
 	u8  *dst_ha = NULL;
+	u8  dst_ha_buf[MAX_ADDR_LEN+sizeof(unsigned long)];
 	struct net_device *dev = neigh->dev;
 	u32 target = *(u32*)neigh->primary_key;
 	int probes = atomic_read(&neigh->probes);
@@ -330,8 +331,8 @@
 	if ((probes -= neigh->parms->ucast_probes) < 0) {
 		if (!(neigh->nud_state&NUD_VALID))
 			printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
-		dst_ha = neigh->ha;
-		read_lock_bh(&neigh->lock);
+		memcpy(dst_ha_buf, neigh->ha, sizeof(neigh->ha));
+		dst_ha = dst_ha_buf;
 	} else if ((probes -= neigh->parms->app_probes) < 0) {
 #ifdef CONFIG_ARPD
 		neigh_app_ns(neigh);
@@ -341,8 +342,6 @@
 
 	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
 		 dst_ha, dev->dev_addr, NULL);
-	if (dst_ha)
-		read_unlock_bh(&neigh->lock);
 }
 
 static int arp_filter(__u32 sip, __u32 tip, struct net_device *dev)
diff -Naur kernel-source-2.4.20-orig/net/netsyms.c kernel-source-2.4.20/net/netsyms.c
--- kernel-source-2.4.20-orig/net/netsyms.c	2003-01-12 21:54:43.000000000 +0100
+++ kernel-source-2.4.20/net/netsyms.c	2003-02-03 18:33:17.000000000 +0100
@@ -150,6 +150,7 @@
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 EXPORT_SYMBOL(skb_copy_and_csum_dev);
 EXPORT_SYMBOL(skb_copy_expand);
+EXPORT_SYMBOL(skb_recycle);
 EXPORT_SYMBOL(___pskb_trim);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(pskb_expand_head);
@@ -277,6 +278,11 @@
 EXPORT_SYMBOL(register_inetaddr_notifier);
 EXPORT_SYMBOL(unregister_inetaddr_notifier);
 
+/* Click */
+EXPORT_SYMBOL(register_net_in);
+EXPORT_SYMBOL(unregister_net_in);
+EXPORT_SYMBOL(ptype_dispatch);
+
 /* needed for ip_gre -cw */
 EXPORT_SYMBOL(ip_statistics);
 
-------------- next part --------------
diff -Naur click-1.2.4-orig/elements/linuxmodule/tohost.cc click-1.2.4-replaced-get-fast-time/elements/linuxmodule/tohost.cc
--- click-1.2.4-orig/elements/linuxmodule/tohost.cc	2003-02-03 18:24:08.000000000 +0100
+++ click-1.2.4-replaced-get-fast-time/elements/linuxmodule/tohost.cc	2003-02-03 18:27:27.000000000 +0100
@@ -116,8 +116,12 @@
   // be nice to libpcap
   if (skb->stamp.tv_sec == 0) {
 #ifndef CONFIG_CPU_IS_SLOW
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
     get_fast_time(&skb->stamp);
 #else
+    do_gettimeofday(&skb->stamp);
+#endif
+#else
     skb->stamp = xtime;
 #endif
   }
diff -Naur click-1.2.4-orig/elements/linuxmodule/tohostsniffers.cc click-1.2.4-replaced-get-fast-time/elements/linuxmodule/tohostsniffers.cc
--- click-1.2.4-orig/elements/linuxmodule/tohostsniffers.cc	2003-02-03 18:25:46.000000000 +0100
+++ click-1.2.4-replaced-get-fast-time/elements/linuxmodule/tohostsniffers.cc	2003-02-03 18:27:39.000000000 +0100
@@ -109,8 +109,12 @@
   // be nice to libpcap
   if (skb->stamp.tv_sec == 0) {
 #ifndef CONFIG_CPU_IS_SLOW
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
     get_fast_time(&skb->stamp);
 #else
+    do_gettimeofday(&skb->stamp);
+#endif
+#else
     skb->stamp = xtime;
 #endif
   }
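
Since the same four-line #if dance now appears in two elements, it might be
worth hiding it behind a single macro. Something like this (hypothetical, not
part of the patch, and it ignores the CONFIG_CPU_IS_SLOW case for brevity):

/* Hypothetical shim: get_fast_time() went away around 2.4.18,
 * so fall through to do_gettimeofday() on newer kernels. */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
# define click_gettime(tvp)	get_fast_time(tvp)
#else
# define click_gettime(tvp)	do_gettimeofday(tvp)
#endif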
-------------- next part --------------
A non-text attachment was scrubbed...
Name: cxxabi.cc
Type: text/x-c++src
Size: 918 bytes
Desc: not available
Url : https://amsterdam.lcs.mit.edu/pipermail/click/attachments/20030203/bb485887/cxxabi.bin