[Click] e1000-5.7.6 polling driver
Eddie Kohler
kohler at cs.ucla.edu
Mon Sep 18 22:14:22 EDT 2006
Hi Beyers,
I actually looked through this >1-year-old diff a bit to see if I could see
any obvious bugs in our driver. There are differences, but no glaring bugs,
and since you appear to be using our/Max's more recent driver, I assume it's
OK to drop the diff. Nevertheless it would be an interesting exercise, for
either the 6.x or 7.x driver, to look more closely at the relationship between
rx_poll and clean_rx_irq, etc....
Thanks as usual!
Eddie
Beyers Cronje wrote:
> Hi Eddie,
>
> Eish, I should've checked the repository, could've spared me a couple
> of gray hairs :)
>
> There are some differences as I've tried to use as much of Intel's
> code as possible so it'll be easier for others to follow. For example:
>
> e1000_rx_poll is roughly based on e1000_clean_rx_irq
> e1000_rx_refill on e1000_alloc_rx_buffers
> etc..
>
> Attached are the diffs between your e1000_main.c and mine obtained by:
>
> diff -Naur e1000_main.c.eddie e1000_main.c.beyers > e1000_main.diff
>
> If you want to have a look at all the sources let me know and I'll
> forward it to you.
>
> Beyers
>
>
> On Thu, 10 Mar 2005 05:06:39 -0800, Eddie Kohler <kohler at cs.ucla.edu> wrote:
>> Hi Beyers,
>>
>> Very cool! So we've got a version of the 5.7.6 driver checked into the Click
>> repository at this point, in drivers/e1000-5.x. I'd be interested in the diffs
>> between our versions.
>>
>> Eddie
>>
>>
>> Beyers Cronje wrote:
>>> Hi,
>>>
>>> I've got a working click polling driver based on Intel's latest e1000
>>> v5.7.6 driver. Anyone interested in testing/using the driver pop me an
>>> email.
>>>
>>> Stability seems good. Current interface stats running in polling mode:
>>> RX packets:519393113 errors:25 dropped:0 overruns:0 frame:25
>>> TX shows 0 errors.
>>>
>>> (Not sure where the 25 frame errors are from, but minute in comparison
>>> with RX packets)
>>>
>>> I haven't done much performance testing, but should theoretically have
>>> similar performance to the click supplied polling driver.
>>>
>>> Tested on Linux 2.4.26 with 82541GI and 82545GM controllers (neither
>>> controller is supported by the supplied click driver).
>>>
>>> The driver should provide polling support for the following controllers:
>>>
>>> E1000_DEV_ID_82542 0x1000
>>> E1000_DEV_ID_82543GC_FIBER 0x1001
>>> E1000_DEV_ID_82543GC_COPPER 0x1004
>>> E1000_DEV_ID_82544EI_COPPER 0x1008
>>> E1000_DEV_ID_82544EI_FIBER 0x1009
>>> E1000_DEV_ID_82544GC_COPPER 0x100C
>>> E1000_DEV_ID_82544GC_LOM 0x100D
>>> E1000_DEV_ID_82540EM 0x100E
>>> E1000_DEV_ID_82540EM_LOM 0x1015
>>> E1000_DEV_ID_82540EP_LOM 0x1016
>>> E1000_DEV_ID_82540EP 0x1017
>>> E1000_DEV_ID_82540EP_LP 0x101E
>>> E1000_DEV_ID_82545EM_COPPER 0x100F
>>> E1000_DEV_ID_82545EM_FIBER 0x1011
>>> E1000_DEV_ID_82545GM_COPPER 0x1026
>>> E1000_DEV_ID_82545GM_FIBER 0x1027
>>> E1000_DEV_ID_82545GM_SERDES 0x1028
>>> E1000_DEV_ID_82546EB_COPPER 0x1010
>>> E1000_DEV_ID_82546EB_FIBER 0x1012
>>> E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
>>> E1000_DEV_ID_82541EI 0x1013
>>> E1000_DEV_ID_82541EI_MOBILE 0x1018
>>> E1000_DEV_ID_82541ER 0x1078
>>> E1000_DEV_ID_82547GI 0x1075
>>> E1000_DEV_ID_82541GI 0x1076
>>> E1000_DEV_ID_82541GI_MOBILE 0x1077
>>> E1000_DEV_ID_82541GI_LF 0x107C
>>> E1000_DEV_ID_82546GB_COPPER 0x1079
>>> E1000_DEV_ID_82546GB_FIBER 0x107A
>>> E1000_DEV_ID_82546GB_SERDES 0x107B
>>> E1000_DEV_ID_82546GB_PCIE 0x108A
>>> E1000_DEV_ID_82547EI 0x1019
>>>
>>> Regards
>>>
>>> Beyers Cronje
>>> _______________________________________________
>>> click mailing list
>>> click at amsterdam.lcs.mit.edu
>>> https://amsterdam.lcs.mit.edu/mailman/listinfo/click
>>
>>
>> ------------------------------------------------------------------------
>>
>> --- e1000_main.c.eddie 2005-03-09 20:59:51.000000000 +0000
>> +++ e1000_main.c.beyers 2005-03-10 23:40:20.883853456 +0000
>> @@ -223,7 +223,6 @@
>>
>> /* For Click polling */
>> static int e1000_tx_pqueue(struct net_device *dev, struct sk_buff *skb);
>> -static int e1000_xmit_frame_clickpoll(struct net_device *netdev, struct sk_buff *skb);
>> static int e1000_tx_start(struct net_device *dev);
>> static int e1000_rx_refill(struct net_device *dev, struct sk_buff **);
>> static int e1000_tx_eob(struct net_device *dev);
>> @@ -557,26 +556,28 @@
>> netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
>> netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
>> #endif
>> -#ifdef CONFIG_NET_POLL_CONTROLLER
>> - netdev->poll_controller = e1000_netpoll;
>> -#endif
>> - strcpy(netdev->name, pci_name(pdev));
>> -
>> - netdev->mem_start = mmio_start;
>> - netdev->mem_end = mmio_start + mmio_len;
>> - netdev->base_addr = adapter->hw.io_base;
>>
>> /* Click - polling extensions */
>> netdev->polling = 0;
>> netdev->rx_poll = e1000_rx_poll;
>> netdev->rx_refill = e1000_rx_refill;
>> - netdev->tx_queue = e1000_tx_pqueue; // e1000_xmit_frame_clickpoll;
>> + netdev->tx_queue = e1000_tx_pqueue;
>> netdev->tx_eob = e1000_tx_eob;
>> netdev->tx_start = e1000_tx_start;
>> netdev->tx_clean = e1000_tx_clean;
>> netdev->poll_off = e1000_poll_off;
>> netdev->poll_on = e1000_poll_on;
>>
>> +
>> +#ifdef CONFIG_NET_POLL_CONTROLLER
>> + netdev->poll_controller = e1000_netpoll;
>> +#endif
>> + strcpy(netdev->name, pci_name(pdev));
>> +
>> + netdev->mem_start = mmio_start;
>> + netdev->mem_end = mmio_start + mmio_len;
>> + netdev->base_addr = adapter->hw.io_base;
>> +
>> adapter->bd_number = cards_found;
>>
>> /* setup the private structure */
>> @@ -1599,6 +1600,9 @@
>> }
>> }
>>
>> +
>> +
>> +
>> /**
>> * e1000_watchdog - Timer Call-back
>> * @data: pointer to netdev cast into an unsigned long
>> @@ -1607,26 +1611,26 @@
>> static void
>> e1000_watchdog(unsigned long data)
>> {
>> - struct e1000_adapter *adapter = (struct e1000_adapter *) data;
>> + struct e1000_adapter *adapter = (struct e1000_adapter *) data;
>>
>> - if (adapter->netdev->polling)
>> - adapter->do_poll_watchdog = 1;
>> - else
>> - e1000_watchdog_1(adapter);
>> + if(adapter->netdev->polling)
>> + adapter->do_poll_watchdog = 1;
>> + else
>> + e1000_watchdog_1(adapter);
>>
>> - /* Reset the timer */
>> - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
>> + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
>> }
>>
>> void
>> e1000_watchdog_1(struct e1000_adapter *adapter)
>> {
>> +// struct e1000_adapter *adapter = (struct e1000_adapter *) data;
>> struct net_device *netdev = adapter->netdev;
>> struct e1000_desc_ring *txdr = &adapter->tx_ring;
>> uint32_t link;
>>
>> e1000_check_for_link(&adapter->hw);
>> -
>> +
>> if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
>> !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
>> link = !adapter->hw.serdes_link_down;
>> @@ -2377,21 +2381,14 @@
>> E1000_WRITE_REG(&adapter->hw, IMC, ~0);
>> }
>>
>> - /* 26.Jun.2004 - Do not print a message if we get an interrupt
>> - in polling mode. Andy Van Maele reports that e1000
>> - adapters can share interrupts with other devices, such as
>> - other network cards. Thus, it is not necessarily a problem
>> - if we get an interrupt; and printing a message is very
>> - expensive. So scrap it. It might be better to keep a
>> - counter. */
>> - if (!netdev->polling)
>> - for(i = 0; i < E1000_MAX_INTR; i++)
>> - if(unlikely(!e1000_clean_rx_irq(adapter) &
>> - !e1000_clean_tx_irq(adapter)))
>> - break;
>> + if(!netdev->polling) {
>> + for(i = 0; i < E1000_MAX_INTR; i++)
>> + if(unlikely(!e1000_clean_rx_irq(adapter) &
>> + !e1000_clean_tx_irq(adapter)))
>> + break;
>> + }
>>
>> - if ((hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
>> - && !netdev->polling)
>> + if((hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) && !netdev->polling)
>> e1000_irq_enable(adapter);
>> #endif
>> #ifdef E1000_COUNT_ICR
>> @@ -3291,281 +3288,218 @@
>> }
>> #endif
>>
>> -
>> /* Click polling support */
>>
>> static struct sk_buff *
>> e1000_rx_poll(struct net_device *dev, int *want)
>> {
>> + /*
>> + * Based on e1000_clean_rx_irq
>> + *
>> + */
>> struct e1000_adapter *adapter = dev->priv;
>> + struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
>> + struct net_device *netdev = adapter->netdev;
>> struct pci_dev *pdev = adapter->pdev;
>> struct e1000_rx_desc *rx_desc;
>> - int i;
>> - uint32_t length;
>> + struct e1000_buffer *buffer_info;
>> struct sk_buff *skb;
>> - uint8_t last_byte;
>> unsigned long flags;
>> + uint32_t length;
>> + uint8_t last_byte;
>> + unsigned int i;
>> + boolean_t cleaned = FALSE;
>> struct sk_buff *skb_head = 0, *skb_last = 0;
>> - int good;
>> int got = 0;
>>
>> - i = adapter->rx_ring.next_to_clean;
>> - rx_desc = E1000_RX_DESC(adapter->rx_ring, i);
>> + i = rx_ring->next_to_clean;
>> + rx_desc = E1000_RX_DESC(*rx_ring, i);
>>
>> - while(got < *want){
>> - if((rx_desc->status & E1000_RXD_STAT_DD) == 0)
>> - break;
>> - pci_unmap_single(pdev, adapter->rx_ring.buffer_info[i].dma,
>> - adapter->rx_ring.buffer_info[i].length,
>> - PCI_DMA_FROMDEVICE);
>> + while(rx_desc->status & E1000_RXD_STAT_DD && got < *want) {
>> + buffer_info = &rx_ring->buffer_info[i];
>> + cleaned = TRUE;
>>
>> - skb = adapter->rx_ring.buffer_info[i].skb;
>> + pci_unmap_single(pdev,
>> + buffer_info->dma,
>> + buffer_info->length,
>> + PCI_DMA_FROMDEVICE);
>> +
>> + skb = buffer_info->skb;
>> length = le16_to_cpu(rx_desc->length);
>> - good = 1;
>>
>> - if(!(rx_desc->status & E1000_RXD_STAT_EOP))
>> - good = 0;
>> -
>> - if(good && (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)){
>> + if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
>> + /* All receives must fit into a single buffer */
>> + E1000_DBG("%s: Receive packet consumed multiple"
>> + " buffers\n", netdev->name);
>> + dev_kfree_skb(skb);
>> + goto next_desc;
>> + }
>> +
>> + if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
>> last_byte = *(skb->data + length - 1);
>> - if(TBI_ACCEPT(&adapter->hw,rx_desc->status,rx_desc->errors,length,
>> - last_byte)) {
>> + if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
>> + rx_desc->errors, length, last_byte)) {
>> spin_lock_irqsave(&adapter->stats_lock, flags);
>> - e1000_tbi_adjust_stats(&adapter->hw, &adapter->stats,
>> - length, skb->data);
>> - spin_unlock_irqrestore(&adapter->stats_lock, flags);
>> + e1000_tbi_adjust_stats(&adapter->hw,
>> + &adapter->stats,
>> + length, skb->data);
>> + spin_unlock_irqrestore(&adapter->stats_lock,
>> + flags);
>> length--;
>> } else {
>> - good = 0;
>> + dev_kfree_skb(skb);
>> + goto next_desc;
>> }
>> }
>>
>> - if(good){
>> - skb_put(skb, length - CRC_LENGTH);
>> - e1000_rx_checksum(adapter, rx_desc, skb);
>> - skb_pull(skb, dev->hard_header_len);
>> - if (got == 0) {
>> - skb_head = skb;
>> - skb_last = skb;
>> - skb_last->next = NULL;
>> - } else {
>> - skb_last->next = skb;
>> - skb->next = NULL;
>> - skb_last = skb;
>> - }
>> - got++;
>> + /* Good Receive */
>> + skb_put(skb, length - ETHERNET_FCS_SIZE);
>> + e1000_rx_checksum(adapter, rx_desc, skb);
>> + skb_pull(skb, dev->hard_header_len);
>> + if (got == 0) {
>> + skb_head = skb;
>> + skb_last = skb;
>> + skb_last->next = NULL;
>> } else {
>> - dev_kfree_skb(skb);
>> + skb_last->next = skb;
>> + skb->next = NULL;
>> + skb_last = skb;
>> }
>> -
>> -#if 0
>> - memset(rx_desc, 0, 16);
>> -#endif
>> + got++;
>>
>> -#if 0
>> - /*
>> - * This mb() generates a "lock addl" even on a uniprocessor.
>> - * It's slow (slows down ip forwarding by 15%?) and I do
>> - * not believe it's needed.
>> - */
>> - mb();
>> -#endif
>> - adapter->rx_ring.buffer_info[i].skb = NULL;
>> - i = i + 1;
>> - if(i >= adapter->rx_ring.count)
>> - i = 0;
>> - rx_desc = E1000_RX_DESC(adapter->rx_ring, i);
>> + /* Receive Checksum Offload */
>> + e1000_rx_checksum(adapter, rx_desc, skb);
>> +
>> +//XXX skb->protocol = eth_type_trans(skb, netdev); //Polling does not work with this seq ??
>> + netdev->last_rx = jiffies;
>> +
>> +next_desc:
>> + rx_desc->status = 0;
>> + buffer_info->skb = NULL;
>> + if(unlikely(++i == rx_ring->count)) i = 0;
>> +
>> + rx_desc = E1000_RX_DESC(*rx_ring, i);
>> }
>>
>> - adapter->rx_ring.next_to_clean = i;
>> + rx_ring->next_to_clean = i;
>> *want = got;
>> -
>> return skb_head;
>> }
>>
>> int
>> e1000_rx_refill(struct net_device *dev, struct sk_buff **skbs)
>> {
>> + /* Based on e1000_alloc_rx_buffers */
>> struct e1000_adapter *adapter = dev->priv;
>> + struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
>> + struct net_device *netdev = adapter->netdev;
>> struct pci_dev *pdev = adapter->pdev;
>> struct e1000_rx_desc *rx_desc;
>> - int i, nfilled = 0, last_filled = -1;
>> + struct e1000_buffer *buffer_info;
>> + struct sk_buff *skb;
>> + unsigned int i, bufsz;
>> struct sk_buff *skb_list;
>>
>> if(skbs == 0)
>> - return E1000_NEW_RX_DESC_UNUSED(&adapter->rx_ring);
>> + return E1000_DESC_UNUSED(&adapter->rx_ring);
>>
>> - i = adapter->rx_ring.next_to_use;
>> skb_list = *skbs;
>> -
>> - while(adapter->rx_ring.buffer_info[i].skb == NULL && skb_list){
>> - struct sk_buff *skb = skb_list;
>> - skb_list = skb_list->next;
>> - rx_desc = E1000_RX_DESC(adapter->rx_ring, i);
>> - skb->dev = dev;
>> - adapter->rx_ring.buffer_info[i].skb = skb;
>> - adapter->rx_ring.buffer_info[i].length = adapter->rx_buffer_len;
>> - adapter->rx_ring.buffer_info[i].dma =
>> - pci_map_single(pdev, skb->data, adapter->rx_buffer_len,
>> - PCI_DMA_FROMDEVICE);
>> - rx_desc->status = 0;
>> - rx_desc->buffer_addr = cpu_to_le64(adapter->rx_ring.buffer_info[i].dma);
>> - last_filled = i;
>> - i = i + 1;
>> - if(i >= adapter->rx_ring.count)
>> - i = 0;
>> - nfilled++;
>> - }
>> -
>> - *skbs = skb_list;
>> -
>> - adapter->rx_ring.next_to_use = i;
>> - if(nfilled){
>> -#if 0
>> - mb();
>> -#endif
>> - /*
>> - * Intel driver code sets RDT to last filled slot.
>> - * e1000 manual implies (I think) one beyond.
>> - */
>> - E1000_WRITE_REG(&adapter->hw, RDT, last_filled);
>> - }
>> -
>> - /*
>> - * Update statistics counters, check link.
>> - * do_poll_watchdog is set by the timer interrupt e1000_watchdog(),
>> - * but we don't want to do the work in an interrupt (since it may
>> - * happen while polling code is active), so defer it to here.
>> - */
>> - if(adapter->do_poll_watchdog){
>> - adapter->do_poll_watchdog = 0;
>> - e1000_watchdog_1(adapter);
>> - }
>> -
>> - return E1000_NEW_RX_DESC_UNUSED(&adapter->rx_ring);
>> -}
>>
>> -static int
>> -e1000_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
>> -{
>> - /*
>> - * This function is just a streamlined version of
>> - * return e1000_xmit_frame(skb, netdev);
>> - */
>> -
>> - struct e1000_adapter *adapter = netdev->priv;
>> - struct pci_dev *pdev = adapter->pdev;
>> - struct e1000_tx_desc *tx_desc;
>> - int i, len, offset, txd_needed;
>> - uint32_t txd_upper, txd_lower;
>> -
>> - if(unlikely(!netif_carrier_ok(netdev))) {
>> - netif_stop_queue(netdev);
>> - return NETDEV_TX_BUSY;
>> - }
>> -
>> - txd_needed = TXD_USE_COUNT(skb->len, E1000_MAX_TXD_PWR);
>> + i = rx_ring->next_to_use;
>> + buffer_info = &rx_ring->buffer_info[i];
>>
>> - /* make sure there are enough Tx descriptors available in the ring */
>> - if(E1000_DESC_UNUSED(&adapter->tx_ring) <= (txd_needed + 1)) {
>> - adapter->net_stats.tx_dropped++;
>> - netif_stop_queue(netdev);
>> - return NETDEV_TX_BUSY;
>> - }
>> + while(!buffer_info->skb && skb_list) {
>> + bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
>>
>> - txd_upper = 0;
>> - txd_lower = adapter->txd_cmd;
>> -
>> - if(e1000_tx_csum(adapter, skb)){
>> - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
>> - txd_upper |= E1000_TXD_POPTS_TXSM << 8;
>> - }
>> + skb = skb_list;
>> + skb_list = skb_list->next;
>>
>> - i = adapter->tx_ring.next_to_use;
>> - tx_desc = E1000_TX_DESC(adapter->tx_ring, i);
>> + /* fix for errata 23, cant cross 64kB boundary */
>> + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
>> + DPRINTK(RX_ERR,ERR, "skb align check failed: %u bytes at %p\n", bufsz, skb->data);
>> + break;
>> +
>> + }
>>
>> - len = skb->len;
>> - offset = 0;
>> + /* Make buffer alignment 2 beyond a 16 byte boundary
>> + * this will result in a 16 byte aligned IP header after
>> + * the 14 byte MAC header is removed
>> + */
>> + skb_reserve(skb, NET_IP_ALIGN);
>>
>> - adapter->tx_ring.buffer_info[i].length = len;
>> - adapter->tx_ring.buffer_info[i].dma =
>> - pci_map_single(pdev,
>> - skb->data + offset,
>> - len,
>> - PCI_DMA_TODEVICE);
>> + skb->dev = netdev;
>>
>> - tx_desc->buffer_addr = cpu_to_le64(adapter->tx_ring.buffer_info[i].dma);
>> - tx_desc->lower.data = cpu_to_le32(txd_lower | len);
>> - tx_desc->upper.data = cpu_to_le32(txd_upper);
>> -
>> - /* EOP and SKB pointer go with the last fragment */
>> - tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP);
>> - adapter->tx_ring.buffer_info[i].skb = skb;
>> -
>> - i = i + 1;
>> - if(i >= adapter->tx_ring.count)
>> - i = 0;
>> + buffer_info->skb = skb;
>> + buffer_info->length = adapter->rx_buffer_len;
>> + buffer_info->dma = pci_map_single(pdev,
>> + skb->data,
>> + adapter->rx_buffer_len,
>> + PCI_DMA_FROMDEVICE);
>>
>> - /* Move the HW Tx Tail Pointer */
>> - adapter->tx_ring.next_to_use = i;
>> -
>> - netdev->trans_start = jiffies;
>> -
>> - return 0;
>> -}
>> + /* fix for errata 23, cant cross 64kB boundary */
>> + if(!e1000_check_64k_bound(adapter,
>> + (void *)(unsigned long)buffer_info->dma,
>> + adapter->rx_buffer_len)) {
>> + DPRINTK(RX_ERR,ERR,
>> + "dma align check failed: %u bytes at %ld\n",
>> + adapter->rx_buffer_len, (unsigned long)buffer_info->dma);
>>
>> + buffer_info->skb = NULL;
>>
>> + pci_unmap_single(pdev,
>> + buffer_info->dma,
>> + adapter->rx_buffer_len,
>> + PCI_DMA_FROMDEVICE);
>> + break; /* while !buffer_info->skb */
>> + }
>>
>> -static inline void
>> -e1000_tx_queue_clickpoll(struct e1000_adapter *adapter, int count, int tx_flags)
>> -{
>> - struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
>> - struct e1000_tx_desc *tx_desc = NULL;
>> - struct e1000_buffer *buffer_info;
>> - uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
>> - unsigned int i;
>> + rx_desc = E1000_RX_DESC(*rx_ring, i);
>> + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
>>
>> - if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
>> - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
>> - E1000_TXD_CMD_TSE;
>> - txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
>> - }
>> + if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
>> + /* Force memory writes to complete before letting h/w
>> + * know there are new descriptors to fetch. (Only
>> + * applicable for weak-ordered memory model archs,
>> + * such as IA-64). */
>> + wmb();
>>
>> - if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
>> - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
>> - txd_upper |= E1000_TXD_POPTS_TXSM << 8;
>> - }
>> + E1000_WRITE_REG(&adapter->hw, RDT, i);
>> + }
>>
>> - if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
>> - txd_lower |= E1000_TXD_CMD_VLE;
>> - txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
>> + if(unlikely(++i == rx_ring->count)) i = 0;
>> + buffer_info = &rx_ring->buffer_info[i];
>> }
>>
>> - i = tx_ring->next_to_use;
>> -
>> - while(count--) {
>> - buffer_info = &tx_ring->buffer_info[i];
>> - tx_desc = E1000_TX_DESC(*tx_ring, i);
>> - tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
>> - tx_desc->lower.data =
>> - cpu_to_le32(txd_lower | buffer_info->length);
>> - tx_desc->upper.data = cpu_to_le32(txd_upper);
>> - if(unlikely(++i == tx_ring->count)) i = 0;
>> + rx_ring->next_to_use = i;
>> + *skbs = skb_list;
>> +
>> + /*
>> + * Update statistics counters, check link.
>> + * do_poll_watchdog is set by the timer interrupt e1000_watchdog(),
>> + * but we don't want to do the work in an interrupt (since it may
>> + * happen while polling code is active), so defer it to here.
>> + */
>> + if(adapter->do_poll_watchdog){
>> + adapter->do_poll_watchdog = 0;
>> + e1000_watchdog_1(adapter);
>> }
>>
>> - tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
>> -
>> - tx_ring->next_to_use = i;
>> + return E1000_DESC_UNUSED(&adapter->rx_ring);
>> }
>>
>> static int
>> -e1000_xmit_frame_clickpoll(struct net_device *netdev, struct sk_buff *skb)
>> +e1000_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
>> {
>> + /*
>> + * This function is just a streamlined version of
>> + * return e1000_xmit_frame(skb, netdev);
>> + */
>> struct e1000_adapter *adapter = netdev->priv;
>> unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
>> unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
>> unsigned int tx_flags = 0;
>> unsigned int len = skb->len;
>> + unsigned long flags;
>> unsigned int nr_frags = 0;
>> unsigned int mss = 0;
>> int count = 0;
>> @@ -3576,7 +3510,9 @@
>>
>> if(unlikely(skb->len <= 0)) {
>> dev_kfree_skb_any(skb);
>> - return NETDEV_TX_OK;
>> + printk("skb len <= 0");
>> +// return NETDEV_TX_OK;
>> + return 0;
>> }
>>
>> #ifdef NETIF_F_TSO
>> @@ -3613,21 +3549,44 @@
>> count += nr_frags;
>> #endif
>>
>> +#ifdef NETIF_F_LLTX
>> + local_irq_save(flags);
>> + if (!spin_trylock(&adapter->tx_lock)) {
>> + /* Collision - tell upper layer to requeue */
>> + local_irq_restore(flags);
>> + printk("collision occured");
>> +// return NETDEV_TX_LOCKED;
>> + return 0;
>> + }
>> +#else
>> + spin_lock_irqsave(&adapter->tx_lock, flags);
>> +#endif
>> +
>> /* need: count + 2 desc gap to keep tail from touching
>> * head, otherwise try next time */
>> if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
>> netif_stop_queue(netdev);
>> - return NETDEV_TX_BUSY;
>> + spin_unlock_irqrestore(&adapter->tx_lock, flags);
>> + printk("ERROR TX BUSY");
>> +// return NETDEV_TX_BUSY;
>> + return 0;
>> }
>>
>> if(unlikely(adapter->hw.mac_type == e1000_82547)) {
>> if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
>> netif_stop_queue(netdev);
>> mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
>> - return NETDEV_TX_BUSY;
>> + spin_unlock_irqrestore(&adapter->tx_lock, flags);
>> + printk("TX Busy");
>> +// return NETDEV_TX_BUSY;
>> + return 0;
>> }
>> }
>>
>> +#ifndef NETIF_F_LLTX
>> + spin_unlock_irqrestore(&adapter->tx_lock, flags);
>> +#endif
>> +
>> #ifdef NETIF_F_HW_VLAN_TX
>> if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
>> tx_flags |= E1000_TX_FLAGS_VLAN;
>> @@ -3642,7 +3601,7 @@
>> else if(likely(e1000_tx_csum(adapter, skb)))
>> tx_flags |= E1000_TX_FLAGS_CSUM;
>>
>> - e1000_tx_queue_clickpoll(adapter,
>> + e1000_tx_queue(adapter,
>> e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
>> tx_flags);
>>
>> @@ -3652,57 +3611,59 @@
>> /* Make sure there is space in the ring for the next send. */
>> if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
>> netif_stop_queue(netdev);
>> +
>> + spin_unlock_irqrestore(&adapter->tx_lock, flags);
>> #endif
>>
>> - return NETDEV_TX_OK;
>> + return 0;
>> }
>>
>> -
>> -
>> static int
>> e1000_tx_eob(struct net_device *dev)
>> {
>> - struct e1000_adapter *adapter = dev->priv;
>> - E1000_WRITE_REG(&adapter->hw, TDT, adapter->tx_ring.next_to_use);
>> - return 0;
>> + struct e1000_adapter *adapter = dev->priv;
>> + E1000_WRITE_REG(&adapter->hw, TDT, adapter->tx_ring.next_to_use);
>> + return 0;
>> }
>>
>> static int
>> e1000_tx_start(struct net_device *dev)
>> {
>> - /* printk("e1000_tx_start called\n"); */
>> - e1000_tx_eob(dev);
>> - return 0;
>> + /* printk("e1000_tx_start called\n"); */
>> + e1000_tx_eob(dev);
>> + return 0;
>> }
>>
>> static struct sk_buff *
>> e1000_tx_clean(struct net_device *netdev)
>> {
>> - /*
>> - * This function is a streamlined version of
>> - * return e1000_clean_tx_irq(adapter, 1);
>> - */
>> -
>> + /*
>> + * This function is a streamlined version of
>> + * return e1000_clean_tx_irq(adapter, 1);
>> + */
>> struct e1000_adapter *adapter = netdev->priv;
>> - struct pci_dev *pdev = adapter->pdev;
>> - int i;
>> - struct e1000_tx_desc *tx_desc;
>> + struct pci_dev *pdev = adapter->pdev;
>> + struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
>> + struct e1000_tx_desc *tx_desc, *eop_desc;
>> + unsigned int i, eop;
>> +
>> struct sk_buff *skb_head, *skb_last;
>>
>> skb_head = skb_last = 0;
>> +
>> + i = tx_ring->next_to_clean;
>> + eop = tx_ring->buffer_info[i].next_to_watch;
>> + eop_desc = E1000_TX_DESC(*tx_ring, eop);
>>
>> - i = adapter->tx_ring.next_to_clean;
>> - tx_desc = E1000_TX_DESC(adapter->tx_ring, i);
>> -
>> - while(tx_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
>> - if(adapter->tx_ring.buffer_info[i].dma != 0) {
>> - pci_unmap_page(pdev, adapter->tx_ring.buffer_info[i].dma,
>> - adapter->tx_ring.buffer_info[i].length,
>> - PCI_DMA_TODEVICE);
>> + while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
>> + /* pre-mature writeback of Tx descriptors */
>> + /* clear (free buffers and unmap pci_mapping) */
>> + /* previous_buffer_info */
>> + if(likely(adapter->tx_ring.buffer_info[i].dma != 0)) {
>> + pci_unmap_page(pdev, adapter->tx_ring.buffer_info[i].dma, adapter->tx_ring.buffer_info[i].length, PCI_DMA_TODEVICE);
>> adapter->tx_ring.buffer_info[i].dma = 0;
>> }
>> -
>> - if(adapter->tx_ring.buffer_info[i].skb != NULL) {
>> + if(likely(adapter->tx_ring.buffer_info[i].skb != NULL)) {
>> struct sk_buff *skb = adapter->tx_ring.buffer_info[i].skb;
>> if (skb_head == 0) {
>> skb_head = skb;
>> @@ -3715,59 +3676,62 @@
>> }
>> adapter->tx_ring.buffer_info[i].skb = NULL;
>> }
>> -
>> - i = i + 1;
>> - if(i >= adapter->tx_ring.count)
>> - i = 0;
>> -
>> +
>> + tx_desc = E1000_TX_DESC(*tx_ring, i);
>> + tx_desc->buffer_addr = 0;
>> + tx_desc->lower.data = 0;
>> tx_desc->upper.data = 0;
>> - tx_desc = E1000_TX_DESC(adapter->tx_ring, i);
>> +
>> + if(unlikely(++i == tx_ring->count)) i = 0;
>> + eop = tx_ring->buffer_info[i].next_to_watch;
>> + eop_desc = E1000_TX_DESC(*tx_ring, eop);
>> }
>>
>> - adapter->tx_ring.next_to_clean = i;
>> + tx_ring->next_to_clean = i;
>> +
>> + spin_lock(&adapter->tx_lock);
>>
>> - if(netif_queue_stopped(netdev) &&
>> - (E1000_DESC_UNUSED(&adapter->tx_ring) > E1000_TX_QUEUE_WAKE)) {
>> + if(netif_queue_stopped(netdev) && (E1000_DESC_UNUSED(&adapter->tx_ring) > E1000_TX_QUEUE_WAKE)) {
>> netif_start_queue(netdev);
>> }
>> + spin_unlock(&adapter->tx_lock);
>>
>> return skb_head;
>> }
>>
>> -
>> static int
>> e1000_poll_on(struct net_device *dev)
>> {
>> - struct e1000_adapter *adapter = dev->priv;
>> - unsigned long flags;
>> + struct e1000_adapter *adapter = dev->priv;
>> + unsigned long flags;
>>
>> - if (!dev->polling) {
>> - printk("e1000_poll_on\n");
>> + if (!dev->polling) {
>> + printk("e1000_poll_on\n");
>>
>> - save_flags(flags);
>> - cli();
>> + save_flags(flags);
>> + cli();
>>
>> - dev->polling = 2;
>> - e1000_irq_disable(adapter);
>> + dev->polling = 2;
>> + e1000_irq_disable(adapter);
>>
>> - restore_flags(flags);
>> - }
>> + restore_flags(flags);
>> + }
>>
>> - return 0;
>> + return 0;
>> }
>>
>> static int
>> e1000_poll_off(struct net_device *dev)
>> {
>> - struct e1000_adapter *adapter = dev->priv;
>> + struct e1000_adapter *adapter = dev->priv;
>>
>> - if (dev->polling > 0) {
>> - dev->polling = 0;
>> - e1000_irq_enable(adapter);
>> - printk("e1000_poll_off\n");
>> - }
>> + if(dev->polling > 0){
>> + dev->polling = 0;
>> + e1000_irq_enable(adapter);
>> + printk("e1000_poll_off\n");
>> + }
>>
>> - return 0;
>> + return 0;
>> }
>>
>> /* e1000_main.c */
More information about the click
mailing list