On Monday, 30 January 2012 at 15:57 +0100, Eric Dumazet wrote:

> Hmm, TX _completion_ is not run from tasklet but hardware IRQ, this is
> why I added the spin_lock_irqsave().
> 
> 
> Tasklet fires the TX, but hardware IRQ does the TX completion part.
> 
> This driver is ... interesting :)
> 
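
For reference, here is a minimal sketch of the two contexts involved
(illustrative only, not the driver's actual code; names other than
intr_handler() are made up and the bodies are elided):

static void tx_tasklet_func(unsigned long data)	/* softirq context */
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	/* The completion IRQ can fire on this CPU while we hold the
	 * lock, so interrupts must be disabled around the critical
	 * section or we deadlock against intr_handler() below. */
	spin_lock_irqsave(&np->lock, flags);
	/* ... hand TX descriptors to the hardware ... */
	spin_unlock_irqrestore(&np->lock, flags);
}

static irqreturn_t intr_handler(int irq, void *dev_instance)	/* hard IRQ */
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);

	/* Already running in hard IRQ context: a plain spin_lock()
	 * is enough here. */
	spin_lock(&np->lock);
	/* ... TX completion: reclaim finished descriptors ... */
	spin_unlock(&np->lock);
	return IRQ_HANDLED;
}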

Oh well, we must also make sure we hold np->lock in TX completion when
testing whether to call netif_wake_queue(); I missed that the lock was
released too early.
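
To make the race concrete, the bad interleaving looks roughly like this
(a sketch, not the driver's exact code):

	/*
	 * CPU A: start_tx (unlocked)        CPU B: TX completion (IRQ)
	 * --------------------------        --------------------------
	 * sees cur_tx - dirty_tx >=
	 *   TX_QUEUE_LEN - 1
	 *                                   reclaims descriptors,
	 *                                   dirty_tx catches up
	 *                                   queue not yet stopped, so
	 *                                   netif_wake_queue() is a no-op
	 * netif_stop_queue(dev)
	 *
	 * The queue ends up stopped with a drained ring and nothing left
	 * to wake it: TX stalls until the netdev watchdog kicks in, if
	 * at all.
	 */

Re-testing the ring occupancy under np->lock in start_tx, while the
completion path makes its wake decision under the same lock, closes
this window.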

Here is a more complete patch.

diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28a3a9b..d5e9472 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1099,11 +1099,13 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
        tasklet_schedule(&np->tx_tasklet);
 
        /* On some architectures: explicitly flush cache lines here. */
-       if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
-           !netif_queue_stopped(dev)) {
-               /* do nothing */
-       } else {
-               netif_stop_queue (dev);
+       if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&np->lock, flags);
+               if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+                       netif_stop_queue(dev);
+               spin_unlock_irqrestore(&np->lock, flags);
        }
        if (netif_msg_tx_queued(np)) {
                printk (KERN_DEBUG
@@ -1242,8 +1244,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                        hw_frame_id = ioread8(ioaddr + TxFrameId);
                }
 
+               spin_lock(&np->lock);
                if (np->pci_dev->revision >= 0x14) {
-                       spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
@@ -1267,9 +1269,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
-                       spin_unlock(&np->lock);
                } else {
-                       spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
@@ -1286,7 +1286,6 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
-                       spin_unlock(&np->lock);
                }
 
                if (netif_queue_stopped(dev) &&
@@ -1294,6 +1293,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                        /* The ring is no longer full, clear busy flag. */
                        netif_wake_queue (dev);
                }
+               spin_unlock(&np->lock);
                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
                        netdev_error(dev, intr_status);
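
Note that np->lock is now taken once, around both revision branches and
the wake-up test, so the netif_queue_stopped()/netif_wake_queue()
decision in the completion path is made atomically with respect to the
netif_stop_queue() decision in start_tx.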




