I cannot think of any good reason to pull the bad txq skb off the
qdisc if the txq we plan to send it on is still frozen. So check
for a frozen queue first and abort before dequeuing either the
skb_bad_txq skb or a normal qdisc dequeue() skb.

Signed-off-by: John Fastabend <john.r.fastab...@intel.com>
---
 net/sched/sch_generic.c |   11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d4194c2..db5f7a0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -203,7 +203,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
 {
        const struct netdev_queue *txq = q->dev_queue;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
 
        *packets = 1;
        if (unlikely(!skb_queue_empty(&q->gso_skb))) {
@@ -247,12 +247,15 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
        }
 validate:
        *validate = true;
+
+       if ((q->flags & TCQ_F_ONETXQUEUE) &&
+           netif_xmit_frozen_or_stopped(txq))
+               return skb;
+
        skb = qdisc_dequeue_skb_bad_txq(q);
        if (unlikely(skb))
                goto bulk;
-       if (!(q->flags & TCQ_F_ONETXQUEUE) ||
-           !netif_xmit_frozen_or_stopped(txq))
-               skb = q->dequeue(q);
+       skb = q->dequeue(q);
        if (skb) {
 bulk:
                if (qdisc_may_bulk(q))

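For anyone who wants to poke at the reordering without building a kernel,
below is a small self-contained userspace model of the decision flow after
this patch. The struct and helper names (fake_qdisc, txq_frozen,
dequeue_skb_model, and so on) are invented for illustration only; what it
mirrors from the real code in net/sched/sch_generic.c is just the
TCQ_F_ONETXQUEUE check, the frozen/stopped bail-out, and the bad-txq-first
ordering.

/* Simplified userspace model of the reordered check in dequeue_skb().
 * All names here are made up for illustration; only the ordering of the
 * checks mirrors the kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TCQ_F_ONETXQUEUE 0x10	/* same value as the kernel flag */

struct fake_skb { int id; };

struct fake_qdisc {
	unsigned int flags;
	bool txq_frozen;		/* stands in for netif_xmit_frozen_or_stopped() */
	struct fake_skb *bad_txq_skb;	/* stands in for qdisc_dequeue_skb_bad_txq() */
	struct fake_skb *queued_skb;	/* stands in for q->dequeue(q) */
};

/* After the patch: bail out before touching either queue when the
 * single txq is frozen or stopped. */
static struct fake_skb *dequeue_skb_model(struct fake_qdisc *q)
{
	struct fake_skb *skb = NULL;

	if ((q->flags & TCQ_F_ONETXQUEUE) && q->txq_frozen)
		return skb;		/* NULL: nothing is pulled off the qdisc */

	skb = q->bad_txq_skb;		/* bad-txq skb is tried first */
	if (skb) {
		q->bad_txq_skb = NULL;
		return skb;
	}

	skb = q->queued_skb;		/* then the normal qdisc dequeue */
	q->queued_skb = NULL;
	return skb;
}

int main(void)
{
	struct fake_skb bad = { 1 }, normal = { 2 };
	struct fake_qdisc q = {
		.flags = TCQ_F_ONETXQUEUE,
		.txq_frozen = true,
		.bad_txq_skb = &bad,
		.queued_skb = &normal,
	};

	/* Frozen txq: nothing is dequeued, both skbs stay put. */
	printf("frozen: %p\n", (void *)dequeue_skb_model(&q));

	q.txq_frozen = false;
	/* Unfrozen: bad-txq skb comes out first, then the normal one. */
	printf("bad first: id=%d\n", dequeue_skb_model(&q)->id);
	printf("then normal: id=%d\n", dequeue_skb_model(&q)->id);
	return 0;
}

Running it prints a NULL pointer for the frozen case and then the two skbs
in bad-txq-first order once the queue is unfrozen, which is the behaviour
the reordered check is meant to preserve.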