I cannot think of any reason to pull the bad txq skb off the qdisc if the txq we plan to send it on is still frozen. So check for a frozen queue first and abort before dequeuing either the skb_bad_txq skb or the normal qdisc dequeue() skb.
Signed-off-by: John Fastabend <john.fastab...@gmail.com>
---
 net/sched/sch_generic.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 84cef05..5ff93c2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -204,7 +204,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 				   int *packets)
 {
 	const struct netdev_queue *txq = q->dev_queue;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 
 	*packets = 1;
 	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
@@ -248,12 +248,15 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	}
 validate:
 	*validate = true;
+
+	if ((q->flags & TCQ_F_ONETXQUEUE) &&
+	    netif_xmit_frozen_or_stopped(txq))
+		return skb;
+
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb))
 		goto bulk;
-	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
-	    !netif_xmit_frozen_or_stopped(txq))
-		skb = q->dequeue(q);
+	skb = q->dequeue(q);
 	if (skb) {
 bulk:
 		if (qdisc_may_bulk(q))
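
For illustration only, here is a minimal stand-alone C sketch of the dequeue ordering the patch establishes: bail out while the txq is frozen, otherwise retry the bad-txq skb before the qdisc's own dequeue. The structs, flag value, and helpers below are simplified stand-ins, not the kernel's actual types or API.

/*
 * Simplified user-space sketch of the dequeue ordering after this patch.
 * All types and helpers here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sk_buff { int id; };

struct netdev_queue { bool frozen_or_stopped; };

struct qdisc {
	unsigned int flags;
	struct netdev_queue *dev_queue;
	struct sk_buff *bad_txq_skb;	/* stand-in for the skb_bad_txq list */
	struct sk_buff *queued_skb;	/* stand-in for the qdisc's queue */
};

#define TCQ_F_ONETXQUEUE 0x10		/* illustrative flag value */

static bool netif_xmit_frozen_or_stopped(const struct netdev_queue *txq)
{
	return txq->frozen_or_stopped;
}

static struct sk_buff *dequeue_skb_bad_txq(struct qdisc *q)
{
	struct sk_buff *skb = q->bad_txq_skb;

	q->bad_txq_skb = NULL;
	return skb;
}

static struct sk_buff *qdisc_dequeue(struct qdisc *q)
{
	struct sk_buff *skb = q->queued_skb;

	q->queued_skb = NULL;
	return skb;
}

/* Mirrors the ordering in the patched dequeue_skb(): abort while the txq
 * is frozen, before touching either the bad-txq skb or the queue itself.
 */
static struct sk_buff *dequeue_skb(struct qdisc *q)
{
	struct sk_buff *skb = NULL;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(q->dev_queue))
		return skb;	/* NULL: leave everything queued */

	skb = dequeue_skb_bad_txq(q);
	if (skb)
		return skb;	/* retry the previously stuck skb first */

	return qdisc_dequeue(q);
}

int main(void)
{
	struct netdev_queue txq = { .frozen_or_stopped = true };
	struct sk_buff bad = { .id = 1 }, normal = { .id = 2 };
	struct qdisc q = {
		.flags = TCQ_F_ONETXQUEUE,
		.dev_queue = &txq,
		.bad_txq_skb = &bad,
		.queued_skb = &normal,
	};

	/* Frozen txq: nothing is pulled off the qdisc. */
	printf("frozen: %p\n", (void *)dequeue_skb(&q));

	/* Once the txq wakes up, the bad-txq skb goes out first. */
	txq.frozen_or_stopped = false;
	printf("awake: skb id %d\n", dequeue_skb(&q)->id);
	printf("next:  skb id %d\n", dequeue_skb(&q)->id);
	return 0;
}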