This patch has two changes: 1) Use netdev_tx_sent_queue_more() for skbs with xmit_more This avoids mangling BQL status, since we only need to take care of it for the last skb of the batch.
2) doorbell only depends on xmit_more and netif_tx_queue_stopped(). While not strictly necessary after 1), it is more consistent this way. Signed-off-by: Eric Dumazet <eduma...@google.com> Cc: Tariq Toukan <tar...@mellanox.com> --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1857ee0f0871d48285a6d3711f7c3e9a1e08a05f..3acce02ade6a115881ecd72e4710e332d3f380cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->packets++; } ring->bytes += tx_info->nr_bytes; - netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); if (tx_info->inl) @@ -1044,7 +1043,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(ring->tx_queue); ring->queue_stopped++; } - send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + if (skb->xmit_more) { + netdev_tx_sent_queue_more(ring->tx_queue, tx_info->nr_bytes); + send_doorbell = netif_tx_queue_stopped(ring->tx_queue); + } else { + netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); + send_doorbell = true; + } real_size = (real_size / 16) & 0x3f; -- 2.19.1.568.g152ad8e336-goog