On Thu, Nov 20, 2025 at 09:53:20AM +0800, [email protected] wrote:
> From: Liming Wu <[email protected]>
> 
> This patch completes the statistics collection of TX queue wake/stop
> events introduced by commit c39add9b2423 ("virtio_net: Add TX stopped
> and wake counters").
> 
> Previously, the driver recorded only partial wake/stop statistics for
> TX queues: wake events triggered from skb_xmit_done() or from resume
> operations were not counted, leaving the per-queue metrics incomplete.
> 
> Signed-off-by: Liming Wu <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>

> ---
>  drivers/net/virtio_net.c | 44 ++++++++++++++++++++++++----------------
>  1 file changed, 26 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8e8a179aaa49..b714b190db2a 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -775,10 +775,26 @@ static bool virtqueue_napi_complete(struct napi_struct *napi,
>  	return false;
>  }
>  
> +static void virtnet_tx_wake_queue(struct virtnet_info *vi,
> +				  struct send_queue *sq)
> +{
> +	unsigned int index = vq2txq(sq->vq);
> +	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
> +
> +	if (netif_tx_queue_stopped(txq)) {
> +		u64_stats_update_begin(&sq->stats.syncp);
> +		u64_stats_inc(&sq->stats.wake);
> +		u64_stats_update_end(&sq->stats.syncp);
> +		netif_tx_wake_queue(txq);
> +	}
> +}
> +
>  static void skb_xmit_done(struct virtqueue *vq)
>  {
>  	struct virtnet_info *vi = vq->vdev->priv;
> -	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
> +	unsigned int index = vq2txq(vq);
> +	struct send_queue *sq = &vi->sq[index];
> +	struct napi_struct *napi = &sq->napi;
>  
>  	/* Suppress further interrupts. */
>  	virtqueue_disable_cb(vq);
> @@ -786,8 +802,7 @@ static void skb_xmit_done(struct virtqueue *vq)
>  	if (napi->weight)
>  		virtqueue_napi_schedule(napi, vq);
>  	else
> -		/* We were probably waiting for more output buffers. */
> -		netif_wake_subqueue(vi->dev, vq2txq(vq));
> +		virtnet_tx_wake_queue(vi, sq);
>  }
>  
>  #define MRG_CTX_HEADER_SHIFT 22
> @@ -3068,13 +3083,8 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
>  		free_old_xmit(sq, txq, !!budget);
>  	} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
>  
> -	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 &&
> -	    netif_tx_queue_stopped(txq)) {
> -		u64_stats_update_begin(&sq->stats.syncp);
> -		u64_stats_inc(&sq->stats.wake);
> -		u64_stats_update_end(&sq->stats.syncp);
> -		netif_tx_wake_queue(txq);
> -	}
> +	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
> +		virtnet_tx_wake_queue(vi, sq);
>  
>  	__netif_tx_unlock(txq);
>  }
> @@ -3264,13 +3274,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>  	else
>  		free_old_xmit(sq, txq, !!budget);
>  
> -	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 &&
> -	    netif_tx_queue_stopped(txq)) {
> -		u64_stats_update_begin(&sq->stats.syncp);
> -		u64_stats_inc(&sq->stats.wake);
> -		u64_stats_update_end(&sq->stats.syncp);
> -		netif_tx_wake_queue(txq);
> -	}
> +	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
> +		virtnet_tx_wake_queue(vi, sq);
>  
>  	if (xsk_done >= budget) {
>  		__netif_tx_unlock(txq);
> @@ -3521,6 +3526,9 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
>  
>  	/* Prevent the upper layer from trying to send packets. */
>  	netif_stop_subqueue(vi->dev, qindex);
> +	u64_stats_update_begin(&sq->stats.syncp);
> +	u64_stats_inc(&sq->stats.stop);
> +	u64_stats_update_end(&sq->stats.syncp);
>  
>  	__netif_tx_unlock_bh(txq);
>  }
> @@ -3537,7 +3545,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
>  
>  	__netif_tx_lock_bh(txq);
>  	sq->reset = false;
> -	netif_tx_wake_queue(txq);
> +	virtnet_tx_wake_queue(vi, sq);
>  	__netif_tx_unlock_bh(txq);
>  
>  	if (running)
> -- 
> 2.34.1
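
For anyone skimming the thread: the point of the new helper is that every
wake path (the skb_xmit_done() callback, NAPI TX/RX cleanup, and reset
resume) now funnels through a single check-count-wake sequence, so the wake
counter cannot drift from the actual stopped-to-running transitions of the
queue. Below is a minimal user-space sketch of that pattern; the types and
names (mock_txq, txq_wake, txq_stop) are illustrative stand-ins I made up
for the driver's netif_* and u64_stats_* machinery, not code from the patch.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the driver's per-queue state. */
	struct mock_txq {
		bool stopped;       /* mirrors netif_tx_queue_stopped() */
		uint64_t stop_cnt;  /* mirrors sq->stats.stop */
		uint64_t wake_cnt;  /* mirrors sq->stats.wake */
	};

	/*
	 * Single choke point for waking, analogous to the patch's
	 * virtnet_tx_wake_queue(): the counter is bumped only when the
	 * queue actually transitions from stopped to running, so each
	 * wake is counted exactly once, no matter which path triggers it.
	 */
	static void txq_wake(struct mock_txq *q)
	{
		if (q->stopped) {
			q->wake_cnt++;
			q->stopped = false;
		}
	}

	static void txq_stop(struct mock_txq *q)
	{
		q->stop_cnt++;
		q->stopped = true;
	}

	int main(void)
	{
		struct mock_txq q = { 0 };

		txq_stop(&q);   /* ring full: stop the queue and count it */
		txq_wake(&q);   /* completion path: wake and count */
		txq_wake(&q);   /* second path races in: no spurious count */

		printf("stop=%llu wake=%llu\n",
		       (unsigned long long)q.stop_cnt,
		       (unsigned long long)q.wake_cnt);
		return 0;       /* prints: stop=1 wake=1 */
	}

The stopped-flag guard inside the helper is what keeps the metric honest:
calling it on an already-running queue is a no-op, which is why the callers
in virtnet_poll_cleantx() and virtnet_poll_tx() could drop their own
netif_tx_queue_stopped() checks.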

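One more aside on the u64_stats_update_begin()/u64_stats_update_end() pairs
that bracket every counter bump in the diff: on 64-bit kernels they compile
to nothing, since a 64-bit store is already atomic there; on 32-bit SMP they
wrap the write in a sequence counter so a concurrent reader never observes a
torn 64-bit value. A rough user-space analogue of that writer/reader
protocol, using C11 atomics in place of the kernel primitives (illustrative
only; the kernel version lets the counter itself stay a plain variable):

	#include <stdatomic.h>
	#include <stdint.h>

	struct u64_stats_mock {
		atomic_uint seq;        /* even: stable, odd: write in progress */
		_Atomic uint64_t wake;  /* atomic here only to keep the sketch portable */
	};

	/* Single writer, as with the driver's per-queue TX stats. */
	static void wake_inc(struct u64_stats_mock *s)
	{
		atomic_fetch_add(&s->seq, 1);   /* begin: seq becomes odd */
		atomic_fetch_add(&s->wake, 1);  /* the 64-bit update */
		atomic_fetch_add(&s->seq, 1);   /* end: seq becomes even */
	}

	/* Readers retry until they observe a stable, even sequence number. */
	static uint64_t wake_read(struct u64_stats_mock *s)
	{
		unsigned int start;
		uint64_t v;

		do {
			start = atomic_load(&s->seq);
			v = atomic_load(&s->wake);
		} while ((start & 1) || start != atomic_load(&s->seq));

		return v;
	}

The write side never blocks, which is why it is cheap enough to sit on the
TX fast path; only readers (the stats dump) may have to retry.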