When the device is in the DOWN state, NAPI has already been disabled.
If we resize the ring in this state, we must not call napi_disable()
again, since the second call would get stuck.

All of these operations run under rtnl_lock, so there is no need to
worry about concurrency.

Reported-by: Kangjie Xu <[email protected]>
Signed-off-by: Xuan Zhuo <[email protected]>
---
 drivers/net/virtio_net.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
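
A note below the cut line, not part of the change itself:
virtnet_rx_resize() in the same file follows the same shape, so if it
ever needs the same treatment, the guard would presumably look like the
sketch below. This is only an illustration of the pattern, assuming the
rx side keeps using napi_disable()/virtnet_napi_enable() as in
virtnet_open()/virtnet_close(); the actual resize/refill steps are
elided:

	bool running = netif_running(vi->dev);

	/* Skip napi_disable() when the device is down: NAPI is already
	 * disabled there and a second napi_disable() would get stuck.
	 */
	if (running)
		napi_disable(&rq->napi);

	/* ... resize the rx virtqueue and refill it here ... */

	/* Re-enable NAPI only if we disabled it above. */
	if (running)
		virtnet_napi_enable(rq->vq, &rq->napi);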

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 17687eb3f0bd..d9c434b00e9b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1915,12 +1915,14 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
 static int virtnet_tx_resize(struct virtnet_info *vi,
                             struct send_queue *sq, u32 ring_num)
 {
+       bool running = netif_running(vi->dev);
        struct netdev_queue *txq;
        int err, qindex;
 
        qindex = sq - vi->sq;
 
-       virtnet_napi_tx_disable(&sq->napi);
+       if (running)
+               virtnet_napi_tx_disable(&sq->napi);
 
        txq = netdev_get_tx_queue(vi->dev, qindex);
 
@@ -1946,7 +1948,8 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
        netif_tx_wake_queue(txq);
        __netif_tx_unlock_bh(txq);
 
-       virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+       if (running)
+               virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
        return err;
 }
 
-- 
2.31.0
