When XDP_USE_NEED_WAKEUP is used and the fill ring is empty, so no buffer
can be allocated on the RX side, allow the RX NAPI to be descheduled. This
avoids wasting CPU cycles on polling. Userspace is notified via the
need_wakeup flag and must issue a wakeup call after refilling the ring.

Signed-off-by: Bui Quang Minh <[email protected]>
---
 drivers/net/virtio_net.c | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index db88dcaefb20..494acc904b2c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1454,8 +1454,19 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
        xsk_buffs = rq->xsk_buffs;
 
        num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
-       if (!num)
+       if (!num) {
+               if (xsk_uses_need_wakeup(pool)) {
+                       xsk_set_rx_need_wakeup(pool);
+                       /* Return 0 instead of -ENOMEM so that NAPI is
+                        * descheduled.
+                        */
+                       return 0;
+               }
+
                return -ENOMEM;
+       } else {
+               xsk_clear_rx_need_wakeup(pool);
+       }
 
        len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
 
@@ -1588,20 +1599,21 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
        return sent;
 }
 
-static void xsk_wakeup(struct send_queue *sq)
+static void xsk_wakeup(struct napi_struct *napi, struct virtqueue *vq)
 {
-       if (napi_if_scheduled_mark_missed(&sq->napi))
+       if (napi_if_scheduled_mark_missed(napi))
                return;
 
        local_bh_disable();
-       virtqueue_napi_schedule(&sq->napi, sq->vq);
+       virtqueue_napi_schedule(napi, vq);
        local_bh_enable();
 }
 
 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
 {
        struct virtnet_info *vi = netdev_priv(dev);
-       struct send_queue *sq;
+       struct napi_struct *napi;
+       struct virtqueue *vq;
 
        if (!netif_running(dev))
                return -ENETDOWN;
@@ -1609,9 +1621,19 @@ static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
        if (qid >= vi->curr_queue_pairs)
                return -EINVAL;
 
-       sq = &vi->sq[qid];
+       if (flag == XDP_WAKEUP_TX) {
+               struct send_queue *sq = &vi->sq[qid];
+
+               napi = &sq->napi;
+               vq = sq->vq;
+       } else {
+               struct receive_queue *rq = &vi->rq[qid];
+
+               napi = &rq->napi;
+               vq = rq->vq;
+       }
 
-       xsk_wakeup(sq);
+       xsk_wakeup(napi, vq);
        return 0;
 }
 
@@ -1623,7 +1645,7 @@ static void virtnet_xsk_completed(struct send_queue *sq, int num)
         * wakeup the tx napi to consume the xsk tx queue, because the tx
         * interrupt may not be triggered.
         */
-       xsk_wakeup(sq);
+       xsk_wakeup(&sq->napi, sq->vq);
 }
 
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
-- 
2.43.0


Reply via email to