On 2021/2/11 5:40 AM, Michael S. Tsirkin wrote:
On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
The number of queues implemented by many virtio backends is limited,
and some machines have a large number of CPUs. In that case it is
often impossible to allocate a separate queue for XDP_TX.

This patch allows XDP_TX to run by reusing an existing SQ with
__netif_tx_lock() held when there are not enough queues.
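
In caller terms the two new helpers pair like this (a minimal sketch,
not part of the patch; example_xdp_kick() is a hypothetical caller
shown only to illustrate the locking contract, the real call sites are
in the diff below):

static void example_xdp_kick(struct virtnet_info *vi)
{
        struct send_queue *sq;

        /* Takes the per-queue TX lock when SQs are shared between CPUs. */
        sq = virtnet_get_xdp_sq(vi);

        /* ... enqueue xdp_frames on sq->vq here ... */

        if (virtqueue_kick_prepare(sq->vq))
                virtqueue_notify(sq->vq);

        /* Releases the TX lock if it was taken. */
        virtnet_put_xdp_sq(vi);
}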

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Reviewed-by: Dust Li <dust...@linux.alibaba.com>
I'd like to get some advice on whether this is OK from some XDP
experts - previously my understanding was that it is preferable to
disable XDP for such devices than to take locks on the XDP fast path.


I think this is acceptable for devices where changing the number of queues is not easy. For virtio-net, that would probably require a lot of changes on the management side.

Another example is TUN, which uses a TX lock for XDP.
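
In generic form that pattern looks roughly like this (a sketch
assuming a driver whose TX queues can be shared between CPUs;
queue_one_xdp_frame() is a hypothetical driver-specific enqueue, not
an existing kernel helper):

static int locked_xdp_xmit(struct net_device *dev, u16 qid,
                           struct xdp_frame *frame)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qid);
        int ret;

        /* Serialize concurrent XDP_TX senders sharing this queue. */
        __netif_tx_lock(txq, raw_smp_processor_id());
        ret = queue_one_xdp_frame(dev, qid, frame);
        __netif_tx_unlock(txq);

        return ret;
}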

Thanks



---
  drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
  1 file changed, 35 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ba8e637..7a3b2a7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -195,6 +195,9 @@ struct virtnet_info {
        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

+       /* xdp_queue_pairs may be 0 even when XDP is loaded, so track it here. */
+       bool xdp_enabled;
+
        /* I like... big packets and I cannot lie! */
        bool big_packets;
@@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
        return 0;
  }
-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
+static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
  {
        unsigned int qp;
+       struct netdev_queue *txq;
+
+       if (vi->curr_queue_pairs > nr_cpu_ids) {
+               qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+       } else {
+               qp = smp_processor_id() % vi->curr_queue_pairs;
+               txq = netdev_get_tx_queue(vi->dev, qp);
+               __netif_tx_lock(txq, raw_smp_processor_id());
+       }

-       qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
        return &vi->sq[qp];
  }
+static void virtnet_put_xdp_sq(struct virtnet_info *vi)
+{
+       unsigned int qp;
+       struct netdev_queue *txq;
+
+       if (vi->curr_queue_pairs <= nr_cpu_ids) {
+               qp = smp_processor_id() % vi->curr_queue_pairs;
+               txq = netdev_get_tx_queue(vi->dev, qp);
+               __netif_tx_unlock(txq);
+       }
+}
+
  static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
  {
@@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        if (!xdp_prog)
                return -ENXIO;
-       sq = virtnet_xdp_sq(vi);
+       sq = virtnet_get_xdp_sq(vi);

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
                ret = -EINVAL;
@@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        sq->stats.kicks += kicks;
        u64_stats_update_end(&sq->stats.syncp);

+       virtnet_put_xdp_sq(vi);
        return ret;
  }

  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
  {
-       return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+       return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
  }

  /* We copy the packet for XDP in the following cases:
@@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
                xdp_do_flush();

        if (xdp_xmit & VIRTIO_XDP_TX) {
-               sq = virtnet_xdp_sq(vi);
+               sq = virtnet_get_xdp_sq(vi);
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
                        u64_stats_update_begin(&sq->stats.syncp);
                        sq->stats.kicks++;
                        u64_stats_update_end(&sq->stats.syncp);
                }
+               virtnet_put_xdp_sq(vi);
        }

        return received;
@@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                xdp_qp = nr_cpu_ids;

        /* XDP requires extra queues for XDP_TX */
-       if (curr_qp + xdp_qp > vi->max_queue_pairs) {
-               NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
-               netdev_warn(dev, "request %i queues but max is %i\n",
-                           curr_qp + xdp_qp, vi->max_queue_pairs);
-               return -ENOMEM;
-       }
+       if (curr_qp + xdp_qp > vi->max_queue_pairs)
+               xdp_qp = 0;

        old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
        if (!prog && !old_prog)
@@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
        netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        vi->xdp_queue_pairs = xdp_qp;

+       vi->xdp_enabled = false;
        if (prog) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
                        if (i == 0 && !old_prog)
                                virtnet_clear_guest_offloads(vi);
                }
+               vi->xdp_enabled = true;
        }

        for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
        int err;

        if ((dev->features ^ features) & NETIF_F_LRO) {
-               if (vi->xdp_queue_pairs)
+               if (vi->xdp_enabled)
                        return -EBUSY;

                if (features & NETIF_F_LRO)
--
1.8.3.1
