On Tue, 20 Oct 2020 11:33:37 +0200
Lorenzo Bianconi <lore...@kernel.org> wrote:

> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 54b0bf574c05..af33cc62ed4c 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -663,6 +663,8 @@ struct mvneta_tx_queue {
>  
>       /* Affinity mask for CPUs*/
>       cpumask_t affinity_mask;
> +
> +     struct xdp_frame_bulk bq;
>  };
>  
>  struct mvneta_rx_queue {
> @@ -1854,12 +1856,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
>                       dev_kfree_skb_any(buf->skb);
>               } else if (buf->type == MVNETA_TYPE_XDP_TX ||
>                          buf->type == MVNETA_TYPE_XDP_NDO) {
> -                     if (napi && buf->type == MVNETA_TYPE_XDP_TX)
> -                             xdp_return_frame_rx_napi(buf->xdpf);
> -                     else
> -                             xdp_return_frame(buf->xdpf);
> +                     xdp_return_frame_bulk(buf->xdpf, &txq->bq, napi);

Hmm, I don't think you can use 'napi' directly here.

You are circumventing the check (buf->type == MVNETA_TYPE_XDP_TX), and
will now also allow XDP_NDO (XDP_REDIRECT) frames to effectively take
the xdp_return_frame_rx_napi() path.
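
Just to illustrate the point (untested sketch, using the bulk API as
proposed in this patch): keeping the per-frame type check could look
something like this:

	} else if (buf->type == MVNETA_TYPE_XDP_TX ||
		   buf->type == MVNETA_TYPE_XDP_NDO) {
		/* Only XDP_TX frames are known to be recycled from this
		 * driver's own RX NAPI context; redirected (XDP_NDO)
		 * frames must not take the rx_napi fast path.
		 */
		bool rx_napi = napi && buf->type == MVNETA_TYPE_XDP_TX;

		xdp_return_frame_bulk(buf->xdpf, &txq->bq, rx_napi);
	}

Although it is not obvious to me how a per-frame napi value would then
interact with the single xdp_flush_frame_bulk(&txq->bq, napi) call at
the end of the loop.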


>               }
>       }
> +     xdp_flush_frame_bulk(&txq->bq, napi);
>  
>       netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
>  }



-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer
