On 05/02/2018 01:01 PM, Björn Töpel wrote:
> From: Björn Töpel <bjorn.to...@intel.com>
> 
> Here the actual receive functions of AF_XDP are implemented; in a
> later commit, they will be called from the XDP layers.
> 
> There's one set of functions for the XDP_DRV side and another for
> XDP_SKB (generic).
> 
> A new XDP API, xdp_return_buff, is also introduced.
> 
> Adding xdp_return_buff, which is analogous to xdp_return_frame, but
> acts upon a struct xdp_buff. The API will be used by AF_XDP in future
> commits.
> 
> Support for the poll syscall is also implemented.
> 
> v2: xskq_validate_id did not update cons_tail.
>     The entries variable was calculated twice in xskq_nb_avail.
>     Squashed xdp_return_buff commit.
> 
> Signed-off-by: Björn Töpel <bjorn.to...@intel.com>
> ---
>  include/net/xdp.h      |   1 +
>  include/net/xdp_sock.h |  22 ++++++++++
>  net/core/xdp.c         |  15 +++++--
>  net/xdp/xdp_umem.h     |  18 ++++++++
>  net/xdp/xsk.c          |  73 ++++++++++++++++++++++++++++++-
>  net/xdp/xsk_queue.h    | 114 
> ++++++++++++++++++++++++++++++++++++++++++++++++-
>  6 files changed, 238 insertions(+), 5 deletions(-)
> 
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index 137ad5f9f40f..0b689cf561c7 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -104,6 +104,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff 
> *xdp)
>  }
>  
>  void xdp_return_frame(struct xdp_frame *xdpf);
> +void xdp_return_buff(struct xdp_buff *xdp);
>  
>  int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
>                    struct net_device *dev, u32 queue_index);
[...]
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index bf2c97b87992..4e1e6c581e1d 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -41,6 +41,74 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
>       return (struct xdp_sock *)sk;
>  }
>  
> +static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
> +{
> +     u32 *id, len = xdp->data_end - xdp->data;
> +     void *buffer;
> +     int err = 0;
> +
> +     if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
> +             return -EINVAL;
> +
> +     id = xskq_peek_id(xs->umem->fq);
> +     if (!id)
> +             return -ENOSPC;
> +
> +     buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
> +     memcpy(buffer, xdp->data, len);
> +     err = xskq_produce_batch_desc(xs->rx, *id, len,
> +                                   xs->umem->frame_headroom);
> +     if (!err)
> +             xskq_discard_id(xs->umem->fq);
> +
> +     return err;
> +}
> +
> +int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
> +{
> +     int err;
> +
> +     err = __xsk_rcv(xs, xdp);
> +     if (likely(!err))
> +             xdp_return_buff(xdp);
> +     else
> +             xs->rx_dropped++;

This is triggered from __bpf_tx_xdp_map() -> __xsk_map_redirect().
Should this be a percpu counter instead?

> +     return err;
> +}
> +
> +void xsk_flush(struct xdp_sock *xs)
> +{
> +     xskq_produce_flush_desc(xs->rx);
> +     xs->sk.sk_data_ready(&xs->sk);
> +}
> +
> +int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
> +{
> +     int err;
> +
> +     err = __xsk_rcv(xs, xdp);
> +     if (!err)
> +             xsk_flush(xs);
> +     else
> +             xs->rx_dropped++;
> +
> +     return err;
> +}
> +

Reply via email to