> On Tue, 27 Oct 2020 20:04:07 +0100
> Lorenzo Bianconi <lore...@kernel.org> wrote:
> 
> > diff --git a/net/core/xdp.c b/net/core/xdp.c
> > index 48aba933a5a8..93eabd789246 100644
> > --- a/net/core/xdp.c
> > +++ b/net/core/xdp.c
> > @@ -380,6 +380,57 @@ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
> >  }
> >  EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
> >  
> > +void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
> > +{
> > +   struct xdp_mem_allocator *xa = bq->xa;
> > +   int i;
> > +
> > +   if (unlikely(!xa))
> > +           return;
> > +
> > +   for (i = 0; i < bq->count; i++) {
> > +           struct page *page = virt_to_head_page(bq->q[i]);
> > +
> > +           page_pool_put_full_page(xa->page_pool, page, false);
> > +   }
> > +   bq->count = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
> > +
> > +void xdp_return_frame_bulk(struct xdp_frame *xdpf,
> > +                      struct xdp_frame_bulk *bq)
> > +{
> > +   struct xdp_mem_info *mem = &xdpf->mem;
> > +   struct xdp_mem_allocator *xa, *nxa;
> > +
> > +   if (mem->type != MEM_TYPE_PAGE_POOL) {
> > +           __xdp_return(xdpf->data, &xdpf->mem, false);
> > +           return;
> > +   }
> > +
> > +   rcu_read_lock();
> > +
> > +   xa = bq->xa;
> > +   if (unlikely(!xa || mem->id != xa->mem.id)) {
> > +           nxa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
> > +           if (unlikely(!xa)) {
> > +                   bq->count = 0;
> > +                   bq->xa = nxa;
> > +                   xa = nxa;
> > +           }
> > +   }
> > +
> > +   if (mem->id != xa->mem.id || bq->count == XDP_BULK_QUEUE_SIZE)
> > +           xdp_flush_frame_bulk(bq);
> > +
> > +   bq->q[bq->count++] = xdpf->data;
> > +   if (mem->id != xa->mem.id)
> > +           bq->xa = nxa;
> > +
> > +   rcu_read_unlock();
> > +}
> > +EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
> 
> We (my co-maintainer Ilias and I) think the above code is hard to read
> and understand (as a reader you need to keep too many cases in your head).
> 
> I think we both have proposals to improve this; here is mine:
> 
> /* Defers return when frame belongs to same mem.id as previous frame */
> void xdp_return_frame_bulk(struct xdp_frame *xdpf,
>                            struct xdp_frame_bulk *bq)
> {
>         struct xdp_mem_info *mem = &xdpf->mem;
>         struct xdp_mem_allocator *xa;
> 
>         if (mem->type != MEM_TYPE_PAGE_POOL) {
>                 __xdp_return(xdpf->data, &xdpf->mem, false);
>                 return;
>         }
> 
>         rcu_read_lock();
> 
>         xa = bq->xa;
>         if (unlikely(!xa)) {
>                 xa = rhashtable_lookup(mem_id_ht, &mem->id,
>                                        mem_id_rht_params);
>                 bq->count = 0;
>                 bq->xa = xa;
>         }
> 
>         if (bq->count == XDP_BULK_QUEUE_SIZE)
>                 xdp_flush_frame_bulk(bq);
> 
>         if (mem->id != xa->mem.id) {
>                 xdp_flush_frame_bulk(bq);
>                 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id,
>                                            mem_id_rht_params);
>         }
> 
>         bq->q[bq->count++] = xdpf->data;
> 
>         rcu_read_unlock();
> }
> 
> Please review for correctness, and also for readability.

The code seems fine to me (and is even easier to read :)).
I will update v2 to use this approach. Thanks.
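
For reference, the intended driver-side usage would look roughly like
this (a sketch only; get_completed_frame() is a made-up placeholder for
however the driver obtains the completed frame, and the explicit field
initialization stands in for whatever init helper v2 ends up with):

        struct xdp_frame_bulk bq;
        int i;

        /* Start with an empty bulk queue; xdp_return_frame_bulk() does
         * the mem.id lookup on the first frame when bq.xa is NULL.
         */
        bq.xa = NULL;
        bq.count = 0;

        for (i = 0; i < budget; i++) {
                struct xdp_frame *xdpf = get_completed_frame(i);

                /* Defer the page_pool return; frames with the same
                 * mem.id are batched up to XDP_BULK_QUEUE_SIZE.
                 */
                xdp_return_frame_bulk(xdpf, &bq);
        }

        /* Return whatever is still queued at the end of the cycle. */
        xdp_flush_frame_bulk(&bq);

The per-frame cost in the common case is then just an array store, with
the rhashtable lookup and page_pool calls amortized over the batch.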

Regards,
Lorenzo

> 
> -- 
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn: http://www.linkedin.com/in/brouer
> 
