Lorenzo Bianconi wrote:
> Introduce the capability to batch page_pool ptr_ring refill since it is
> usually run inside the driver NAPI tx completion loop.
> 
> Suggested-by: Jesper Dangaard Brouer <bro...@redhat.com>
> Co-developed-by: Jesper Dangaard Brouer <bro...@redhat.com>
> Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
> Signed-off-by: Lorenzo Bianconi <lore...@kernel.org>
> ---
>  include/net/page_pool.h | 26 ++++++++++++++++
>  net/core/page_pool.c    | 69 +++++++++++++++++++++++++++++++++++------
>  net/core/xdp.c          |  9 ++----
>  3 files changed, 87 insertions(+), 17 deletions(-)

[...]

> +/* Caller must not use data area after call, as this function overwrites it */
> +void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> +                          int count)
> +{
> +     int i, bulk_len = 0, pa_len = 0;
> +
> +     for (i = 0; i < count; i++) {
> +             struct page *page = virt_to_head_page(data[i]);
> +
> +             page = __page_pool_put_page(pool, page, -1, false);
> +             /* Approved for bulk recycling in ptr_ring cache */
> +             if (page)
> +                     data[bulk_len++] = page;
> +     }
> +
> +     if (unlikely(!bulk_len))
> +             return;
> +
> +     /* Bulk producer into ptr_ring page_pool cache */
> +     page_pool_ring_lock(pool);
> +     for (i = 0; i < bulk_len; i++) {
> +             if (__ptr_ring_produce(&pool->ring, data[i]))
> +                     data[pa_len++] = data[i];

How about bailing out on the first error? bulk_len should be less than
16, right? So should we really keep retrying, hoping that room opens up
in the ring while we still hold the producer lock?
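
Something like the below is what I had in mind (rough, untested sketch
on top of this patch, reusing its naming): once __ptr_ring_produce()
fails the ring is full and won't drain while we hold the producer lock,
so the rest of the batch can go straight to the slow path:

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		/* ring full: no point in trying the remaining pages */
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break;
	}
	page_pool_ring_unlock(pool);

	/* free the leftovers outside the producer lock, since put_page()
	 * with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);

That would also drop the need for pa_len.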

> +     }
> +     page_pool_ring_unlock(pool);
> +
> +     if (likely(!pa_len))
> +             return;
> +
> +     /* ptr_ring cache full, free pages outside producer lock since
> +      * put_page() with refcnt == 1 can be an expensive operation
> +      */
> +     for (i = 0; i < pa_len; i++)
> +             page_pool_return_page(pool, data[i]);
> +}
> +EXPORT_SYMBOL(page_pool_put_page_bulk);
> +

Otherwise LGTM.
