Hi Michael,

I've tested this on the system that was reproducing the panic, and
everything is now working as expected.

For the series then:

Tested-by: Darren Kenny <[email protected]>

Thanks,

Darren.

On Wednesday, 2024-08-14 at 02:59:20 -04, Michael S. Tsirkin wrote:
> This reverts commit defd28aa5acb0fd7c15adc6bc40a8ac277d04dea.
>
> The reverted commit leads to crashes when ACCESS_PLATFORM is not
> negotiated and sysctl net.core.high_order_alloc_disable=1 is set.
>
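(For anyone else wanting to reproduce this: the trigger described above
amounts to running, in the guest,

    sysctl net.core.high_order_alloc_disable=1

against a virtio-net device that has not negotiated
VIRTIO_F_ACCESS_PLATFORM.)
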
> Reported-by: Si-Wei Liu <[email protected]>
> Message-ID: <[email protected]>
> Signed-off-by: Michael S. Tsirkin <[email protected]>
> ---
>  drivers/net/virtio_net.c | 89 +++++++++++++++++++++++-----------------
>  1 file changed, 52 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index fd3d7e926022..4f7e686b8bf9 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -348,6 +348,9 @@ struct receive_queue {
>  
>       /* Record the last dma info to free after a new page is allocated. */
>       struct virtnet_rq_dma *last_dma;
> +
> +     /* Do DMA mapping ourselves */
> +     bool do_dma;
>  };
>  
>  /* This structure can contain rss message with maximum settings for indirection table and keysize
> @@ -848,7 +851,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
>       void *buf;
>  
>       buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> -     if (buf)
> +     if (buf && rq->do_dma)
>               virtnet_rq_unmap(rq, buf, *len);
>  
>       return buf;
> @@ -861,6 +864,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
>       u32 offset;
>       void *head;
>  
> +     if (!rq->do_dma) {
> +             sg_init_one(rq->sg, buf, len);
> +             return;
> +     }
> +
>       head = page_address(rq->alloc_frag.page);
>  
>       offset = buf - head;
> @@ -886,42 +894,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
>  
>       head = page_address(alloc_frag->page);
>  
> -     dma = head;
> +     if (rq->do_dma) {
> +             dma = head;
>  
> -     /* new pages */
> -     if (!alloc_frag->offset) {
> -             if (rq->last_dma) {
> -                     /* Now, the new page is allocated, the last dma
> -                      * will not be used. So the dma can be unmapped
> -                      * if the ref is 0.
> +             /* new pages */
> +             if (!alloc_frag->offset) {
> +                     if (rq->last_dma) {
> +                             /* Now, the new page is allocated, the last dma
> +                              * will not be used. So the dma can be unmapped
> +                              * if the ref is 0.
> +                              */
> +                             virtnet_rq_unmap(rq, rq->last_dma, 0);
> +                             rq->last_dma = NULL;
> +                     }
> +
> +                     dma->len = alloc_frag->size - sizeof(*dma);
> +
> +                     addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> +                                                           dma->len, DMA_FROM_DEVICE, 0);
> +                     if (virtqueue_dma_mapping_error(rq->vq, addr))
> +                             return NULL;
> +
> +                     dma->addr = addr;
> +                     dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> +
> +                     /* Add a reference to dma to prevent the entire dma from
> +                      * being released during error handling. This reference
> +                      * will be freed after the pages are no longer used.
>                        */
> -                     virtnet_rq_unmap(rq, rq->last_dma, 0);
> -                     rq->last_dma = NULL;
> +                     get_page(alloc_frag->page);
> +                     dma->ref = 1;
> +                     alloc_frag->offset = sizeof(*dma);
> +
> +                     rq->last_dma = dma;
>               }
>  
> -             dma->len = alloc_frag->size - sizeof(*dma);
> -
> -             addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> -                                                   dma->len, DMA_FROM_DEVICE, 0);
> -             if (virtqueue_dma_mapping_error(rq->vq, addr))
> -                     return NULL;
> -
> -             dma->addr = addr;
> -             dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> -
> -             /* Add a reference to dma to prevent the entire dma from
> -              * being released during error handling. This reference
> -              * will be freed after the pages are no longer used.
> -              */
> -             get_page(alloc_frag->page);
> -             dma->ref = 1;
> -             alloc_frag->offset = sizeof(*dma);
> -
> -             rq->last_dma = dma;
> +             ++dma->ref;
>       }
>  
> -     ++dma->ref;
> -
>       buf = head + alloc_frag->offset;
>  
>       get_page(alloc_frag->page);
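
To spell out the scheme the revert restores, as I read it: the struct
virtnet_rq_dma header sits at the start of each page fragment, and the
mapped region begins right after it, roughly:

    page frag:  [ struct virtnet_rq_dma | buf0 | buf1 | ... ]
                  ^ dma                   ^ dma + 1 (mapped region)

dma->ref counts the buffers still carved out of the fragment, so the
mapping is only torn down (via virtnet_rq_unmap()) once every buffer
has been returned. None of that logic changes here; it is just wrapped
in 'if (rq->do_dma)' so queues that stay in the default mode skip it
entirely.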
> @@ -938,9 +948,12 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
>       if (!vi->mergeable_rx_bufs && vi->big_packets)
>               return;
>  
> -     for (i = 0; i < vi->max_queue_pairs; i++)
> -             /* error should never happen */
> -             BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
> +     for (i = 0; i < vi->max_queue_pairs; i++) {
> +             if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> +                     continue;
> +
> +             vi->rq[i].do_dma = true;
> +     }
>  }
>  
>  static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
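
This hunk looks like the heart of the fix: instead of BUG_ON() when a
queue cannot be switched to premapped mode, the queue simply keeps
do_dma == false, and every path above falls back to letting the virtio
core handle the DMA mapping. A minimal userspace sketch of that opt-in
pattern (stub types only; try_enable_premapped() is a hypothetical
stand-in for virtqueue_set_dma_premapped(), not the real API):

#include <stdbool.h>
#include <stdio.h>

struct rx_queue {
	bool do_dma;		/* driver maps buffers itself when true */
};

/* Stand-in for virtqueue_set_dma_premapped(): returns 0 on success. */
static int try_enable_premapped(int qidx)
{
	return qidx == 2 ? -1 : 0;	/* pretend queue 2 can't switch */
}

static void set_premapped(struct rx_queue *rq, int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		if (try_enable_premapped(i))
			continue;	/* failure is fine: core keeps doing DMA */

		rq[i].do_dma = true;	/* opt in to driver-managed DMA */
	}
}

int main(void)
{
	struct rx_queue rq[4] = { { false } };

	set_premapped(rq, 4);
	for (int i = 0; i < 4; i++)
		printf("queue %d: do_dma=%d\n", i, rq[i].do_dma);
	return 0;
}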
> @@ -2036,7 +2049,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
>  
>       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>       if (err < 0) {
> -             virtnet_rq_unmap(rq, buf, 0);
> +             if (rq->do_dma)
> +                     virtnet_rq_unmap(rq, buf, 0);
>               put_page(virt_to_head_page(buf));
>       }
>  
> @@ -2150,7 +2164,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
>       ctx = mergeable_len_to_ctx(len + room, headroom);
>       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>       if (err < 0) {
> -             virtnet_rq_unmap(rq, buf, 0);
> +             if (rq->do_dma)
> +                     virtnet_rq_unmap(rq, buf, 0);
>               put_page(virt_to_head_page(buf));
>       }
>  
> @@ -5231,7 +5246,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
>       int i;
>       for (i = 0; i < vi->max_queue_pairs; i++)
>               if (vi->rq[i].alloc_frag.page) {
> -                     if (vi->rq[i].last_dma)
> +                     if (vi->rq[i].do_dma && vi->rq[i].last_dma)
>                               virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
>                       put_page(vi->rq[i].alloc_frag.page);
>               }
> -- 
> MST
