On Mon, Mar 27, 2023 at 12:05 PM Xuan Zhuo <[email protected]> wrote:
>
> The virtio core only supports virtual addresses; DMA mapping is completed
> inside the virtio core.
>
> In some scenarios (such as the AF_XDP), the memory is allocated
> and DMA mapping is completed in advance, so it is necessary for us to
> support passing the DMA address to virtio core.
>
> Drivers can use sg->dma_address to pass the mapped dma address to virtio
> core. If one sg->dma_address is used then all sgs must use sg->dma_address,
> otherwise all dma_address must be null when passing it to the APIs of
> virtio.
>
> Signed-off-by: Xuan Zhuo <[email protected]>

Acked-by: Jason Wang <[email protected]>

Thanks

> ---
>  drivers/virtio/virtio_ring.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 2afff1dc6c74..d5dffbe50070 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -1338,6 +1338,7 @@ static int virtqueue_add_indirect_packed(struct 
> vring_virtqueue *vq,
>         unsigned int i, n;
>         u16 head, id;
>         dma_addr_t addr;
> +       bool dma_map_internal;
>
>         head = vq->packed.next_avail_idx;
>         desc = alloc_indirect_packed(total_sg, gfp);
> @@ -1355,7 +1356,8 @@ static int virtqueue_add_indirect_packed(struct 
> vring_virtqueue *vq,
>         id = vq->free_head;
>         BUG_ON(id == vq->packed.vring.num);
>
> -       if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
> +       dma_map_internal = !sgs[0]->dma_address;
> +       if (dma_map_internal && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, 
> in_sgs))
>                 goto err_map;
>
>         for (n = 0; n < out_sgs + in_sgs; n++) {
> @@ -1417,6 +1419,8 @@ static int virtqueue_add_indirect_packed(struct 
> vring_virtqueue *vq,
>         vq->packed.desc_state[id].data = data;
>         vq->packed.desc_state[id].indir_desc = desc;
>         vq->packed.desc_state[id].last = id;
> +       vq->packed.desc_state[id].flags = dma_map_internal ? 
> VRING_STATE_F_MAP_INTERNAL : 0;
> +
>
>         vq->num_added += 1;
>
> @@ -1426,7 +1430,8 @@ static int virtqueue_add_indirect_packed(struct 
> vring_virtqueue *vq,
>         return 0;
>
>  unmap_release:
> -       virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> +       if (dma_map_internal)
> +               virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
>
>  err_map:
>         kfree(desc);
> @@ -1653,7 +1658,7 @@ static void detach_buf_packed(struct vring_virtqueue 
> *vq,
>                 if (!desc)
>                         return;
>
> -               if (vq->use_dma_api) {
> +               if (vq->use_dma_api && dma_map_internal) {
>                         len = vq->packed.desc_extra[id].len;
>                         for (i = 0; i < len / sizeof(struct 
> vring_packed_desc);
>                                         i++)
> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to