On Sat, Sep 21, 2024 at 04:21:41AM +0800, Wenyu Huang wrote:
> From: Wenyu Huang <[email protected]>
> 
> It's also available for packed ring now.
> 
> Signed-off-by: Wenyu Huang <[email protected]>


More specifically, what are you trying to address here?
Which configuration did not work but does now?
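If the goal is to let callers that allocate the ring memory themselves
(the vring_new_virtqueue() users, e.g. remoteproc-style transports)
negotiate VIRTIO_F_RING_PACKED instead of failing, please spell that
out in the commit log.

For reference, this is how I read the functional change, as a rough
sketch only -- the caller, buffer sizing and names below are made up
for illustration, not taken from your patch:

	#include <linux/gfp.h>
	#include <linux/virtio.h>
	#include <linux/virtio_ring.h>

	static int sketch_setup_vq(struct virtio_device *vdev,
				   bool (*notify)(struct virtqueue *),
				   void (*callback)(struct virtqueue *))
	{
		struct virtqueue *vq;
		/* caller-owned, contiguous ring memory (order is a guess) */
		void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);

		if (!pages)
			return -ENOMEM;

		/*
		 * Before this patch vring_new_virtqueue() returned NULL as
		 * soon as VIRTIO_F_RING_PACKED was negotiated; with it, a
		 * packed virtqueue is set up around the caller's memory.
		 */
		vq = vring_new_virtqueue(0, 128, PAGE_SIZE, vdev,
					 true /* weak_barriers */,
					 false /* context */, pages,
					 notify, callback, "sketch-vq");
		if (!vq) {
			free_pages((unsigned long)pages, 0);
			return -ENOMEM;
		}
		return 0;
	}

Is that the use case, or is there some other in-tree user you have in mind?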

> ---
>  drivers/virtio/virtio_ring.c | 153 +++++++++++++++++++++--------------
>  1 file changed, 92 insertions(+), 61 deletions(-)
> 
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index be7309b1e860..664a0c40ee05 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -223,7 +223,7 @@ struct vring_virtqueue {
>  #endif
>  };
>  
> -static struct virtqueue *__vring_new_virtqueue(unsigned int index,
> +static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
>                                              struct vring_virtqueue_split *vring_split,
>                                              struct virtio_device *vdev,
>                                              bool weak_barriers,
> @@ -232,6 +232,15 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
>                                              void (*callback)(struct virtqueue *),
>                                              const char *name,
>                                              struct device *dma_dev);
> +static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
> +                                            struct vring_virtqueue_packed *vring_packed,
> +                                            struct virtio_device *vdev,
> +                                            bool weak_barriers,
> +                                            bool context,
> +                                            bool (*notify)(struct virtqueue *),
> +                                            void (*callback)(struct virtqueue *),
> +                                            const char *name,
> +                                            struct device *dma_dev);
>  static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
>  static void vring_free(struct virtqueue *_vq);
>  
> @@ -1160,7 +1169,7 @@ static struct virtqueue *vring_create_virtqueue_split(
>       if (err)
>               return NULL;
>  
> -     vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
> +     vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
>                                  context, notify, callback, name, dma_dev);
>       if (!vq) {
>               vring_free_split(&vring_split, vdev, dma_dev);
> @@ -2064,62 +2073,21 @@ static struct virtqueue *vring_create_virtqueue_packed(
>       struct device *dma_dev)
>  {
>       struct vring_virtqueue_packed vring_packed = {};
> -     struct vring_virtqueue *vq;
> -     int err;
> +     struct virtqueue *vq;
>  
>       if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
> -             goto err_ring;
> -
> -     vq = kmalloc(sizeof(*vq), GFP_KERNEL);
> -     if (!vq)
> -             goto err_vq;
> -
> -     vq->vq.callback = callback;
> -     vq->vq.vdev = vdev;
> -     vq->vq.name = name;
> -     vq->vq.index = index;
> -     vq->vq.reset = false;
> -     vq->we_own_ring = true;
> -     vq->notify = notify;
> -     vq->weak_barriers = weak_barriers;
> -#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
> -     vq->broken = true;
> -#else
> -     vq->broken = false;
> -#endif
> -     vq->packed_ring = true;
> -     vq->dma_dev = dma_dev;
> -     vq->use_dma_api = vring_use_dma_api(vdev);
> -     vq->premapped = false;
> -     vq->do_unmap = vq->use_dma_api;
> -
> -     vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
> -             !context;
> -     vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
> -
> -     if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
> -             vq->weak_barriers = false;
> -
> -     err = vring_alloc_state_extra_packed(&vring_packed);
> -     if (err)
> -             goto err_state_extra;
> -
> -     virtqueue_vring_init_packed(&vring_packed, !!callback);
> +             return NULL;
>  
> -     virtqueue_init(vq, num);
> -     virtqueue_vring_attach_packed(vq, &vring_packed);
> +     vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
> +                                     context, notify, callback, name, dma_dev);
> +     if (!vq) {
> +             vring_free_packed(&vring_packed, vdev, dma_dev);
> +             return NULL;
> +     }
>  
> -     spin_lock(&vdev->vqs_list_lock);
> -     list_add_tail(&vq->vq.list, &vdev->vqs);
> -     spin_unlock(&vdev->vqs_list_lock);
> -     return &vq->vq;
> +     to_vvq(vq)->we_own_ring = true;
>  
> -err_state_extra:
> -     kfree(vq);
> -err_vq:
> -     vring_free_packed(&vring_packed, vdev, dma_dev);
> -err_ring:
> -     return NULL;
> +     return vq;
>  }
>  
>  static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
> @@ -2599,7 +2567,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
>  EXPORT_SYMBOL_GPL(vring_interrupt);
>  
>  /* Only available for split ring */
> -static struct virtqueue *__vring_new_virtqueue(unsigned int index,
> +static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
>                                              struct vring_virtqueue_split *vring_split,
>                                              struct virtio_device *vdev,
>                                              bool weak_barriers,
> @@ -2612,9 +2580,6 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
>       struct vring_virtqueue *vq;
>       int err;
>  
> -     if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
> -             return NULL;
> -
>       vq = kmalloc(sizeof(*vq), GFP_KERNEL);
>       if (!vq)
>               return NULL;
> @@ -2662,6 +2627,66 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
>       return &vq->vq;
>  }
>  
> +static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
> +                                            struct vring_virtqueue_packed *vring_packed,
> +                                            struct virtio_device *vdev,
> +                                            bool weak_barriers,
> +                                            bool context,
> +                                            bool (*notify)(struct virtqueue *),
> +                                            void (*callback)(struct virtqueue *),
> +                                            const char *name,
> +                                            struct device *dma_dev)
> +{
> +     struct vring_virtqueue *vq;
> +     int err;
> +
> +     vq = kmalloc(sizeof(*vq), GFP_KERNEL);
> +     if (!vq)
> +             return NULL;
> +
> +     vq->vq.callback = callback;
> +     vq->vq.vdev = vdev;
> +     vq->vq.name = name;
> +     vq->vq.index = index;
> +     vq->vq.reset = false;
> +     vq->we_own_ring = false;
> +     vq->notify = notify;
> +     vq->weak_barriers = weak_barriers;
> +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
> +     vq->broken = true;
> +#else
> +     vq->broken = false;
> +#endif
> +     vq->packed_ring = true;
> +     vq->dma_dev = dma_dev;
> +     vq->use_dma_api = vring_use_dma_api(vdev);
> +     vq->premapped = false;
> +     vq->do_unmap = vq->use_dma_api;
> +
> +     vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
> +             !context;
> +     vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
> +
> +     if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
> +             vq->weak_barriers = false;
> +
> +     err = vring_alloc_state_extra_packed(vring_packed);
> +     if (err) {
> +             kfree(vq);
> +             return NULL;
> +     }
> +
> +     virtqueue_vring_init_packed(vring_packed, !!callback);
> +
> +     virtqueue_init(vq, vring_packed->vring.num);
> +     virtqueue_vring_attach_packed(vq, vring_packed);
> +
> +     spin_lock(&vdev->vqs_list_lock);
> +     list_add_tail(&vq->vq.list, &vdev->vqs);
> +     spin_unlock(&vdev->vqs_list_lock);
> +     return &vq->vq;
> +}
> +
>  struct virtqueue *vring_create_virtqueue(
>       unsigned int index,
>       unsigned int num,
> @@ -2840,7 +2865,6 @@ int virtqueue_reset(struct virtqueue *_vq,
>  }
>  EXPORT_SYMBOL_GPL(virtqueue_reset);
>  
> -/* Only available for split ring */
>  struct virtqueue *vring_new_virtqueue(unsigned int index,
>                                     unsigned int num,
>                                     unsigned int vring_align,
> @@ -2853,12 +2877,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
>                                     const char *name)
>  {
>       struct vring_virtqueue_split vring_split = {};
> +     struct vring_virtqueue_packed vring_packed = {};
>  
> -     if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
> -             return NULL;
> +     if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> +             vring_packed.vring.num = num;
> +             vring_packed.vring.desc = pages;
> +             return __vring_new_virtqueue_packed(index, &vring_packed,
> +                                                 vdev, weak_barriers,
> +                                                 context, notify, callback,
> +                                                 name, vdev->dev.parent);
> +     }
>  
>       vring_init(&vring_split.vring, num, pages, vring_align);
> -     return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
> +     return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
>                                    context, notify, callback, name,
>                                    vdev->dev.parent);
>  }
> -- 
> 2.43.0

