On Mon, Aug 23, 2021 at 10:14:37AM +0200, Vincent Whitchurch wrote:
> vhost always uses SMP-conditional barriers, but these may not be
> sufficient when vhost is used to communicate between heterogeneous
> processors in an AMP configuration, especially since they're NOPs on
> !SMP builds.
> 
> To solve this, use the virtio_*() barrier functions and ask them for
> non-weak barriers if requested by userspace.
> 
> Signed-off-by: Vincent Whitchurch <[email protected]>

I am inclined to say let's (ab)use VIRTIO_F_ORDER_PLATFORM for this.
Jason what do you think?

Also, is the use of the DMA variants really the intended thing here? Could
you point me at some examples, please?


> ---
>  drivers/vhost/vhost.c      | 23 ++++++++++++++---------
>  drivers/vhost/vhost.h      |  2 ++
>  include/uapi/linux/vhost.h |  2 ++
>  3 files changed, 18 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index b9e853e6094d..f7172e1bc395 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -500,6 +500,7 @@ void vhost_dev_init(struct vhost_dev *dev,
>               vq->indirect = NULL;
>               vq->heads = NULL;
>               vq->dev = dev;
> +             vq->weak_barriers = true;
>               mutex_init(&vq->mutex);
>               vhost_vq_reset(dev, vq);
>               if (vq->handle_kick)
> @@ -1801,6 +1802,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
>               if (ctx)
>                       eventfd_ctx_put(ctx);
>               break;
> +     case VHOST_SET_STRONG_BARRIERS:
> +             for (i = 0; i < d->nvqs; ++i)
> +                     d->vqs[i]->weak_barriers = false;
> +             break;
>       default:
>               r = -ENOIOCTLCMD;
>               break;
> @@ -1927,7 +1932,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
>       int i, r;
>  
>       /* Make sure data written is seen before log. */
> -     smp_wmb();
> +     virtio_wmb(vq->weak_barriers);
>  
>       if (vq->iotlb) {
>               for (i = 0; i < count; i++) {
> @@ -1964,7 +1969,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
>               return -EFAULT;
>       if (unlikely(vq->log_used)) {
>               /* Make sure the flag is seen before log. */
> -             smp_wmb();
> +             virtio_wmb(vq->weak_barriers);
>               /* Log used flag write. */
>               used = &vq->used->flags;
>               log_used(vq, (used - (void __user *)vq->used),
> @@ -1982,7 +1987,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
>       if (unlikely(vq->log_used)) {
>               void __user *used;
>               /* Make sure the event is seen before log. */
> -             smp_wmb();
> +             virtio_wmb(vq->weak_barriers);
>               /* Log avail event write */
>               used = vhost_avail_event(vq);
>               log_used(vq, (used - (void __user *)vq->used),
> @@ -2228,7 +2233,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>               /* Only get avail ring entries after they have been
>                * exposed by guest.
>                */
> -             smp_rmb();
> +             virtio_rmb(vq->weak_barriers);
>       }
>  
>       /* Grab the next descriptor number they're advertising, and increment
> @@ -2367,7 +2372,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
>       }
>       if (unlikely(vq->log_used)) {
>               /* Make sure data is seen before log. */
> -             smp_wmb();
> +             virtio_wmb(vq->weak_barriers);
>               /* Log used ring entry write. */
>               log_used(vq, ((void __user *)used - (void __user *)vq->used),
>                        count * sizeof *used);
> @@ -2402,14 +2407,14 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
>       r = __vhost_add_used_n(vq, heads, count);
>  
>       /* Make sure buffer is written before we update index. */
> -     smp_wmb();
> +     virtio_wmb(vq->weak_barriers);
>       if (vhost_put_used_idx(vq)) {
>               vq_err(vq, "Failed to increment used idx");
>               return -EFAULT;
>       }
>       if (unlikely(vq->log_used)) {
>               /* Make sure used idx is seen before log. */
> -             smp_wmb();
> +             virtio_wmb(vq->weak_barriers);
>               /* Log used index update. */
>               log_used(vq, offsetof(struct vring_used, idx),
>                        sizeof vq->used->idx);
> @@ -2428,7 +2433,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>       /* Flush out used index updates. This is paired
>        * with the barrier that the Guest executes when enabling
>        * interrupts. */
> -     smp_mb();
> +     virtio_mb(vq->weak_barriers);
>  
>       if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
>           unlikely(vq->avail_idx == vq->last_avail_idx))
> @@ -2530,7 +2535,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>       }
>       /* They could have slipped one in as we were doing that: make
>        * sure it's written, then check again. */
> -     smp_mb();
> +     virtio_mb(vq->weak_barriers);
>       r = vhost_get_avail_idx(vq, &avail_idx);
>       if (r) {
>               vq_err(vq, "Failed to check avail idx at %p: %d\n",
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 638bb640d6b4..5bd20d0db457 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -108,6 +108,8 @@ struct vhost_virtqueue {
>       bool log_used;
>       u64 log_addr;
>  
> +     bool weak_barriers;
> +
>       struct iovec iov[UIO_MAXIOV];
>       struct iovec iotlb_iov[64];
>       struct iovec *indirect;
> diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
> index c998860d7bbc..4b8656307f51 100644
> --- a/include/uapi/linux/vhost.h
> +++ b/include/uapi/linux/vhost.h
> @@ -97,6 +97,8 @@
>  #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
>  #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
>  
> +#define VHOST_SET_STRONG_BARRIERS _IO(VHOST_VIRTIO, 0x27)
> +
>  /* VHOST_NET specific defines */
>  
>  /* Attach virtio net ring to a raw socket, or tap device.
> -- 
> 2.28.0

_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to