Use READ_ONCE()/WRITE_ONCE() for the wait_event() flags (done and
wq_buf_avail). They are read by waiters without holding pmem_lock, so
make the accesses explicit single loads/stores and prevent the compiler
from reordering or caching them across the wait/wake paths.

Signed-off-by: Li Chen <[email protected]>
---
v2->v3:
- Split out READ_ONCE()/WRITE_ONCE() updates from patch 1/5 (no functional
  change intended).

 drivers/nvdimm/nd_virtio.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index a1ad8d67ad2d..ada0c679cf2e 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -18,9 +18,9 @@ static void virtio_pmem_wake_one_waiter(struct virtio_pmem *vpmem)
 
        req_buf = list_first_entry(&vpmem->req_list,
                                   struct virtio_pmem_request, list);
-       req_buf->wq_buf_avail = true;
+       list_del_init(&req_buf->list);
+       WRITE_ONCE(req_buf->wq_buf_avail, true);
        wake_up(&req_buf->wq_buf);
-       list_del(&req_buf->list);
 }
 
  /* The interrupt handler */
@@ -34,7 +34,7 @@ void virtio_pmem_host_ack(struct virtqueue *vq)
        spin_lock_irqsave(&vpmem->pmem_lock, flags);
        while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
                virtio_pmem_wake_one_waiter(vpmem);
-               req_data->done = true;
+               WRITE_ONCE(req_data->done, true);
                wake_up(&req_data->host_acked);
        }
        spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
@@ -66,7 +66,7 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
        if (!req_data)
                return -ENOMEM;
 
-       req_data->done = false;
+       WRITE_ONCE(req_data->done, false);
        init_waitqueue_head(&req_data->host_acked);
        init_waitqueue_head(&req_data->wq_buf);
        INIT_LIST_HEAD(&req_data->list);
@@ -87,12 +87,12 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
                                        GFP_ATOMIC)) == -ENOSPC) {
 
                dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
-               req_data->wq_buf_avail = false;
+               WRITE_ONCE(req_data->wq_buf_avail, false);
                list_add_tail(&req_data->list, &vpmem->req_list);
                spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
 
                /* A host response results in "host_ack" getting called */
-               wait_event(req_data->wq_buf, req_data->wq_buf_avail);
+               wait_event(req_data->wq_buf, READ_ONCE(req_data->wq_buf_avail));
                spin_lock_irqsave(&vpmem->pmem_lock, flags);
        }
        err1 = virtqueue_kick(vpmem->req_vq);
@@ -106,7 +106,7 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
                err = -EIO;
        } else {
                /* A host response results in "host_ack" getting called */
-               wait_event(req_data->host_acked, req_data->done);
+               wait_event(req_data->host_acked, READ_ONCE(req_data->done));
                err = le32_to_cpu(req_data->resp.ret);
        }
 
-- 
2.52.0

Reply via email to