The current struct virtio_scsi_event_node layout has two problems:

1) The event field (device-written, DMA_FROM_DEVICE) and the work
   field (CPU-written via INIT_WORK/queue_work) share a cacheline.
   On non-cache-coherent platforms, CPU writes to work can corrupt
   device-written event data (see the layout sketch below).

2) If ARCH_DMA_MINALIGN is large enough, the 8 event buffers
   embedded in event_list share cachelines with each other,
   triggering CONFIG_DMA_API_DEBUG warnings.
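
For illustration, on a 64-bit build with 64-byte cachelines the old
layout packs all three fields into a single line (the offsets below
are an assumption; exact packing is architecture dependent):

        struct virtio_scsi_event_node {
                struct virtio_scsi *vscsi;      /* offset  0, CPU-written    */
                struct virtio_scsi_event event; /* offset  8, device-written */
                struct work_struct work;        /* offset 24, CPU-written    */
        };

A writeback of the dirty cacheline holding work can then overwrite
bytes the device has already DMAed into event.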

Fix the corruption by moving the event buffers out of the nodes and
into a separate array, aligned using
__dma_from_device_aligned_begin/end.
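
A minimal sketch of what these markers could expand to, assuming
zero-size array members aligned to ARCH_DMA_MINALIGN in the style
of the __cacheline_group_begin/end helpers in <linux/cache.h>
(hypothetical definitions, for illustration only; the real ones may
differ):

        /* assumed expansion - not the actual definitions */
        #define __dma_from_device_aligned_begin \
                __u8 __dma_from_device_begin[0] __aligned(ARCH_DMA_MINALIGN);
        #define __dma_from_device_aligned_end \
                __u8 __dma_from_device_end[0] __aligned(ARCH_DMA_MINALIGN);

The begin marker forces the events array onto its own DMA cacheline;
the end marker pads the struct so the req_vqs[] member that follows
starts on a fresh line.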

Suppress the (now spurious) DMA debug warnings using
virtqueue_add_inbuf_cache_clean().
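
The new helper is used as a drop-in replacement for
virtqueue_add_inbuf(), taking the same arguments; presumably it
asserts that the CPU keeps the buffer's cachelines clean while the
device owns them, which lets the DMA debug overlap checks be
relaxed.  Calling convention, as in the hunk below:

        /* same arguments as virtqueue_add_inbuf() */
        err = virtqueue_add_inbuf_cache_clean(vq, &sg, 1, data,
                                              GFP_ATOMIC);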

Signed-off-by: Michael S. Tsirkin <[email protected]>
---
 drivers/scsi/virtio_scsi.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 96a69edddbe5..b0ce3884e22a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,6 +29,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_devinfo.h>
 #include <linux/seqlock.h>
+#include <linux/dma-mapping.h>
 
 #include "sd.h"
 
@@ -61,7 +62,7 @@ struct virtio_scsi_cmd {
 
 struct virtio_scsi_event_node {
        struct virtio_scsi *vscsi;
-       struct virtio_scsi_event event;
+       struct virtio_scsi_event *event;
        struct work_struct work;
 };
 
@@ -89,6 +90,12 @@ struct virtio_scsi {
 
        struct virtio_scsi_vq ctrl_vq;
        struct virtio_scsi_vq event_vq;
+
+       /* DMA buffers for events - aligned, kept separate from CPU-written fields */
+       __dma_from_device_aligned_begin
+       struct virtio_scsi_event events[VIRTIO_SCSI_EVENT_LEN];
+       __dma_from_device_aligned_end
+
        struct virtio_scsi_vq req_vqs[];
 };
 
@@ -237,12 +244,12 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
        unsigned long flags;
 
        INIT_WORK(&event_node->work, virtscsi_handle_event);
-       sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+       sg_init_one(&sg, event_node->event, sizeof(struct virtio_scsi_event));
 
        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 
-       err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
-                                 GFP_ATOMIC);
+       err = virtqueue_add_inbuf_cache_clean(vscsi->event_vq.vq, &sg, 1, event_node,
+                                             GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vscsi->event_vq.vq);
 
@@ -257,6 +264,7 @@ static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
 
        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
                vscsi->event_list[i].vscsi = vscsi;
+               vscsi->event_list[i].event = &vscsi->events[i];
                virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
        }
 
@@ -380,7 +388,7 @@ static void virtscsi_handle_event(struct work_struct *work)
        struct virtio_scsi_event_node *event_node =
                container_of(work, struct virtio_scsi_event_node, work);
        struct virtio_scsi *vscsi = event_node->vscsi;
-       struct virtio_scsi_event *event = &event_node->event;
+       struct virtio_scsi_event *event = event_node->event;
 
        if (event->event &
            cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
-- 
MST

