Reorder struct virtio_vsock fields to place the DMA buffer (event_list)
last. This way the padding needed to align the end of the DMA group on
ARCH_DMA_MINALIGN coincides with the struct's own trailing padding,
instead of leaving a hole in the middle of the structure.

Suggested-by: Stefano Garzarella <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
---

Changes from v2:
        moved event_lock and event_run too, to keep the
        event-related fields logically together, as suggested
        by Stefano Garzarella.

Note: this is the only change in v3, and it's cosmetic, so I am not
reposting the whole patchset.
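
For illustration only (not part of the patch): a minimal userspace
sketch of the layout effect, assuming ARCH_DMA_MINALIGN is 128 as on
typical arm64 configs, and modeling __dma_from_device_group_begin()/
_end() simply as start/end alignment of the buffer:

        #include <stdio.h>

        #define DMA_MINALIGN 128        /* assumed arm64 value */

        /* DMA buffer in the middle: the next field must be pushed
         * past the end of the aligned group, leaving a hole. */
        struct mid {
                int a;
                char event[8] __attribute__((aligned(DMA_MINALIGN)));
                int b __attribute__((aligned(DMA_MINALIGN)));
        };

        /* DMA buffer last: the end-of-group padding coincides with
         * the struct's trailing padding. */
        struct last {
                int a;
                int b;
                char event[8] __attribute__((aligned(DMA_MINALIGN)));
        };

        int main(void)
        {
                printf("mid:  %zu bytes\n", sizeof(struct mid));   /* 384 */
                printf("last: %zu bytes\n", sizeof(struct last));  /* 256 */
                return 0;
        }

Under these assumptions the mid-struct placement costs a full extra
DMA_MINALIGN chunk (384 vs 256 bytes) for the same fields.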


 net/vmw_vsock/virtio_transport.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 999a0839726a..b333a7591b26 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -55,15 +55,6 @@ struct virtio_vsock {
        int rx_buf_nr;
        int rx_buf_max_nr;
 
-       /* The following fields are protected by event_lock.
-        * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
-        */
-       struct mutex event_lock;
-       bool event_run;
-       __dma_from_device_group_begin();
-       struct virtio_vsock_event event_list[8];
-       __dma_from_device_group_end();
-
        u32 guest_cid;
        bool seqpacket_allow;
 
@@ -77,6 +68,15 @@ struct virtio_vsock {
         */
        struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
        struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
+
+       /* The following fields are protected by event_lock.
+        * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
+        */
+       struct mutex event_lock;
+       bool event_run;
+       __dma_from_device_group_begin();
+       struct virtio_vsock_event event_list[8];
+       __dma_from_device_group_end();
 };
 
 static u32 virtio_transport_get_local_cid(void)
-- 
MST

