The mvneta driver can run on non-cache-coherent devices, so it is
necessary to sync DMA buffers before handing them to the device
in order to avoid memory corruption. This patch introduces a performance
penalty; a more sophisticated logic will be needed later
in order to avoid the DMA sync whenever possible.

Signed-off-by: Lorenzo Bianconi <lore...@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/net/ethernet/marvell/mvneta.c 
b/drivers/net/ethernet/marvell/mvneta.c
index 79a6bac0192b..ba4aa9bbc798 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1821,6 +1821,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
                            struct mvneta_rx_queue *rxq,
                            gfp_t gfp_mask)
 {
+       enum dma_data_direction dma_dir;
        dma_addr_t phys_addr;
        struct page *page;
 
@@ -1830,6 +1831,9 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
                return -ENOMEM;
 
        phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+       dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+       dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
+                                  MVNETA_MAX_RX_BUF_SIZE, dma_dir);
        mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 
        return 0;
-- 
2.21.0

Reply via email to