Instead of just mangling the page link, create a copy of the sg_table
and copy over only the DMA addresses, not the pages.

Still quite a hack, but this at least allows the exporter to properly
keep its sg_table intact.

This is important, for example, for the system DMA-heap, which needs
its sg_table to sync CPU writes with device accesses.

Signed-off-by: Christian König <[email protected]>
---
 drivers/dma-buf/dma-buf.c | 68 +++++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 14 deletions(-)
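
Not part of the patch, just an importer-side sketch for illustration of
what this enforces: with CONFIG_DMABUF_DEBUG enabled the importer only
gets valid DMA addresses and lengths back, so it has to stick to the
DMA accessors and must never dereference the struct pages. The function
name example_importer_map and the DMA_BIDIRECTIONAL direction are made
up for the example.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

static int example_importer_map(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i;

	/* Unlocked variant, so no dma_resv lock handling in the sketch. */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* Only the DMA addresses and lengths are valid in the debug copy. */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program the device DMA engine with addr/len here */
	}

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	return 0;
}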

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2305bb2cc1f1..1fe5781d8862 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -828,21 +828,59 @@ void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
 
-static void mangle_sg_table(struct sg_table *sg_table)
+static int dma_buf_mangle_sg_table(struct sg_table **sg_table)
 {
-#ifdef CONFIG_DMABUF_DEBUG
-       int i;
-       struct scatterlist *sg;
-
-       /* To catch abuse of the underlying struct page by importers mix
-        * up the bits, but take care to preserve the low SG_ bits to
-        * not corrupt the sgt. The mixing is undone on unmap
-        * before passing the sgt back to the exporter.
+       struct sg_table *to, *from = *sg_table;
+       struct scatterlist *to_sg, *from_sg;
+       int i, ret;
+
+       if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+               return 0;
+
+       /*
+        * To catch abuse of the underlying struct page by importers copy the
+        * sg_table without copying the page_link and give only the copy back to
+        * the importer.
         */
-       for_each_sgtable_sg(sg_table, sg, i)
-               sg->page_link ^= ~0xffUL;
-#endif
+       to = kzalloc(sizeof(*to), GFP_KERNEL);
+       if (!to)
+               return -ENOMEM;
 
+       ret = sg_alloc_table(to, from->nents, GFP_KERNEL);
+       if (ret)
+               goto free_to;
+
+       to_sg = to->sgl;
+       for_each_sgtable_dma_sg(from, from_sg, i) {
+               sg_set_page(to_sg, NULL, 0, 0);
+               sg_dma_address(to_sg) = sg_dma_address(from_sg);
+               sg_dma_len(to_sg) = sg_dma_len(from_sg);
+               to_sg = sg_next(to_sg);
+       }
+
+       /*
+        * Store the original sg_table in the first page_link of the copy so
+        * that we can revert everything back again on unmap.
+        */
+       to->sgl[0].page_link = (unsigned long)from;
+       *sg_table = to;
+       return 0;
+
+free_to:
+       kfree(to);
+       return ret;
+}
+
+static void dma_buf_demangle_sg_table(struct sg_table **sg_table)
+{
+       struct sg_table *copy = *sg_table;
+
+       if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+               return;
+
+       *sg_table = (void *)copy->sgl[0].page_link;
+       sg_free_table(copy);
+       kfree(copy);
 }
 
 static inline bool
@@ -1139,7 +1177,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                if (ret < 0)
                        goto error_unmap;
        }
-       mangle_sg_table(sg_table);
+       ret = dma_buf_mangle_sg_table(&sg_table);
+       if (ret)
+               goto error_unmap;
 
        if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
                struct scatterlist *sg;
@@ -1220,7 +1260,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 
        dma_resv_assert_held(attach->dmabuf->resv);
 
-       mangle_sg_table(sg_table);
+       dma_buf_demangle_sg_table(&sg_table);
        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 
        if (dma_buf_pin_on_map(attach))
-- 
2.43.0
