In ttm_bo_vmap(), set up single-page mappings with kmap() in certain
cases. The feature is already present in ttm_bo_kmap().

This functionality is required by DRM's xe driver, which claims that
using kmap() is an optimization over vmap(). [1] Reading the commit
at [2] indicates otherwise. It is not possible to use kmap_local_page()
and kunmap_local(), as TTM cannot guarantee the requirements for
ordering these calls. [3]

Signed-off-by: Thomas Zimmermann <[email protected]>
Link: https://elixir.bootlin.com/linux/v6.9/source/drivers/gpu/drm/xe/xe_bo.c#L1870 # 1
Link: https://lore.kernel.org/all/[email protected]/T/#u # 2
Link: https://elixir.bootlin.com/linux/v6.9/source/include/linux/highmem.h#L70 # 3
---
 drivers/gpu/drm/ttm/ttm_bo_util.c | 33 ++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 31f9772f05dac..c06cfccace39d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -516,6 +516,8 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo,
                        .no_wait_gpu = false
                };
                struct ttm_tt *ttm = bo->ttm;
+               struct ttm_resource_manager *man =
+                       ttm_manager_type(bo->bdev, bo->resource->mem_type);
                unsigned long start_page = offset >> PAGE_SHIFT;
                unsigned long aligned_size = size + (offset - (start_page << PAGE_SHIFT));
                unsigned long num_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
@@ -527,15 +529,25 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo,
                if (ret)
                        return ret;
 
-               /*
-                * We need to use vmap to get the desired page protection
-                * or to make the buffer object look contiguous.
-                */
-               prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-               vaddr = vmap(ttm->pages + start_page, num_pages, 0, prot);
-               if (!vaddr)
-                       return -ENOMEM;
-               alloc_flags = ttm_bo_map_vmap;
+               if (num_pages == 1 && ttm->caching == ttm_cached &&
+                   !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
+                       /*
+                        * We're mapping a single page, and the desired
+                        * page protection is consistent with the bo.
+                        */
+                       vaddr = kmap(ttm->pages[start_page]);
+                       alloc_flags = ttm_bo_map_kmap;
+               } else {
+                       /*
+                        * We need to use vmap to get the desired page protection
+                        * or to make the buffer object look contiguous.
+                        */
+                       prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+                       vaddr = vmap(ttm->pages + start_page, num_pages, 0, prot);
+                       if (!vaddr)
+                               return -ENOMEM;
+                       alloc_flags = ttm_bo_map_vmap;
+               }
 
                iosys_map_set_vaddr(map, vaddr);
                map->alloc_flags = alloc_flags;
@@ -567,6 +579,9 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
        case ttm_bo_map_vmap:
                vunmap(map->vaddr);
                break;
+       case ttm_bo_map_kmap:
+               kunmap(kmap_to_page(map->vaddr));
+               break;
        case ttm_bo_map_premapped:
                break;
        default:
-- 
2.45.2

Reply via email to