GPUs typically benefit from contiguous memory via reduced TLB pressure and
improved caching, but the maximum contiguous block size which still adds a
performance benefit depends on the hardware design.

The TTM pool allocator by default tries (hard) to allocate blocks up to the
system MAX_PAGE_ORDER. That limit varies by CPU platform and can also be
configured via Kconfig (CONFIG_ARCH_FORCE_MAX_ORDER).

If that limit is higher than the GPU can make any extra use of, let
individual drivers tell TTM the allocation order above which the pool
allocator can afford to put in a little less effort.

We implement this by disabling direct reclaim for those allocations, which
reduces allocation latency and lowers the demand on the page allocator in
cases where expending this effort is not critical for the GPU in question.
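
For illustration, a driver whose hardware sees no gain above order 4
(64 KiB with 4 KiB pages) could opt in along these lines. This is only a
sketch with made-up names, assuming the flags-based pool initialisation
used elsewhere in this series:

  /*
   * Hypothetical driver opt-in; the order value and the my_drv naming
   * are illustrative only.
   */
  #define MY_DRV_BENEFICIAL_ORDER 4

  ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE,
                TTM_POOL_USE_DMA_ALLOC |
                TTM_POOL_BENEFICIAL_ORDER(MY_DRV_BENEFICIAL_ORDER));

With that set, ttm_pool_alloc_page() clears __GFP_DIRECT_RECLAIM from
attempts of order 5 and larger, so they fail fast and fall back to smaller
orders instead of triggering reclaim.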

Signed-off-by: Tvrtko Ursulin <[email protected]>
Cc: Christian König <[email protected]>
Cc: Thadeu Lima de Souza Cascardo <[email protected]>
Reviewed-by: Christian König <[email protected]>
---
 drivers/gpu/drm/ttm/ttm_pool.c          | 8 ++++++++
 drivers/gpu/drm/ttm/ttm_pool_internal.h | 5 +++++
 include/drm/ttm/ttm_pool.h              | 6 ++++--
 3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 5d007d1a15f2..183e39d0d505 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -136,6 +136,7 @@ static DECLARE_RWSEM(pool_shrink_rwsem);
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                                        unsigned int order)
 {
+       const unsigned int beneficial_order = ttm_pool_beneficial_order(pool);
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        struct page *p;
@@ -149,6 +150,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
                        __GFP_THISNODE;
 
+       /*
+        * Do not add latency to the allocation path for allocation orders
+        * the device told us do not bring any additional performance gains.
+        */
+       if (beneficial_order && order > beneficial_order)
+               gfp_flags &= ~__GFP_DIRECT_RECLAIM;
+
        if (!ttm_pool_uses_dma_alloc(pool)) {
                p = alloc_pages_node(pool->nid, gfp_flags, order);
                if (p)
diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
index 91aa156ddded..638497fcc8a8 100644
--- a/drivers/gpu/drm/ttm/ttm_pool_internal.h
+++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
@@ -16,4 +16,9 @@ static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
        return pool->flags & TTM_POOL_USE_DMA32;
 }
 
+static inline unsigned int ttm_pool_beneficial_order(struct ttm_pool *pool)
+{
+       return pool->flags & 0xff;
+}
+
 #endif
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 70db2e5c950f..60753b051c65 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -59,8 +59,10 @@ struct ttm_pool_type {
        struct list_head pages;
 };
 
-#define TTM_POOL_USE_DMA_ALLOC         BIT(0) /* Use coherent DMA allocations. */
-#define TTM_POOL_USE_DMA32             BIT(1) /* Use GFP_DMA32 allocations. */
+/* Check helpers in ttm_pool_internal.h when modifying: */
+#define TTM_POOL_BENEFICIAL_ORDER(n)   ((n) & 0xff) /* Max order which caller can benefit from */
+#define TTM_POOL_USE_DMA_ALLOC         BIT(8) /* Use coherent DMA allocations. */
+#define TTM_POOL_USE_DMA32             BIT(9) /* Use GFP_DMA32 allocations. */
 
 /**
  * struct ttm_pool - Pool for all caching and orders
-- 
2.48.0
