On 03.10.25 15:58, Tvrtko Ursulin wrote:
> No functional change but to allow easier refactoring in the future.
> 
> Signed-off-by: Tvrtko Ursulin <[email protected]>
> Cc: Christian König <[email protected]>
> Cc: Thomas Hellström <[email protected]>
> ---
>  drivers/gpu/drm/ttm/ttm_pool.c | 28 ++++++++++++++--------------
>  drivers/gpu/drm/ttm/ttm_tt.c   |  9 +++++----
>  include/drm/ttm/ttm_pool.h     | 10 ++++++++++
>  3 files changed, 29 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index baf27c70a419..a9430b516fc3 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -148,7 +148,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool 
> *pool, gfp_t gfp_flags,
>               gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
>                       __GFP_THISNODE;
>  
> -     if (!pool->use_dma_alloc) {
> +     if (!ttm_pool_uses_dma_alloc(pool)) {
>               p = alloc_pages_node(pool->nid, gfp_flags, order);
>               if (p)
>                       p->private = order;
> @@ -200,7 +200,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, 
> enum ttm_caching caching,
>               set_pages_wb(p, 1 << order);
>  #endif
>  
> -     if (!pool || !pool->use_dma_alloc) {
> +     if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
>               __free_pages(p, order);
>               return;
>       }
> @@ -243,7 +243,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned 
> int order,
>  {
>       dma_addr_t addr;
>  
> -     if (pool->use_dma_alloc) {
> +     if (ttm_pool_uses_dma_alloc(pool)) {
>               struct ttm_pool_dma *dma = (void *)p->private;
>  
>               addr = dma->addr;
> @@ -265,7 +265,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, 
> dma_addr_t dma_addr,
>                          unsigned int num_pages)
>  {
>       /* Unmapped while freeing the page */
> -     if (pool->use_dma_alloc)
> +     if (ttm_pool_uses_dma_alloc(pool))
>               return;
>  
>       dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
> @@ -339,7 +339,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct 
> ttm_pool *pool,
>                                                 enum ttm_caching caching,
>                                                 unsigned int order)
>  {
> -     if (pool->use_dma_alloc)
> +     if (ttm_pool_uses_dma_alloc(pool))
>               return &pool->caching[caching].orders[order];
>  
>  #ifdef CONFIG_X86
> @@ -348,7 +348,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct 
> ttm_pool *pool,
>               if (pool->nid != NUMA_NO_NODE)
>                       return &pool->caching[caching].orders[order];
>  
> -             if (pool->use_dma32)
> +             if (ttm_pool_uses_dma32(pool))
>                       return &global_dma32_write_combined[order];
>  
>               return &global_write_combined[order];
> @@ -356,7 +356,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct 
> ttm_pool *pool,
>               if (pool->nid != NUMA_NO_NODE)
>                       return &pool->caching[caching].orders[order];
>  
> -             if (pool->use_dma32)
> +             if (ttm_pool_uses_dma32(pool))
>                       return &global_dma32_uncached[order];
>  
>               return &global_uncached[order];
> @@ -396,7 +396,7 @@ static unsigned int ttm_pool_shrink(void)
>  /* Return the allocation order based for a page */
>  static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page 
> *p)
>  {
> -     if (pool->use_dma_alloc) {
> +     if (ttm_pool_uses_dma_alloc(pool)) {
>               struct ttm_pool_dma *dma = (void *)p->private;
>  
>               return dma->vaddr & ~PAGE_MASK;
> @@ -719,7 +719,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct 
> ttm_tt *tt,
>       if (ctx->gfp_retry_mayfail)
>               gfp_flags |= __GFP_RETRY_MAYFAIL;
>  
> -     if (pool->use_dma32)
> +     if (ttm_pool_uses_dma32(pool))
>               gfp_flags |= GFP_DMA32;
>       else
>               gfp_flags |= GFP_HIGHUSER;
> @@ -977,7 +977,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt 
> *tt,
>               return -EINVAL;
>  
>       if ((!ttm_backup_bytes_avail() && !flags->purge) ||
> -         pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
> +         ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
>               return -EBUSY;
>  
>  #ifdef CONFIG_X86
> @@ -1014,7 +1014,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct 
> ttm_tt *tt,
>       if (flags->purge)
>               return shrunken;
>  
> -     if (pool->use_dma32)
> +     if (ttm_pool_uses_dma32(pool))
>               gfp = GFP_DMA32;
>       else
>               gfp = GFP_HIGHUSER;
> @@ -1068,7 +1068,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device 
> *dev,
>  {
>       unsigned int i, j;
>  
> -     WARN_ON(!dev && use_dma_alloc);
> +     WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));
>  
>       pool->dev = dev;
>       pool->nid = nid;
> @@ -1239,7 +1239,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct 
> seq_file *m)
>  {
>       unsigned int i;
>  
> -     if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
> +     if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
>               seq_puts(m, "unused\n");
>               return 0;
>       }
> @@ -1250,7 +1250,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct 
> seq_file *m)
>       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
>               if (!ttm_pool_select_type(pool, i, 0))
>                       continue;
> -             if (pool->use_dma_alloc)
> +             if (ttm_pool_uses_dma_alloc(pool))
>                       seq_puts(m, "DMA ");
>               else
>                       seq_printf(m, "N%d ", pool->nid);
> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> index 506e257dfba8..3b21ec33c877 100644
> --- a/drivers/gpu/drm/ttm/ttm_tt.c
> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> @@ -93,7 +93,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool 
> zero_alloc)
>        * mapped TT pages need to be decrypted or otherwise the drivers
>        * will end up sending encrypted mem to the gpu.
>        */
> -     if (bdev->pool.use_dma_alloc && 
> cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
> +     if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
> +         cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
>               page_flags |= TTM_TT_FLAG_DECRYPTED;
>               drm_info_once(ddev, "TT memory decryption enabled.");
>       }
> @@ -378,7 +379,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
>  
>       if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
>               atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
> -             if (bdev->pool.use_dma32)
> +             if (ttm_pool_uses_dma32(&bdev->pool))
>                       atomic_long_add(ttm->num_pages,
>                                       &ttm_dma32_pages_allocated);
>       }
> @@ -416,7 +417,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
>  error:
>       if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
>               atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
> -             if (bdev->pool.use_dma32)
> +             if (ttm_pool_uses_dma32(&bdev->pool))
>                       atomic_long_sub(ttm->num_pages,
>                                       &ttm_dma32_pages_allocated);
>       }
> @@ -439,7 +440,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct 
> ttm_tt *ttm)
>  
>       if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
>               atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
> -             if (bdev->pool.use_dma32)
> +             if (ttm_pool_uses_dma32(&bdev->pool))
>                       atomic_long_sub(ttm->num_pages,
>                                       &ttm_dma32_pages_allocated);
>       }
> diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
> index 54cd34a6e4c0..22154d84fef9 100644
> --- a/include/drm/ttm/ttm_pool.h
> +++ b/include/drm/ttm/ttm_pool.h
> @@ -100,4 +100,14 @@ int ttm_pool_restore_and_alloc(struct ttm_pool *pool, 
> struct ttm_tt *tt,
>  int ttm_pool_mgr_init(unsigned long num_pages);
>  void ttm_pool_mgr_fini(void);
>  
> +static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
> +{
> +     return pool->use_dma_alloc;
> +}
> +
> +static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
> +{
> +     return pool->use_dma32;
> +}
> +

Please don't put these in the header. Neither drivers nor other TTM modules
should mess with such properties.

That is all internal to the pool, so keep the helpers local to ttm_pool.c.

Regards,
Christian.

>  #endif

Reply via email to