On 06.09.2023 10:01, Boris Brezillon wrote:
>On Tue,  5 Sep 2023 19:45:23 +0100
>Adrián Larumbe <[email protected]> wrote:
>
>> A BO's RSS is updated every time new pages are allocated on demand and
>> mapped for the object in the GPU page fault IRQ handler, but only for heap
>> buffers. This is unnecessary for non-heap buffers because they are mapped
>> onto the GPU's VA space and backed by physical memory in their entirety at
>> BO creation time.
>> 
>> This calculation is unnecessary for imported PRIME objects, since heap
>> buffers cannot be exported by our driver, and the actual BO RSS size is the
>> one reported in its attached dmabuf structure.
>> 
>> Signed-off-by: Adrián Larumbe <[email protected]>
>> ---
>>  drivers/gpu/drm/panfrost/panfrost_gem.c | 14 ++++++++++++++
>>  drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++++
>>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++++++++----
>>  3 files changed, 27 insertions(+), 4 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
>> index 7d8f83d20539..cb92c0ed7615 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_gem.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
>> @@ -208,6 +208,19 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
>>      return res;
>>  }
>>  
>> +static size_t panfrost_gem_rss(struct drm_gem_object *obj)
>> +{
>> +    struct panfrost_gem_object *bo = to_panfrost_bo(obj);
>> +
>> +    if (bo->is_heap)
>> +            return bo->heap_rss_size;
>> +    else if (bo->base.pages) {
>> +            WARN_ON(bo->heap_rss_size);
>> +            return bo->base.base.size;
>> +    } else
>> +            return 0;
>
>Nit: please add brackets on all conditional blocks, even if only the
>second one needs it.
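>
>I.e. something like this (untested; same logic as above, just with braces
>on every branch):
>
>	if (bo->is_heap) {
>		return bo->heap_rss_size;
>	} else if (bo->base.pages) {
>		WARN_ON(bo->heap_rss_size);
>		return bo->base.base.size;
>	} else {
>		return 0;
>	}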
>
>> +}
>> +
>>  static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>>      .free = panfrost_gem_free_object,
>>      .open = panfrost_gem_open,
>> @@ -220,6 +233,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>>      .vunmap = drm_gem_shmem_object_vunmap,
>>      .mmap = drm_gem_shmem_object_mmap,
>>      .status = panfrost_gem_status,
>> +    .rss = panfrost_gem_rss,
>>      .vm_ops = &drm_gem_shmem_vm_ops,
>>  };
>>  
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
>> index ad2877eeeccd..13c0a8149c3a 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_gem.h
>> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
>> @@ -36,6 +36,11 @@ struct panfrost_gem_object {
>>       */
>>      atomic_t gpu_usecount;
>>  
>> +    /*
>> +     * Object chunk size currently mapped onto physical memory
>> +     */
>> +    size_t heap_rss_size;
>> +
>>      bool noexec             :1;
>>      bool is_heap            :1;
>>  };
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> index d54d4e7b2195..67c206124781 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> @@ -285,17 +285,19 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
>>      pm_runtime_put_autosuspend(pfdev->dev);
>>  }
>>  
>> -static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>> +static size_t mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>>                    u64 iova, int prot, struct sg_table *sgt)
>>  {
>>      unsigned int count;
>>      struct scatterlist *sgl;
>>      struct io_pgtable_ops *ops = mmu->pgtbl_ops;
>>      u64 start_iova = iova;
>> +    size_t total = 0;
>>  
>>      for_each_sgtable_dma_sg(sgt, sgl, count) {
>>              unsigned long paddr = sg_dma_address(sgl);
>>              size_t len = sg_dma_len(sgl);
>> +            total += len;
>>  
>>              dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
>>  
>> @@ -315,7 +317,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>>  
>>      panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
>>  
>> -    return 0;
>> +    return total;
>>  }
>>  
>>  int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
>> @@ -447,6 +449,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>>      pgoff_t page_offset;
>>      struct sg_table *sgt;
>>      struct page **pages;
>> +    size_t mapped_size;
>>  
>>      bomapping = addr_to_mapping(pfdev, as, addr);
>>      if (!bomapping)
>> @@ -518,10 +521,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>>      if (ret)
>>              goto err_map;
>>  
>> -    mmu_map_sg(pfdev, bomapping->mmu, addr,
>> -               IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>> +    mapped_size = mmu_map_sg(pfdev, bomapping->mmu, addr,
>> +                             IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>>  
>>      bomapping->active = true;
>> +    bo->heap_rss_size += mapped_size;
>
>The alloc-on-fault granularity is statically set to 2MB, so there's no need
>to make mmu_map_sg() return the mapped size; we can just do += SZ_2M if
>things worked.
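>
>IOW, something like (untested; SZ_2M comes from <linux/sizes.h>):
>
>	mmu_map_sg(pfdev, bomapping->mmu, addr,
>		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>
>	bomapping->active = true;
>	bo->heap_rss_size += SZ_2M;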

At the moment mmu_map_sg() is treated as though it always succeeds in mapping
the pages. Would it be alright if I changed it so that it takes into account
the unlikely case that ops->map_pages might fail?
Something like this: https://gitlab.collabora.com/-/snippets/323
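
The rough shape of the change (just a sketch of the idea here, not the
snippet verbatim) would be to bail out of the mapping loop when
ops->map_pages() fails and return only the bytes that were actually mapped:

	static size_t mmu_map_sg(struct panfrost_device *pfdev,
				 struct panfrost_mmu *mmu,
				 u64 iova, int prot, struct sg_table *sgt)
	{
		unsigned int count;
		struct scatterlist *sgl;
		struct io_pgtable_ops *ops = mmu->pgtbl_ops;
		u64 start_iova = iova;
		size_t total = 0;

		for_each_sgtable_dma_sg(sgt, sgl, count) {
			unsigned long paddr = sg_dma_address(sgl);
			size_t len = sg_dma_len(sgl);

			while (len) {
				size_t pgcount, mapped = 0;
				size_t pgsize = get_pgsize(iova | paddr, len,
							   &pgcount);
				int ret = ops->map_pages(ops, iova, paddr,
							 pgsize, pgcount, prot,
							 GFP_KERNEL, &mapped);

				/* Only account pages that actually got mapped */
				iova += mapped;
				paddr += mapped;
				len -= mapped;
				total += mapped;

				if (ret)
					goto out;
			}
		}

	out:
		panfrost_mmu_flush_range(pfdev, mmu, start_iova,
					 iova - start_iova);

		return total;
	}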

>>  
>>      dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
>>  
