On 2019-02-19 8:40 a.m., Christian König wrote:
> Instead of providing it from outside figure out the ats status in the
> function itself from the data structures.
>
> Signed-off-by: Christian König <[email protected]>

One suggestion inline. Other than that, this patch is Reviewed-by: Felix 
Kuehling <[email protected]>

Regards,
   Felix


> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 52 ++++++++++++++------------
>   1 file changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 3c7b98a758c9..48da4ac76837 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -747,8 +747,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
>    * @adev: amdgpu_device pointer
>    * @vm: VM to clear BO from
>    * @bo: BO to clear
> - * @level: level this BO is at
> - * @pte_support_ats: indicate ATS support from PTE
>    *
>    * Root PD needs to be reserved when calling this.
>    *
> @@ -756,10 +754,11 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
>    * 0 on success, errno otherwise.
>    */
>   static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
> -                           struct amdgpu_vm *vm, struct amdgpu_bo *bo,
> -                           unsigned level, bool pte_support_ats)
> +                           struct amdgpu_vm *vm,
> +                           struct amdgpu_bo *bo)
>   {
>       struct ttm_operation_ctx ctx = { true, false };
> +     unsigned level = adev->vm_manager.root_level;
>       struct dma_fence *fence = NULL;
>       unsigned entries, ats_entries;
>       struct amdgpu_ring *ring;
> @@ -768,17 +767,32 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device 
> *adev,
>       int r;
>   
>       entries = amdgpu_bo_size(bo) / 8;
> +     if (vm->pte_support_ats) {
> +             ats_entries = amdgpu_vm_level_shift(adev, level);
> +             ats_entries += AMDGPU_GPU_PAGE_SHIFT;
> +             ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
>   
> -     if (pte_support_ats) {
> -             if (level == adev->vm_manager.root_level) {
> -                     ats_entries = amdgpu_vm_level_shift(adev, level);
> -                     ats_entries += AMDGPU_GPU_PAGE_SHIFT;
> -                     ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
> +             if (!bo->parent) {
>                       ats_entries = min(ats_entries, entries);
>                       entries -= ats_entries;
>               } else {
> -                     ats_entries = entries;
> -                     entries = 0;
> +                     struct amdgpu_bo *ancestor = bo;
> +                     struct amdgpu_vm_pt *pt;
> +
> +                     ++level;
> +                     while (ancestor->parent->parent) {
> +                             ancestor = ancestor->parent;
> +                             ++level;
> +                     }

This could be simplified as

        do {
                ancestor = ancestor->parent;
                ++level;
        } while (ancestor->parent);


> +
> +                     pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt,
> +                                       base);
> +                     if ((pt - vm->root.entries) >= ats_entries) {
> +                             ats_entries = 0;
> +                     } else {
> +                             ats_entries = entries;
> +                             entries = 0;
> +                     }
>               }
>       } else {
>               ats_entries = 0;
> @@ -911,7 +925,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>   {
>       struct amdgpu_vm_pt_cursor cursor;
>       struct amdgpu_bo *pt;
> -     bool ats = false;
>       uint64_t eaddr;
>       int r;
>   
> @@ -921,9 +934,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>   
>       eaddr = saddr + size - 1;
>   
> -     if (vm->pte_support_ats)
> -             ats = saddr < AMDGPU_GMC_HOLE_START;
> -
>       saddr /= AMDGPU_GPU_PAGE_SIZE;
>       eaddr /= AMDGPU_GPU_PAGE_SIZE;
>   
> @@ -972,7 +982,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>   
>               amdgpu_vm_bo_base_init(&entry->base, vm, pt);
>   
> -             r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
> +             r = amdgpu_vm_clear_bo(adev, vm, pt);
>               if (r)
>                       goto error_free_pt;
>       }
> @@ -3047,9 +3057,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
> amdgpu_vm *vm,
>   
>       amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
>   
> -     r = amdgpu_vm_clear_bo(adev, vm, root,
> -                            adev->vm_manager.root_level,
> -                            vm->pte_support_ats);
> +     r = amdgpu_vm_clear_bo(adev, vm, root);
>       if (r)
>               goto error_unreserve;
>   
> @@ -3144,9 +3152,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, 
> struct amdgpu_vm *vm, uns
>        * changing any other state, in case it fails.
>        */
>       if (pte_support_ats != vm->pte_support_ats) {
> -             r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
> -                            adev->vm_manager.root_level,
> -                            pte_support_ats);
> +             vm->pte_support_ats = pte_support_ats;
> +             r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
>               if (r)
>                       goto free_idr;
>       }
> @@ -3154,7 +3161,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, 
> struct amdgpu_vm *vm, uns
>       /* Update VM state */
>       vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
>                                   AMDGPU_VM_USE_CPU_FOR_COMPUTE);
> -     vm->pte_support_ats = pte_support_ats;
>       DRM_DEBUG_DRIVER("VM update mode is %s\n",
>                        vm->use_cpu_for_update ? "CPU" : "SDMA");
>       WARN_ONCE((vm->use_cpu_for_update && 
> !amdgpu_gmc_vram_full_visible(&adev->gmc)),
_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to