Am 01.11.23 um 17:26 schrieb Arunpravin Paneer Selvam:
Replace seq64 bo lock sequences with drm_exec.

Signed-off-by: Alex Deucher <[email protected]>

Reviewed-by: Christian König <[email protected]>

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 73 ++++++++++-------------
  1 file changed, 33 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index 63d8b68023be..810f7637096e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -25,6 +25,8 @@
  #include "amdgpu.h"
  #include "amdgpu_seq64.h"
+#include <drm/drm_exec.h>
+
  /**
   * DOC: amdgpu_seq64
   *
@@ -68,11 +70,8 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
  int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                     struct amdgpu_bo_va **bo_va)
  {
-       struct ttm_validate_buffer seq64_tv;
-       struct amdgpu_bo_list_entry pd;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
        struct amdgpu_bo *bo;
+       struct drm_exec exec;
        u64 seq64_addr;
        int r;
@@ -80,23 +79,20 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (!bo)
                return -EINVAL;

-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&seq64_tv.head);
-
-       seq64_tv.bo = &bo->tbo;
-       seq64_tv.num_shared = 1;
-
-       list_add(&seq64_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r)
-               return r;
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               if (likely(!r))
+                       r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error;
+       }

        *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!*bo_va) {
                r = -ENOMEM;
-               goto error_vm;
+               goto error;
        }

        seq64_addr = amdgpu_seq64_get_va_base(adev);
@@ -104,23 +100,19 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             AMDGPU_PTE_READABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
-               goto error_map;
+               amdgpu_vm_bo_del(adev, *bo_va);
+               goto error;
        }

        r = amdgpu_vm_bo_update(adev, *bo_va, false);
        if (r) {
                DRM_ERROR("failed to do vm_bo_update on userq sem\n");
-               goto error_map;
+               amdgpu_vm_bo_del(adev, *bo_va);
+               goto error;
        }

-       ttm_eu_backoff_reservation(&ticket, &list);
-
-       return 0;
-
-error_map:
-       amdgpu_vm_bo_del(adev, *bo_va);
-error_vm:
-       ttm_eu_backoff_reservation(&ticket, &list);
+error:
+       drm_exec_fini(&exec);
        return r;
  }
@@ -134,12 +126,10 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
   */
  void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
  {
-       struct ttm_validate_buffer seq64_tv;
-       struct amdgpu_bo_list_entry pd;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
        struct amdgpu_vm *vm;
        struct amdgpu_bo *bo;
+       struct drm_exec exec;
+       int r;

        if (!fpriv->seq64_va)
                return;
@@ -149,20 +139,23 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
                return;

        vm = &fpriv->vm;
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&seq64_tv.head);

-       seq64_tv.bo = &bo->tbo;
-       seq64_tv.num_shared = 1;
-
-       list_add(&seq64_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               if (likely(!r))
+                       r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error;
+       }

-       ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        amdgpu_vm_bo_del(adev, fpriv->seq64_va);
-       ttm_eu_backoff_reservation(&ticket, &list);

        fpriv->seq64_va = NULL;
+
+error:
+       drm_exec_fini(&exec);
  }

  /**

Reply via email to