NAK, it is intentionally done this way to avoid starvation of processes.

In other words, we only assign a VMID when there is at least one free.
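
To make the point concrete, here is a minimal userspace sketch (not the amdgpu code; vmid_pool, grab_vmid() and the busy flag are made-up stand-ins for the id_mgr LRU and its active fences) of the ordering the current code relies on: look for an idle VMID first and, if none is free, make the submission wait, so every process eventually gets an ID instead of being starved by submissions that keep reusing their existing assignment.

/* Hypothetical model only, for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_VMIDS 4

struct vmid {
	int id;
	bool busy;	/* stand-in for a still-pending "active" fence */
};

static struct vmid vmid_pool[NUM_VMIDS];

/* Hand out an idle VMID, or -1 when the caller has to wait. */
static int grab_vmid(void)
{
	for (int i = 0; i < NUM_VMIDS; i++) {
		if (!vmid_pool[i].busy) {
			vmid_pool[i].busy = true;
			return vmid_pool[i].id;
		}
	}
	/*
	 * No idle VMID: the driver builds a fence_array from the pending
	 * fences and lets the job wait on it, so an ID is only assigned
	 * once one is actually free again.
	 */
	return -1;
}

int main(void)
{
	for (int i = 0; i < NUM_VMIDS; i++)
		vmid_pool[i].id = i;

	for (int i = 0; i < NUM_VMIDS + 1; i++) {
		int id = grab_vmid();
		if (id < 0)
			printf("submission %d: no idle VMID, wait on fences\n", i);
		else
			printf("submission %d: got VMID %d\n", i, id);
	}
	return 0;
}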

Regards,
Christian.

On 10.05.2017 at 09:31, Chunming Zhou wrote:
Change-Id: If24a62b9c3097c9b040225ab0e768145b7a3db1e
Signed-off-by: Chunming Zhou <[email protected]>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 92 +++++++++++++++++-----------------
  1 file changed, 47 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bbb3587..6259608 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -492,51 +492,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                mutex_unlock(&id_mgr->lock);
                return r;
        }
-       fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-       if (!fences) {
-               mutex_unlock(&id_mgr->lock);
-               return -ENOMEM;
-       }
-       /* Check if we have an idle VMID */
-       i = 0;
-       list_for_each_entry(idle, &id_mgr->ids_lru, list) {
-               fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
-               if (!fences[i])
-                       break;
-               ++i;
-       }
-
-       /* If we can't find a idle VMID to use, wait till one becomes available */
-       if (&idle->list == &id_mgr->ids_lru) {
-               u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-               unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-               struct fence_array *array;
-               unsigned j;
-
-               for (j = 0; j < i; ++j)
-                       fence_get(fences[j]);
-
-               array = fence_array_create(i, fences, fence_context,
-                                          seqno, true);
-               if (!array) {
-                       for (j = 0; j < i; ++j)
-                               fence_put(fences[j]);
-                       kfree(fences);
-                       r = -ENOMEM;
-                       goto error;
-               }
-
-
-               r = amdgpu_sync_fence(ring->adev, sync, &array->base);
-               fence_put(&array->base);
-               if (r)
-                       goto error;
-
-               mutex_unlock(&id_mgr->lock);
-               return 0;
-
-       }
-       kfree(fences);
        job->vm_needs_flush = false;
        /* Check if we can use a VMID already assigned to this VM */
@@ -586,6 +541,53 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        };
+       fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+       if (!fences) {
+               mutex_unlock(&id_mgr->lock);
+               return -ENOMEM;
+       }
+       /* Check if we have an idle VMID */
+       i = 0;
+       list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+               fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+               if (!fences[i])
+                       break;
+               ++i;
+       }
+
+       /* If we can't find a idle VMID to use, wait till one becomes available */
+       if (&idle->list == &id_mgr->ids_lru) {
+               u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+               unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+               struct fence_array *array;
+               unsigned j;
+
+               for (j = 0; j < i; ++j)
+                       fence_get(fences[j]);
+
+               array = fence_array_create(i, fences, fence_context,
+                                          seqno, true);
+               if (!array) {
+                       for (j = 0; j < i; ++j)
+                               fence_put(fences[j]);
+                       kfree(fences);
+                       r = -ENOMEM;
+                       goto error;
+               }
+
+
+               r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+               fence_put(&array->base);
+               if (r)
+                       goto error;
+
+               mutex_unlock(&id_mgr->lock);
+               return 0;
+
+       }
+       kfree(fences);
+
+
        /* Still no ID to use? Then use the idle one found earlier */
        id = idle;

