This helper validates whether the userq can be unmapped prior to the userq VA GEM unmap.
Signed-off-by: Prike Liang <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 57 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  3 ++
 2 files changed, 60 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 0208c6b8a8e0..534a9c98c011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -1150,3 +1150,60 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 	mutex_unlock(&adev->userq_mutex);
 	return ret;
 }
+
+/* Return true if @va matches any of the queue's mapped buffer VAs. */
+static bool amdgpu_userq_gem_va_unmap_queue_retrieve(struct amdgpu_usermode_queue *queue,
+						     uint64_t va)
+{
+	if (amdgpu_userq_va_align(queue->queue_va) != va &&
+	    amdgpu_userq_va_align(queue->wptr_va) != va &&
+	    amdgpu_userq_va_align(queue->rptr_va) != va &&
+	    amdgpu_userq_va_align(queue->eop_va) != va &&
+	    amdgpu_userq_va_align(queue->shadow_va) != va &&
+	    amdgpu_userq_va_align(queue->csa_va) != va)
+		return false;
+
+	return true;
+}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_vm *vm,
+				       uint64_t va)
+{
+	struct amdgpu_fpriv *fpriv = vm_to_fpriv(vm);
+	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+
+	if (vm->is_userq_context && &fpriv->vm == vm) {
+		struct amdgpu_usermode_queue *queue;
+		int queue_id, r = 0;
+
+		if (mutex_trylock(&uq_mgr->userq_mutex)) {
+			/* If here the userq bo is busy and needs to deactivate and prevent reusing it. */
+			idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+				struct dma_fence *f = queue->last_fence;
+
+
+				if (!amdgpu_userq_gem_va_unmap_queue_retrieve(queue, va)) {
+					dev_dbg(uq_mgr->adev->dev, "queue(id:%d) does not belong to vm:%p\n",
+						queue_id, vm);
+					continue;
+				}
+
+				if (f && !dma_fence_is_signaled(f)) {
+
+					dev_warn(uq_mgr->adev->dev, "try to unmap the busy queue(id:%d):%p under vm:%p\n",
+						 queue_id, queue, vm);
+					/* Need to set a reasonable state to avoid reusing this queue. */
+					queue->state = AMDGPU_USERQ_STATE_HUNG;
+					r++;
+				}
+			}
+			mutex_unlock(&uq_mgr->userq_mutex);
+			return r;
+		} else {
+			/* TODO(review): should we retry the lock before returning? */
+			return -EBUSY;
+		}
+
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 194ec7a6b3b2..08c49d738ec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -31,6 +31,7 @@
 #define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
 #define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
 #define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+#define vm_to_fpriv(v) container_of(v, struct amdgpu_fpriv, vm)
 
 enum amdgpu_userq_state {
 	AMDGPU_USERQ_STATE_UNMAPPED = 0,
@@ -148,4 +149,6 @@ bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
 int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr);
 int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
 				struct amdgpu_usermode_queue *queue);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_vm *vm,
+				       uint64_t va);
 #endif
-- 
2.34.1
