If the fill job depends on a previous fence, the caller can now pass
it in to guarantee that the jobs execute in the correct order.

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
---
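For context, a minimal caller sketch (not part of this patch; the
surrounding variables such as "move_fence" are illustrative) showing how
the new argument chains a fill job behind an earlier fence, mirroring the
amdgpu_move_blit() hunk below:

	/* Assume move_fence is the fence of a previously submitted job. */
	struct dma_fence *wipe_fence = NULL;
	int r;

	r = amdgpu_fill_buffer(&adev->mman.move_entities[0], abo, 0, NULL,
			       &wipe_fence, move_fence,
			       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
	if (r)
		goto error;

	/* The fill job will not run before move_fence has signaled. */

Internally the fence is forwarded to drm_sched_job_add_dependency() on the
mapping job; since an extra reference is taken via dma_fence_get(), the
caller keeps its own reference to the fence.
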
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 22 ++++++++++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  1 +
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e7b2cae031b3..be3532134e46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1322,7 +1322,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
                goto out;
 
        r = amdgpu_fill_buffer(&adev->mman.clear_entities[0], abo, 0, &bo->base._resv,
-                              &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+                              &fence, NULL, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
        if (WARN_ON(r))
                goto out;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e1f0567fd2d5..b13f0993dbf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -173,6 +173,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  * @tmz: if we should setup a TMZ enabled mapping
  * @size: in number of bytes to map, out number of bytes mapped
  * @addr: resulting address inside the MC address space
+ * @dep: optional dependency fence for the mapping job to wait on
  *
  * Setup one of the GART windows to access a specific piece of memory or return
  * the physical address for local memory.
@@ -182,7 +183,8 @@ static int amdgpu_ttm_map_buffer(struct drm_sched_entity *entity,
                                 struct ttm_resource *mem,
                                 struct amdgpu_res_cursor *mm_cur,
                                 unsigned int window, struct amdgpu_ring *ring,
-                                bool tmz, uint64_t *size, uint64_t *addr)
+                                bool tmz, uint64_t *size, uint64_t *addr,
+                                struct dma_fence *dep)
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned int offset, num_pages, num_dw, num_bytes;
@@ -234,6 +236,9 @@ static int amdgpu_ttm_map_buffer(struct drm_sched_entity *entity,
        if (r)
                return r;
 
+       if (dep)
+               drm_sched_job_add_dependency(&job->base, dma_fence_get(dep));
+
        src_addr = num_dw * 4;
        src_addr += job->ibs[0].gpu_addr;
 
@@ -326,13 +331,15 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                /* Map src to window 0 and dst to window 1. */
                r = amdgpu_ttm_map_buffer(&entity->base,
                                          src->bo, src->mem, &src_mm,
-                                         entity->gart_window_id0, ring, tmz, &cur_size, &from);
+                                         entity->gart_window_id0, ring, tmz, &cur_size, &from,
+                                         NULL);
                if (r)
                        goto error;
 
                r = amdgpu_ttm_map_buffer(&entity->base,
                                          dst->bo, dst->mem, &dst_mm,
-                                         entity->gart_window_id1, ring, tmz, &cur_size, &to);
+                                         entity->gart_window_id1, ring, tmz, &cur_size, &to,
+                                         NULL);
                if (r)
                        goto error;
 
@@ -415,7 +422,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                struct dma_fence *wipe_fence = NULL;
 
                r = amdgpu_fill_buffer(&adev->mman.move_entities[0],
-                                      abo, 0, NULL, &wipe_fence,
+                                      abo, 0, NULL, &wipe_fence, fence,
                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
@@ -2443,7 +2450,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 
                r = amdgpu_ttm_map_buffer(&entity->base,
                                          &bo->tbo, bo->tbo.resource, &cursor,
-                                         entity->gart_window_id1, ring, false, &size, &addr);
+                                         entity->gart_window_id1, ring, false, &size, &addr,
+                                         NULL);
                if (r)
                        goto err;
 
@@ -2469,6 +2477,7 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
                       uint32_t src_data,
                       struct dma_resv *resv,
                       struct dma_fence **f,
+                      struct dma_fence *dependency,
                       u64 k_job_id)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -2496,7 +2505,8 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
                r = amdgpu_ttm_map_buffer(&entity->base,
                                          &bo->tbo, bo->tbo.resource, &dst,
                                          entity->gart_window_id1, ring, false,
-                                         &cur_size, &to);
+                                         &cur_size, &to,
+                                         dependency);
                if (r)
                        goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9d4891e86675..e8f8165f5bcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -186,6 +186,7 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
                       uint32_t src_data,
                       struct dma_resv *resv,
                       struct dma_fence **f,
+                      struct dma_fence *dependency,
                       u64 k_job_id);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
-- 
2.43.0
