Module: Mesa
Branch: main
Commit: bbd64747e22495a005adf36f6d592b893013f311
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=bbd64747e22495a005adf36f6d592b893013f311

Author: Marek Olšák <[email protected]>
Date:   Sun Dec  3 04:33:56 2023 -0500

winsys/amdgpu: move lock from amdgpu_winsys_bo into sparse and real BOs

The slab BO doesn't use it.

Reviewed-by: Yogesh Mohan Marimuthu <[email protected]>
Reviewed-by: Pierre-Eric Pelloux-Prayer <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26547>
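
For readers skimming the diff below, here is a minimal sketch of the idea,
using simplified, hypothetical struct names rather than the actual Mesa
definitions: the mutex leaves the shared base struct and is embedded only in
the two variants that actually take it (real BOs around lazy CPU mapping,
sparse BOs around commitment bookkeeping), so slab entries no longer pay for
the init/destroy of a lock they never use.

   /* Illustrative sketch only -- simplified, hypothetical names, not the
    * real amdgpu_winsys structs. */
   #include <stdint.h>
   #include "util/simple_mtx.h"

   struct bo_base {       /* cf. amdgpu_winsys_bo: no lock here anymore */
      uint32_t unique_id;
      uint64_t va;
   };

   struct bo_real {       /* cf. amdgpu_bo_real: lock guards lazy CPU mapping */
      struct bo_base b;
      simple_mtx_t lock;
      void *cpu_ptr;
   };

   struct bo_sparse {     /* cf. amdgpu_bo_sparse: lock guards page commitments */
      struct bo_base b;
      simple_mtx_t lock;
   };

   struct bo_slab {       /* cf. slab BOs: never locked, so no mutex at all */
      struct bo_base b;
   };

The diff then switches every lock/unlock site from &bo->b.lock to the
variant's own &bo->lock (or &get_sparse_bo(bo)->lock in the CS code).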

---

 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 35 ++++++++++++++-----------------
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.h |  5 +++--
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c |  8 +++----
 3 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 998d5559e21..ecaea7b161d 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -210,7 +210,7 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
    else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
       ws->allocated_gtt -= align64(bo->b.base.size, ws->info.gart_page_size);
 
-   simple_mtx_destroy(&bo->b.lock);
+   simple_mtx_destroy(&bo->lock);
    FREE(bo);
 }
 
@@ -375,18 +375,18 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
    } else {
       cpu = p_atomic_read(&real->cpu_ptr);
       if (!cpu) {
-         simple_mtx_lock(&real->b.lock);
+         simple_mtx_lock(&real->lock);
          /* Must re-check due to the possibility of a race. Re-check need not
           * be atomic thanks to the lock. */
          cpu = real->cpu_ptr;
          if (!cpu) {
             if (!amdgpu_bo_do_map(rws, real, &cpu)) {
-               simple_mtx_unlock(&real->b.lock);
+               simple_mtx_unlock(&real->lock);
                return NULL;
             }
             p_atomic_set(&real->cpu_ptr, cpu);
          }
-         simple_mtx_unlock(&real->b.lock);
+         simple_mtx_unlock(&real->lock);
       }
    }
 
@@ -577,7 +577,7 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
          goto error_va_map;
    }
 
-   simple_mtx_init(&bo->b.lock, mtx_plain);
+   simple_mtx_init(&bo->lock, mtx_plain);
    pipe_reference_init(&bo->b.base.reference, 1);
    bo->b.base.placement = initial_domain;
    bo->b.base.alignment_log2 = util_logbase2(alignment);
@@ -763,7 +763,6 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_s
       bo->b.type = AMDGPU_BO_SLAB;
       bo->b.va = slab->buffer->va + i * entry_size;
       bo->b.unique_id = base_id + i;
-      simple_mtx_init(&bo->b.lock, mtx_plain);
 
       if (is_real_bo(slab->buffer)) {
          /* The slab is not suballocated. */
@@ -806,10 +805,8 @@ void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *pslab)
    else
      ws->slab_wasted_gtt -= slab_size - slab->base.num_entries * slab->entry_size;
 
-   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
+   for (unsigned i = 0; i < slab->base.num_entries; ++i)
       amdgpu_bo_remove_fences(&slab->entries[i].b);
-      simple_mtx_destroy(&slab->entries[i].b.lock);
-   }
 
    FREE(slab->entries);
    amdgpu_winsys_bo_reference(ws, &slab->buffer, NULL);
@@ -1069,7 +1066,7 @@ static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer
 
    amdgpu_va_range_free(bo->va_handle);
    FREE(bo->commitments);
-   simple_mtx_destroy(&bo->b.lock);
+   simple_mtx_destroy(&bo->lock);
    FREE(bo);
 }
 
@@ -1100,7 +1097,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
    if (!bo)
       return NULL;
 
-   simple_mtx_init(&bo->b.lock, mtx_plain);
+   simple_mtx_init(&bo->lock, mtx_plain);
    pipe_reference_init(&bo->b.base.reference, 1);
    bo->b.base.placement = domain;
    bo->b.base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
@@ -1139,7 +1136,7 @@ error_va_map:
 error_va_alloc:
    FREE(bo->commitments);
 error_alloc_commitments:
-   simple_mtx_destroy(&bo->b.lock);
+   simple_mtx_destroy(&bo->lock);
    FREE(bo);
    return NULL;
 }
@@ -1164,7 +1161,7 @@ amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
 
-   simple_mtx_lock(&bo->b.lock);
+   simple_mtx_lock(&bo->lock);
 
 #if DEBUG_SPARSE_COMMITS
    sparse_dump(bo, __func__);
@@ -1268,7 +1265,7 @@ amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
    }
 out:
 
-   simple_mtx_unlock(&bo->b.lock);
+   simple_mtx_unlock(&bo->lock);
 
    return ok;
 }
@@ -1293,7 +1290,7 @@ amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
    start_va_page = va_page = range_offset / RADEON_SPARSE_PAGE_SIZE;
    end_va_page = (*range_size + range_offset) / RADEON_SPARSE_PAGE_SIZE;
 
-   simple_mtx_lock(&bo->b.lock);
+   simple_mtx_lock(&bo->lock);
    /* Lookup the first committed page with backing physical storage */
    while (va_page < end_va_page && !comm[va_page].backing)
       va_page++;
@@ -1302,7 +1299,7 @@ amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
    if (va_page == end_va_page && !comm[va_page].backing) {
       uncommitted_range_prev = *range_size;
       *range_size = 0;
-      simple_mtx_unlock(&bo->b.lock);
+      simple_mtx_unlock(&bo->lock);
       return uncommitted_range_prev;
    }
 
@@ -1310,7 +1307,7 @@ amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
    span_va_page = va_page;
    while (va_page < end_va_page && comm[va_page].backing)
       va_page++;
-   simple_mtx_unlock(&bo->b.lock);
+   simple_mtx_unlock(&bo->lock);
 
    /* Calc byte count that need to skip before committed range */
    if (span_va_page != start_va_page)
@@ -1589,7 +1586,7 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
    bo->b.type = AMDGPU_BO_REAL;
    bo->b.va = va;
    bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
-   simple_mtx_init(&bo->b.lock, mtx_plain);
+   simple_mtx_init(&bo->lock, mtx_plain);
    bo->bo = result.buf_handle;
    bo->va_handle = va_handle;
    bo->is_shared = true;
@@ -1744,7 +1741,7 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
     bo->b.type = AMDGPU_BO_REAL;
     bo->b.va = va;
     bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
-    simple_mtx_init(&bo->b.lock, mtx_plain);
+    simple_mtx_init(&bo->lock, mtx_plain);
     bo->bo = buf_handle;
     bo->cpu_ptr = pointer;
     bo->va_handle = va_handle;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
index 10a8363c578..cf5a160c161 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
@@ -46,9 +46,8 @@ struct amdgpu_winsys_bo {
    struct pb_buffer base;
    enum amdgpu_bo_type type;
 
-   uint64_t va;
    uint32_t unique_id;
-   simple_mtx_t lock;
+   uint64_t va;
 
    /* how many command streams, which are being emitted in a separate
     * thread, is this bo referenced in? */
@@ -71,6 +70,7 @@ struct amdgpu_bo_real {
 #if DEBUG
    struct list_head global_list_item;
 #endif
+   simple_mtx_t lock;
 
    bool is_user_ptr;
 
@@ -91,6 +91,7 @@ struct amdgpu_bo_sparse {
 
    uint32_t num_va_pages;
    uint32_t num_backing_pages;
+   simple_mtx_t lock;
 
    struct list_head backing;
 
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 03ee5807d2b..1570b5f702e 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -750,7 +750,7 @@ static int amdgpu_lookup_or_add_sparse_buffer(struct radeon_cmdbuf *rcs,
    /* We delay adding the backing buffers until we really have to. However,
     * we cannot delay accounting for memory use.
     */
-   simple_mtx_lock(&bo->lock);
+   simple_mtx_lock(&get_sparse_bo(bo)->lock);
 
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &get_sparse_bo(bo)->backing, list) {
       if (bo->base.placement & RADEON_DOMAIN_VRAM)
@@ -759,7 +759,7 @@ static int amdgpu_lookup_or_add_sparse_buffer(struct radeon_cmdbuf *rcs,
          rcs->used_gart_kb += backing->bo->b.base.size / 1024;
    }
 
-   simple_mtx_unlock(&bo->lock);
+   simple_mtx_unlock(&get_sparse_bo(bo)->lock);
 
    return idx;
 }
@@ -1461,11 +1461,11 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
 {
    for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
       struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
-      struct amdgpu_winsys_bo *bo = buffer->bo;
+      struct amdgpu_bo_sparse *bo = get_sparse_bo(buffer->bo);
 
       simple_mtx_lock(&bo->lock);
 
-      list_for_each_entry(struct amdgpu_sparse_backing, backing, &get_sparse_bo(bo)->backing, list) {
+      list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->backing, list) {
          /* We can directly add the buffer here, because we know that each
           * backing buffer occurs only once.
           */
