From: Marek Olšák <[email protected]>

---
 src/gallium/drivers/radeonsi/cik_sdma.c  | 12 ++++-----
 src/gallium/drivers/radeonsi/si_buffer.c | 32 ++++++++++++------------
 src/gallium/drivers/radeonsi/si_cp_dma.c | 16 ++++++------
 src/gallium/drivers/radeonsi/si_dma.c    | 12 ++++-----
 src/gallium/drivers/radeonsi/si_dma_cs.c | 10 ++++----
 src/gallium/drivers/radeonsi/si_fence.c  | 20 +++++++--------
 6 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/src/gallium/drivers/radeonsi/cik_sdma.c b/src/gallium/drivers/radeonsi/cik_sdma.c
index 8bf6b30aec7..096f75e508f 100644
--- a/src/gallium/drivers/radeonsi/cik_sdma.c
+++ b/src/gallium/drivers/radeonsi/cik_sdma.c
@@ -28,34 +28,34 @@
 
 static void cik_sdma_copy_buffer(struct si_context *ctx,
                                 struct pipe_resource *dst,
                                 struct pipe_resource *src,
                                 uint64_t dst_offset,
                                 uint64_t src_offset,
                                 uint64_t size)
 {
        struct radeon_cmdbuf *cs = ctx->dma_cs;
        unsigned i, ncopy, csize;
-       struct si_resource *rdst = si_resource(dst);
-       struct si_resource *rsrc = si_resource(src);
+       struct si_resource *sdst = si_resource(dst);
+       struct si_resource *ssrc = si_resource(src);
 
        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
-       util_range_add(&rdst->valid_buffer_range, dst_offset,
+       util_range_add(&sdst->valid_buffer_range, dst_offset,
                       dst_offset + size);
 
-       dst_offset += rdst->gpu_address;
-       src_offset += rsrc->gpu_address;
+       dst_offset += sdst->gpu_address;
+       src_offset += ssrc->gpu_address;
 
        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-       si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);
+       si_need_dma_space(ctx, ncopy * 7, sdst, ssrc);
 
        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
                                                0));
                radeon_emit(cs, ctx->chip_class >= GFX9 ? csize - 1 : csize);
                radeon_emit(cs, 0); /* src/dst endian swap */
                radeon_emit(cs, src_offset);
                radeon_emit(cs, src_offset >> 32);
diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c
index f84dae79102..5c93eacc4b1 100644
--- a/src/gallium/drivers/radeonsi/si_buffer.c
+++ b/src/gallium/drivers/radeonsi/si_buffer.c
@@ -296,36 +296,36 @@ si_invalidate_buffer(struct si_context *sctx,
 
        return true;
 }
 
 /* Replace the storage of dst with src. */
 void si_replace_buffer_storage(struct pipe_context *ctx,
                                 struct pipe_resource *dst,
                                 struct pipe_resource *src)
 {
        struct si_context *sctx = (struct si_context*)ctx;
-       struct si_resource *rdst = si_resource(dst);
-       struct si_resource *rsrc = si_resource(src);
-       uint64_t old_gpu_address = rdst->gpu_address;
-
-       pb_reference(&rdst->buf, rsrc->buf);
-       rdst->gpu_address = rsrc->gpu_address;
-       rdst->b.b.bind = rsrc->b.b.bind;
-       rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
-       rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
-       rdst->flags = rsrc->flags;
-
-       assert(rdst->vram_usage == rsrc->vram_usage);
-       assert(rdst->gart_usage == rsrc->gart_usage);
-       assert(rdst->bo_size == rsrc->bo_size);
-       assert(rdst->bo_alignment == rsrc->bo_alignment);
-       assert(rdst->domains == rsrc->domains);
+       struct si_resource *sdst = si_resource(dst);
+       struct si_resource *ssrc = si_resource(src);
+       uint64_t old_gpu_address = sdst->gpu_address;
+
+       pb_reference(&sdst->buf, ssrc->buf);
+       sdst->gpu_address = ssrc->gpu_address;
+       sdst->b.b.bind = ssrc->b.b.bind;
+       sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
+       sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
+       sdst->flags = ssrc->flags;
+
+       assert(sdst->vram_usage == ssrc->vram_usage);
+       assert(sdst->gart_usage == ssrc->gart_usage);
+       assert(sdst->bo_size == ssrc->bo_size);
+       assert(sdst->bo_alignment == ssrc->bo_alignment);
+       assert(sdst->domains == ssrc->domains);
 
        si_rebind_buffer(sctx, dst, old_gpu_address);
 }
 
 static void si_invalidate_resource(struct pipe_context *ctx,
                                   struct pipe_resource *resource)
 {
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_resource *rbuffer = si_resource(resource);
 
diff --git a/src/gallium/drivers/radeonsi/si_cp_dma.c b/src/gallium/drivers/radeonsi/si_cp_dma.c
index 3001353df27..5993369d2da 100644
--- a/src/gallium/drivers/radeonsi/si_cp_dma.c
+++ b/src/gallium/drivers/radeonsi/si_cp_dma.c
@@ -205,55 +205,55 @@ static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst
                if (coher == SI_COHERENCY_SHADER)
                        *packet_flags |= CP_DMA_PFP_SYNC_ME;
        }
 }
 
 void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
                            struct pipe_resource *dst, uint64_t offset,
                            uint64_t size, unsigned value, unsigned user_flags,
                            enum si_coherency coher, enum si_cache_policy cache_policy)
 {
-       struct si_resource *rdst = si_resource(dst);
-       uint64_t va = (rdst ? rdst->gpu_address : 0) + offset;
+       struct si_resource *sdst = si_resource(dst);
+       uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
        bool is_first = true;
 
        assert(size && size % 4 == 0);
 
        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
-       if (rdst)
-               util_range_add(&rdst->valid_buffer_range, offset, offset + size);
+       if (sdst)
+               util_range_add(&sdst->valid_buffer_range, offset, offset + size);
 
        /* Flush the caches. */
-       if (rdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
+       if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
                sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                               SI_CONTEXT_CS_PARTIAL_FLUSH |
                               si_get_flush_flags(sctx, coher, cache_policy);
        }
 
        while (size) {
                unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
-               unsigned dma_flags = CP_DMA_CLEAR | (rdst ? 0 : CP_DMA_DST_IS_GDS);
+               unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
 
                si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, user_flags,
                                  coher, &is_first, &dma_flags);
 
                /* Emit the clear packet. */
                si_emit_cp_dma(sctx, cs, va, value, byte_count, dma_flags, cache_policy);
 
                size -= byte_count;
                va += byte_count;
        }
 
-       if (rdst && cache_policy != L2_BYPASS)
-               rdst->TC_L2_dirty = true;
+       if (sdst && cache_policy != L2_BYPASS)
+               sdst->TC_L2_dirty = true;
 
        /* If it's not a framebuffer fast clear... */
        if (coher == SI_COHERENCY_SHADER)
                sctx->num_cp_dma_calls++;
 }
 
 /**
  * Realign the CP DMA engine. This must be done after a copy with an unaligned
  * size.
  *
diff --git a/src/gallium/drivers/radeonsi/si_dma.c b/src/gallium/drivers/radeonsi/si_dma.c
index d0f77566d25..450ed82b4d6 100644
--- a/src/gallium/drivers/radeonsi/si_dma.c
+++ b/src/gallium/drivers/radeonsi/si_dma.c
@@ -30,45 +30,45 @@
 
 static void si_dma_copy_buffer(struct si_context *ctx,
                                struct pipe_resource *dst,
                                struct pipe_resource *src,
                                uint64_t dst_offset,
                                uint64_t src_offset,
                                uint64_t size)
 {
        struct radeon_cmdbuf *cs = ctx->dma_cs;
        unsigned i, ncopy, count, max_size, sub_cmd, shift;
-       struct si_resource *rdst = si_resource(dst);
-       struct si_resource *rsrc = si_resource(src);
+       struct si_resource *sdst = si_resource(dst);
+       struct si_resource *ssrc = si_resource(src);
 
        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
-       util_range_add(&rdst->valid_buffer_range, dst_offset,
+       util_range_add(&sdst->valid_buffer_range, dst_offset,
                       dst_offset + size);
 
-       dst_offset += rdst->gpu_address;
-       src_offset += rsrc->gpu_address;
+       dst_offset += sdst->gpu_address;
+       src_offset += ssrc->gpu_address;
 
        /* see whether we should use the dword-aligned or byte-aligned copy */
        if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
                sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
                shift = 2;
                max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
        } else {
                sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
                shift = 0;
                max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
        }
 
        ncopy = DIV_ROUND_UP(size, max_size);
-       si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);
+       si_need_dma_space(ctx, ncopy * 5, sdst, ssrc);
 
        for (i = 0; i < ncopy; i++) {
                count = MIN2(size, max_size);
                radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
                                              count >> shift));
                radeon_emit(cs, dst_offset);
                radeon_emit(cs, src_offset);
                radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
                radeon_emit(cs, (src_offset >> 32UL) & 0xff);
                dst_offset += count;
diff --git a/src/gallium/drivers/radeonsi/si_dma_cs.c b/src/gallium/drivers/radeonsi/si_dma_cs.c
index 61b9be7742b..33177a9e4ad 100644
--- a/src/gallium/drivers/radeonsi/si_dma_cs.c
+++ b/src/gallium/drivers/radeonsi/si_dma_cs.c
@@ -62,60 +62,60 @@ void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
                                        0));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
 }
 
 void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                          uint64_t offset, uint64_t size, unsigned clear_value)
 {
        struct radeon_cmdbuf *cs = sctx->dma_cs;
        unsigned i, ncopy, csize;
-       struct si_resource *rdst = si_resource(dst);
+       struct si_resource *sdst = si_resource(dst);
 
        assert(offset % 4 == 0);
        assert(size);
        assert(size % 4 == 0);
 
        if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
                sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
                return;
        }
 
        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
-       util_range_add(&rdst->valid_buffer_range, offset, offset + size);
+       util_range_add(&sdst->valid_buffer_range, offset, offset + size);
 
-       offset += rdst->gpu_address;
+       offset += sdst->gpu_address;
 
        if (sctx->chip_class == SI) {
                /* the same maximum size as for copying */
                ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
-               si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
+               si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
 
                for (i = 0; i < ncopy; i++) {
                        csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
                        radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0,
                                                      csize / 4));
                        radeon_emit(cs, offset);
                        radeon_emit(cs, clear_value);
                        radeon_emit(cs, (offset >> 32) << 16);
                        offset += csize;
                        size -= csize;
                }
                return;
        }
 
        /* The following code is for CI, VI, Vega/Raven, etc. */
        /* the same maximum size as for copying */
        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-       si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
+       si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
 
        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0,
                                                0x8000 /* dword copy */));
                radeon_emit(cs, offset);
                radeon_emit(cs, offset >> 32);
                radeon_emit(cs, clear_value);
                radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
                offset += csize;
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 7b4271a1e98..bb53ccba947 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -186,31 +186,31 @@ static void si_add_syncobj_signal(struct si_context *sctx,
                                  struct pipe_fence_handle *fence)
 {
        sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
 }
 
 static void si_fence_reference(struct pipe_screen *screen,
                               struct pipe_fence_handle **dst,
                               struct pipe_fence_handle *src)
 {
        struct radeon_winsys *ws = ((struct si_screen*)screen)->ws;
-       struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
-       struct si_multi_fence *rsrc = (struct si_multi_fence *)src;
-
-       if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
-               ws->fence_reference(&(*rdst)->gfx, NULL);
-               ws->fence_reference(&(*rdst)->sdma, NULL);
-               tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
-               si_resource_reference(&(*rdst)->fine.buf, NULL);
-               FREE(*rdst);
+       struct si_multi_fence **sdst = (struct si_multi_fence **)dst;
+       struct si_multi_fence *ssrc = (struct si_multi_fence *)src;
+
+       if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
+               ws->fence_reference(&(*sdst)->gfx, NULL);
+               ws->fence_reference(&(*sdst)->sdma, NULL);
+               tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
+               si_resource_reference(&(*sdst)->fine.buf, NULL);
+               FREE(*sdst);
        }
-        *rdst = rsrc;
+        *sdst = ssrc;
 }
 
 static struct si_multi_fence *si_create_multi_fence()
 {
        struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
        if (!fence)
                return NULL;
 
        pipe_reference_init(&fence->reference, 1);
        util_queue_fence_init(&fence->ready);
-- 
2.17.1

_______________________________________________
mesa-dev mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to