From: Nicolai Hähnle <nicolai.haeh...@amd.com>

Sparse buffers can never be mapped by the CPU.
---
 src/gallium/drivers/radeon/r600_buffer_common.c | 34 +++++++++++++++++--------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index a1e8228..10036af 100644
--- a/src/gallium/drivers/radeon/r600_buffer_common.c
+++ b/src/gallium/drivers/radeon/r600_buffer_common.c
@@ -44,20 +44,22 @@ bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
        return false;
 }
 
 void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                       struct r600_resource *resource,
                                       unsigned usage)
 {
        enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
        bool busy = false;
 
+       assert(!(resource->flags & RADEON_FLAG_SPARSE));
+
        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
                return ctx->ws->buffer_map(resource->buf, NULL, usage);
        }
 
        if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* have to wait for the last write */
                rusage = RADEON_USAGE_WRITE;
        }
 
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
@@ -242,20 +244,24 @@ static void r600_buffer_destroy(struct pipe_screen *screen,
 }
 
 static bool
 r600_invalidate_buffer(struct r600_common_context *rctx,
                       struct r600_resource *rbuffer)
 {
        /* Shared buffers can't be reallocated. */
        if (rbuffer->is_shared)
                return false;
 
+       /* Sparse buffers can't be reallocated. */
+       if (rbuffer->flags & RADEON_FLAG_SPARSE)
+               return false;
+
        /* In AMD_pinned_memory, the user pointer association only gets
         * broken when the buffer is explicitly re-allocated.
         */
        if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
                return false;
 
        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
@@ -348,55 +354,61 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
                if (r600_invalidate_buffer(rctx, rbuffer)) {
                        /* At this point, the buffer is always idle. */
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
                } else {
                        /* Fall back to a temporary buffer. */
                        usage |= PIPE_TRANSFER_DISCARD_RANGE;
                }
        }
 
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
-           !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
-                      PIPE_TRANSFER_PERSISTENT)) &&
            !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
-           r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
+           ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
+                        PIPE_TRANSFER_PERSISTENT)) &&
+             r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
+            (rbuffer->flags & RADEON_FLAG_SPARSE))) {
                assert(usage & PIPE_TRANSFER_WRITE);
 
-               /* Check if mapping this buffer would cause waiting for the GPU. */
-               if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
+               /* Check if mapping this buffer would cause waiting for the GPU.
+                */
+               if (rbuffer->flags & RADEON_FLAG_SPARSE ||
+                   r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
                    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                        /* Do a wait-free write-only transfer using a temporary buffer. */
                        unsigned offset;
                        struct r600_resource *staging = NULL;
 
                        u_upload_alloc(ctx->stream_uploader, 0,
                                        box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
                                       rctx->screen->info.tcc_cache_line_size,
                                       &offset, (struct pipe_resource**)&staging,
                                        (void**)&data);
 
                        if (staging) {
                                data += box->x % R600_MAP_BUFFER_ALIGNMENT;
                                return r600_buffer_get_transfer(ctx, resource, usage, box,
                                                                ptransfer, data, staging, offset);
+                       } else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
+                               return NULL;
                        }
                } else {
                        /* At this point, the buffer is always idle (we checked it above). */
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
                }
        }
        /* Use a staging buffer in cached GTT for reads. */
-       else if ((usage & PIPE_TRANSFER_READ) &&
-                !(usage & PIPE_TRANSFER_PERSISTENT) &&
-                (rbuffer->domains & RADEON_DOMAIN_VRAM ||
-                 rbuffer->flags & RADEON_FLAG_GTT_WC) &&
-                r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
+       else if (((usage & PIPE_TRANSFER_READ) &&
+                 !(usage & PIPE_TRANSFER_PERSISTENT) &&
+                 (rbuffer->domains & RADEON_DOMAIN_VRAM ||
+                  rbuffer->flags & RADEON_FLAG_GTT_WC) &&
+                 r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
+                (rbuffer->flags & RADEON_FLAG_SPARSE)) {
                struct r600_resource *staging;
 
                staging = (struct r600_resource*) pipe_buffer_create(
                                ctx->screen, 0, PIPE_USAGE_STAGING,
                                box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
                if (staging) {
                        /* Copy the VRAM buffer to the staging buffer. */
                        rctx->dma_copy(ctx, &staging->b.b, 0,
                                       box->x % R600_MAP_BUFFER_ALIGNMENT,
                                       0, 0, resource, 0, box);
@@ -404,20 +416,22 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
                        data = r600_buffer_map_sync_with_rings(rctx, staging,
                                                               usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
                        if (!data) {
                                r600_resource_reference(&staging, NULL);
                                return NULL;
                        }
                        data += box->x % R600_MAP_BUFFER_ALIGNMENT;
 
                        return r600_buffer_get_transfer(ctx, resource, usage, box,
                                                        ptransfer, data, staging, 0);
+               } else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
+                       return NULL;
                }
        }
 
        data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
        if (!data) {
                return NULL;
        }
        data += box->x;
 
        return r600_buffer_get_transfer(ctx, resource, usage, box,
-- 
2.9.3

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to