Allow virtio_gpu_virgl_unmap_resource_blob() to be invoked while async unmapping is in progress. Do this in preparation for an improvement of virtio-gpu resetting that will require this change.
Suggested-by: Akihiko Odaki <[email protected]> Signed-off-by: Dmitry Osipenko <[email protected]> --- hw/display/trace-events | 2 +- hw/display/virtio-gpu-virgl.c | 28 ++++++++++++++++++++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/hw/display/trace-events b/hw/display/trace-events index e323a82cff24..4bfc457fbac1 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -39,7 +39,7 @@ virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h) virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d" virtio_gpu_cmd_res_create_blob(uint32_t res, uint64_t size) "res 0x%x, size %" PRId64 virtio_gpu_cmd_res_map_blob(uint32_t res, void *vmr, void *mr) "res 0x%x, vmr %p, mr %p" -virtio_gpu_cmd_res_unmap_blob(uint32_t res, void *mr, bool finish_unmapping) "res 0x%x, mr %p, finish_unmapping %d" +virtio_gpu_cmd_res_unmap_blob(uint32_t res, void *mr, int mapping_state) "res 0x%x, mr %p, mapping_state %d" virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x" virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x" virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x" diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 77fe5cd4ecd8..8cbbe890d282 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -68,11 +68,17 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie) #endif #if VIRGL_VERSION_MAJOR >= 1 +enum virtio_gpu_virgl_hostmem_region_mapping_state { + VIRTIO_GPU_MR_MAPPED, + VIRTIO_GPU_MR_UNMAP_STARTED, + VIRTIO_GPU_MR_UNMAP_COMPLETED, +}; + struct virtio_gpu_virgl_hostmem_region { Object parent_obj; MemoryRegion mr; struct VirtIOGPU *g; - bool finish_unmapping; + enum virtio_gpu_virgl_hostmem_region_mapping_state mapping_state; }; #define TYPE_VIRTIO_GPU_VIRGL_HOSTMEM_REGION "virtio-gpu-virgl-hostmem-region" @@ -109,7 +115,7 @@ static void virtio_gpu_virgl_hostmem_region_finalize(Object 
*obj) return; } - vmr->finish_unmapping = true; + vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_COMPLETED; b = VIRTIO_GPU_BASE(vmr->g); b->renderer_blocked--; @@ -167,6 +173,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, object_initialize_child(OBJECT(g), name, vmr, TYPE_VIRTIO_GPU_VIRGL_HOSTMEM_REGION); vmr->g = g; + vmr->mapping_state = VIRTIO_GPU_MR_MAPPED; mr = &vmr->mr; memory_region_init_ram_ptr(mr, OBJECT(vmr), "mr", size, data); @@ -195,7 +202,8 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, vmr = to_hostmem_region(res->mr); - trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr, vmr->finish_unmapping); + trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr, + vmr->mapping_state); /* * Perform async unmapping in 3 steps: @@ -206,7 +214,8 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, * asynchronously by virtio_gpu_virgl_hostmem_region_finalize(). * 3. Finish the unmapping with final virgl_renderer_resource_unmap(). */ - if (vmr->finish_unmapping) { + switch (vmr->mapping_state) { + case VIRTIO_GPU_MR_UNMAP_COMPLETED: res->mr = NULL; g_free(vmr); @@ -217,15 +226,22 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, __func__, strerror(-ret)); return ret; } - } else { - *cmd_suspended = true; + break; + case VIRTIO_GPU_MR_MAPPED: /* render will be unblocked once MR is freed */ b->renderer_blocked++; + vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED; + /* memory region owns self res->mr object and frees it by itself */ memory_region_del_subregion(&b->hostmem, mr); object_unparent(OBJECT(vmr)); + + /* Fallthrough */ + case VIRTIO_GPU_MR_UNMAP_STARTED: + *cmd_suspended = true; + break; } return 0; -- 2.52.0
