On 24.09.25 21:47, dongwon....@intel.com wrote:
From: Dongwon Kim <dongwon....@intel.com>

When the host KVM/QEMU resumes from hibernation, it loses all graphics
resources previously submitted by the guest OS, as the QEMU process is
terminated during the suspend-resume cycle. This leads to invalid resource
errors when the guest OS attempts to interact with the host using those
resources after resumption.

To resolve this, the virtio-gpu driver now tracks all active virtio_gpu_objects
and provides a mechanism to restore them by re-submitting the objects to QEMU
when needed (e.g., during resume from hibernation).

v2: - Attaching backing is done only if bo->attached was set before

v3: - Restoration of virtio-gpu resources is no longer triggered via .restore;
       instead, it is handled by a PM notifier only during hibernation.

Cc: Dmitry Osipenko <dmitry.osipe...@collabora.com>
Cc: Vivek Kasireddy <vivek.kasire...@intel.com>
Signed-off-by: Dongwon Kim <dongwon....@intel.com>
---
  drivers/gpu/drm/virtio/virtgpu_drv.h    | 10 ++++
  drivers/gpu/drm/virtio/virtgpu_kms.c    |  1 +
  drivers/gpu/drm/virtio/virtgpu_object.c | 71 +++++++++++++++++++++++++
  3 files changed, 82 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1279f998c8e0..55f836378237 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -126,6 +126,12 @@ struct virtio_gpu_object_array {
        struct drm_gem_object *objs[] __counted_by(total);
  };
+struct virtio_gpu_object_restore {
+       struct virtio_gpu_object *bo;
+       struct virtio_gpu_object_params params;
+       struct list_head node;
+};
+
  struct virtio_gpu_vbuffer;
  struct virtio_gpu_device;
@@ -265,6 +271,7 @@ struct virtio_gpu_device {
        struct work_struct obj_free_work;
        spinlock_t obj_free_lock;
        struct list_head obj_free_list;
+       struct list_head obj_restore;

I am not very familiar with this code, but I am curious: don't we need a lock to keep this list consistent?
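For comparison, obj_free_list above is protected by obj_free_lock. A minimal, untested sketch of similar protection for the restore list could look roughly like this (the obj_restore_lock field is hypothetical; it would also need to be added to struct virtio_gpu_device and initialized in virtio_gpu_init()):

static void virtio_gpu_object_add_restore_list(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_object *bo,
                                               struct virtio_gpu_object_params *params)
{
        struct virtio_gpu_object_restore *new;

        /* allocate outside the lock; GFP_KERNEL may sleep */
        new = kvmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return;

        new->bo = bo;
        memcpy(&new->params, params, sizeof(*params));

        /* hypothetical obj_restore_lock guards all list add/del/walk */
        spin_lock(&vgdev->obj_restore_lock);
        list_add_tail(&new->node, &vgdev->obj_restore);
        spin_unlock(&vgdev->obj_restore_lock);
}

The same lock would have to be taken in virtio_gpu_object_del_restore_list() and around the walk in virtio_gpu_object_restore_all().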


struct virtio_gpu_drv_capset *capsets;
        uint32_t num_capsets;
@@ -479,6 +486,9 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                               uint32_t *resid);
+
+int virtio_gpu_object_restore_all(struct virtio_gpu_device *vgdev);
+
  /* virtgpu_prime.c */
  int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index cbebe19c3fb3..08f8e71a7072 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -162,6 +162,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
        vgdev->fence_drv.context = dma_fence_context_alloc(1);
        spin_lock_init(&vgdev->fence_drv.lock);
        INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+       INIT_LIST_HEAD(&vgdev->obj_restore);
        INIT_LIST_HEAD(&vgdev->cap_cache);
        INIT_WORK(&vgdev->config_changed_work,
                  virtio_gpu_config_changed_work_func);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e6363c887500..28fdfc70fa49 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -61,6 +61,38 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
                ida_free(&vgdev->resource_ida, id - 1);
  }
+static void virtio_gpu_object_add_restore_list(struct virtio_gpu_device *vgdev,
+                                              struct virtio_gpu_object *bo,
+                                              struct virtio_gpu_object_params *params)
+{
+       struct virtio_gpu_object_restore *new;
+
+       new = kvmalloc(sizeof(*new), GFP_KERNEL);
+       if (!new) {
+               DRM_ERROR("Failed to allocate virtio_gpu_object_restore");
+               return;
+       }
+
+       new->bo = bo;
+       memcpy(&new->params, params, sizeof(*params));
+
+       list_add_tail(&new->node, &vgdev->obj_restore);
+}
+
+static void virtio_gpu_object_del_restore_list(struct virtio_gpu_device *vgdev,
+                                              struct virtio_gpu_object *bo)
+{
+       struct virtio_gpu_object_restore *curr, *tmp;
+
+       list_for_each_entry_safe(curr, tmp, &vgdev->obj_restore, node) {
+               if (bo == curr->bo) {
+                       list_del(&curr->node);
+                       kvfree(curr);
+                       break;
+               }
+       }
+}
+
  void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
  {
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
@@ -84,6 +116,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
                drm_gem_object_release(&bo->base.base);
                kfree(bo);
        }
+       virtio_gpu_object_del_restore_list(vgdev, bo);

Is there a possibility of hitting a use-after-free here? I see kfree(bo) before this.
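If so, one possible (untested) way to avoid it would be to drop the restore-list entry before any path that can free bo, roughly:

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        /* drop the restore-list entry while bo is still valid */
        virtio_gpu_object_del_restore_list(vgdev, bo);

        /* ... existing cleanup/free paths unchanged ... */
}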


Regards,

Nirmoy

  }
static void virtio_gpu_free_object(struct drm_gem_object *obj)
@@ -258,6 +291,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        }
+ /* add submitted object to restore list */
+       virtio_gpu_object_add_restore_list(vgdev, bo, params);
        *bo_ptr = bo;
        return 0;
@@ -271,3 +306,39 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
        drm_gem_shmem_free(shmem_obj);
        return ret;
  }
+
+int virtio_gpu_object_restore_all(struct virtio_gpu_device *vgdev)
+{
+       struct virtio_gpu_object_restore *curr, *tmp;
+       struct virtio_gpu_mem_entry *ents;
+       unsigned int nents;
+       int ret;
+
+       list_for_each_entry_safe(curr, tmp, &vgdev->obj_restore, node) {
+               ret = virtio_gpu_object_shmem_init(vgdev, curr->bo, &ents, &nents);
+               if (ret)
+                       break;
+
+               if (curr->params.blob) {
+                       virtio_gpu_cmd_resource_create_blob(vgdev, curr->bo, &curr->params,
+                                                           ents, nents);
+               } else if (curr->params.virgl) {
+                       virtio_gpu_cmd_resource_create_3d(vgdev, curr->bo, &curr->params,
+                                                         NULL, NULL);
+
+                       if (curr->bo->attached) {
+                               curr->bo->attached = false;
+                               virtio_gpu_object_attach(vgdev, curr->bo, ents, nents);
+                       }
+               } else {
+                       virtio_gpu_cmd_create_resource(vgdev, curr->bo, &curr->params,
+                                                      NULL, NULL);
+                       if (curr->bo->attached) {
+                               curr->bo->attached = false;
+                               virtio_gpu_object_attach(vgdev, curr->bo, ents, nents);
+                       }
+               }
+       }
+
+       return ret;
+}
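As a side note on the v3 changelog: the PM notifier that actually triggers virtio_gpu_object_restore_all() is not part of this diff. A rough, untested sketch of how such a hookup might look (pm_nb and virtio_gpu_pm_notify are made-up names, not taken from this series):

#include <linux/suspend.h>

static int virtio_gpu_pm_notify(struct notifier_block *nb,
                                unsigned long mode, void *unused)
{
        struct virtio_gpu_device *vgdev =
                container_of(nb, struct virtio_gpu_device, pm_nb);

        /* re-submit the resources lost across the hibernation cycle */
        if (mode == PM_POST_HIBERNATION)
                virtio_gpu_object_restore_all(vgdev);

        return NOTIFY_DONE;
}

/* In virtio_gpu_init():
 *      vgdev->pm_nb.notifier_call = virtio_gpu_pm_notify;
 *      register_pm_notifier(&vgdev->pm_nb);
 */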
