Add a new ioctl, DRM_IOCTL_AMDGPU_CRIU_MAPPING_INFO, which returns the list of VM mappings associated with a given BO, along with each mapping's start and last page, offset, and flags.
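For illustration, the intended userspace call pattern is roughly the sketch below (not part of this patch). It follows the size-query-then-retry contract documented on the ioctl: call once with num_mappings == 0 to learn the count, allocate, and retry while the reported count still exceeds the buffer. The drm_fd and gem_handle parameters and the dump_bo_mappings() helper are placeholder names, and the example assumes the updated amdgpu_drm.h uapi header is installed.

  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>

  #include <drm/amdgpu_drm.h>

  static int dump_bo_mappings(int drm_fd, uint32_t gem_handle)
  {
          struct drm_amdgpu_criu_mapping_info_args args = {
                  .gem_handle = gem_handle,
                  .num_mappings = 0,      /* first call only queries the count */
          };
          struct drm_amdgpu_criu_vm_bucket *buckets = NULL;
          uint32_t capacity = 0;
          int ret;

          ret = ioctl(drm_fd, DRM_IOCTL_AMDGPU_CRIU_MAPPING_INFO, &args);

          /* Retry while the reported count exceeds what was allocated. */
          while (!ret && args.num_mappings > capacity) {
                  capacity = args.num_mappings;
                  free(buckets);
                  buckets = calloc(capacity, sizeof(*buckets));
                  if (!buckets)
                          return -1;

                  args.num_mappings = capacity;
                  args.vm_buckets = (uintptr_t)buckets;
                  ret = ioctl(drm_fd, DRM_IOCTL_AMDGPU_CRIU_MAPPING_INFO, &args);
          }

          /* On success, buckets[0..args.num_mappings) describe the BO's
           * mappings and can be serialized into the checkpoint image. */
          free(buckets);
          return ret;
  }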
Signed-off-by: David Francis <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_criu.c | 96 ++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_criu.h |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h   |  5 ++
 include/uapi/drm/amdgpu_drm.h            | 27 +++++++
 5 files changed, 131 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.c
index 34a0358946b6..dd677367a82e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.c
@@ -142,3 +142,99 @@ int amdgpu_criu_bo_info_ioctl(struct drm_device *dev, void *data,
 
 	return ret;
 }
+
+/**
+ * amdgpu_criu_mapping_info_ioctl - get information about a buffer's mappings
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_criu_mapping_info_args
+ * @filp: drm file pointer
+ *
+ * As input, num_mappings is the size of the vm_buckets array; as output,
+ * it is the number of mappings the bo has. If that number is larger
+ * than the size of the array, the ioctl must be retried with a larger
+ * array.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_criu_mapping_info_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *filp)
+{
+	struct drm_amdgpu_criu_mapping_info_args *args = data;
+	struct drm_gem_object *gobj = idr_find(&filp->object_idr, args->gem_handle);
+	struct amdgpu_vm *avm = &((struct amdgpu_fpriv *)filp->driver_priv)->vm;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(avm, bo);
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct drm_amdgpu_criu_vm_bucket *vm_buckets;
+	struct amdgpu_bo_va_mapping *mapping;
+	struct drm_exec exec;
+	int num_mappings = 0;
+	int ret;
+
+	vm_buckets = kvzalloc(args->num_mappings * sizeof(*vm_buckets), GFP_KERNEL);
+	if (!vm_buckets) {
+		ret = -ENOMEM;
+		goto free_vms;
+	}
+
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+	drm_exec_until_all_locked(&exec) {
+		if (gobj) {
+			ret = drm_exec_lock_obj(&exec, gobj);
+			drm_exec_retry_on_contention(&exec);
+			if (ret)
+				goto unlock_exec;
+		}
+
+		ret = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+		drm_exec_retry_on_contention(&exec);
+		if (ret)
+			goto unlock_exec;
+	}
+
+	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+		if (num_mappings < args->num_mappings) {
+			vm_buckets[num_mappings].start = mapping->start;
+			vm_buckets[num_mappings].last = mapping->last;
+			vm_buckets[num_mappings].offset = mapping->offset;
+			vm_buckets[num_mappings].flags = hardware_flags_to_uapi_flags(drm_to_adev(dev), mapping->flags);
+		}
+		num_mappings += 1;
+	}
+
+	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+		if (num_mappings < args->num_mappings) {
+			vm_buckets[num_mappings].start = mapping->start;
+			vm_buckets[num_mappings].last = mapping->last;
+			vm_buckets[num_mappings].offset = mapping->offset;
+			vm_buckets[num_mappings].flags = hardware_flags_to_uapi_flags(drm_to_adev(dev), mapping->flags);
+		}
+		num_mappings += 1;
+	}
+
+	drm_exec_fini(&exec);
+
+	if (num_mappings > 0) {
+		if (num_mappings <= args->num_mappings) {
+			ret = copy_to_user((void __user *)args->vm_buckets, vm_buckets, num_mappings * sizeof(*vm_buckets));
+			if (ret) {
+				pr_debug("Failed to copy mapping information to user\n");
+				ret = -EFAULT;
+			}
+		}
+	}
+	args->num_mappings = num_mappings;
+
+	kvfree(vm_buckets);
+
+	return ret;
+unlock_exec:
+	drm_exec_fini(&exec);
+free_vms:
+	kvfree(vm_buckets);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.h
index 1b18ffee6587..486c341729ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_criu.h
@@ -28,5 +28,7 @@
 
 int amdgpu_criu_bo_info_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *filp);
+int amdgpu_criu_mapping_info_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *filp);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bf33567bb166..5f3de93a665d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -3023,6 +3023,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_CRIU_BO_INFO, amdgpu_criu_bo_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_CRIU_MAPPING_INFO, amdgpu_criu_mapping_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct drm_driver amdgpu_kms_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..978d6b29e626 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -668,4 +668,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm,
 				struct dma_fence **fence);
 
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+	list_for_each_entry(mapping, &bo_va->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+	list_for_each_entry(mapping, &bo_va->invalids, list)
+
 #endif
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1508c55ff92a..ab09ae9890e7 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -58,6 +58,7 @@ extern "C" {
 #define DRM_AMDGPU_USERQ_SIGNAL		0x17
 #define DRM_AMDGPU_USERQ_WAIT		0x18
 #define DRM_AMDGPU_CRIU_BO_INFO		0x19
+#define DRM_AMDGPU_CRIU_MAPPING_INFO	0x20
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -79,6 +80,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_USERQ_SIGNAL	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
 #define DRM_IOCTL_AMDGPU_USERQ_WAIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
 #define DRM_IOCTL_AMDGPU_CRIU_BO_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CRIU_BO_INFO, struct drm_amdgpu_criu_bo_info_args)
+#define DRM_IOCTL_AMDGPU_CRIU_MAPPING_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CRIU_MAPPING_INFO, struct drm_amdgpu_criu_mapping_info_args)
 
 /**
  * DOC: memory domains
@@ -1654,6 +1656,31 @@ struct drm_amdgpu_criu_bo_bucket {
 	__u32 gem_handle;
 };
 
+struct drm_amdgpu_criu_mapping_info_args {
+	/* Handle of bo to get mappings of */
+	__u32 gem_handle;
+
+	/* IN: Size of vm_buckets buffer. OUT: Number of mappings of the bo (if larger than size of buffer, must retry) */
+	__u32 num_mappings;
+
+	/* User pointer to array of drm_amdgpu_criu_vm_bucket */
+	__u64 vm_buckets;
+};
+
+struct drm_amdgpu_criu_vm_bucket {
+	/* Start of mapping (in number of pages) */
+	__u64 start;
+
+	/* Last page of mapping, inclusive (in number of pages) */
+	__u64 last;
+
+	/* Mapping offset */
+	__u64 offset;
+
+	/* flags needed to recreate mapping; still pending how to get these */
+	__u64 flags;
+};
+
 #if defined(__cplusplus)
 }
 #endif
-- 
2.34.1
