From: Tvrtko Ursulin <[email protected]>

This allows drivers to implement per-object mmap(2) strategies.

If not set via the drm_vma_node_set_vm_ops helper, the driver's
default vm_ops are used, preserving compatibility with the
existing code base.

Signed-off-by: Tvrtko Ursulin <[email protected]>
---
 drivers/gpu/drm/drm_gem.c         | 53 ++++++++++++++++++++++++---------------
 drivers/gpu/drm/drm_vma_manager.c |  1 +
 include/drm/drm_vma_manager.h     | 13 ++++++++++
 3 files changed, 47 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..90d9c1141af3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -824,6 +824,31 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
+static int drm_gem_mmap_obj_ops(struct drm_gem_object *obj,
+                               unsigned long obj_size,
+                               const struct vm_operations_struct *vm_ops,
+                               struct vm_area_struct *vma)
+{
+       /* Check for valid size. */
+       if (obj_size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_ops = vm_ops;
+       vma->vm_private_data = obj;
+       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
+        * This reference is cleaned up by the corresponding vm_close
+        * (which should happen whether the vma was created by this call, or
+        * by a vm_open due to mremap or partial unmap or whatever).
+        */
+       drm_gem_object_reference(obj);
+
+       return 0;
+}
+
 /**
  * drm_gem_mmap_obj - memory map a GEM object
  * @obj: the GEM object to map
@@ -853,27 +878,11 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 {
        struct drm_device *dev = obj->dev;
 
-       /* Check for valid size. */
-       if (obj_size < vma->vm_end - vma->vm_start)
-               return -EINVAL;
-
        if (!dev->driver->gem_vm_ops)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_ops = dev->driver->gem_vm_ops;
-       vma->vm_private_data = obj;
-       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-       /* Take a ref for this mapping of the object, so that the fault
-        * handler can dereference the mmap offset's pointer to the object.
-        * This reference is cleaned up by the corresponding vm_close
-        * (which should happen whether the vma was created by this call, or
-        * by a vm_open due to mremap or partial unmap or whatever).
-        */
-       drm_gem_object_reference(obj);
-
-       return 0;
+       return drm_gem_mmap_obj_ops(obj, obj_size, dev->driver->gem_vm_ops,
+                                   vma);
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
 
@@ -898,6 +907,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
+       const struct vm_operations_struct *vm_ops;
        int ret;
 
        if (drm_device_is_unplugged(dev))
@@ -932,8 +942,11 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return -EACCES;
        }
 
-       ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
-                              vma);
+       vm_ops = node->vm_ops;
+       if (!vm_ops)
+               vm_ops = dev->driver->gem_vm_ops;
+       ret = drm_gem_mmap_obj_ops(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+                                  vm_ops, vma);
 
        drm_gem_object_unreference_unlocked(obj);
 
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..e73776757554 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -265,6 +265,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
                drm_mm_remove_node(&node->vm_node);
                memset(&node->vm_node, 0, sizeof(node->vm_node));
+               node->vm_ops = NULL;
        }
 
        write_unlock(&mgr->vm_lock);
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 2f63dd5e05eb..78f02911a5c5 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -42,6 +42,7 @@ struct drm_vma_offset_node {
        struct drm_mm_node vm_node;
        struct rb_node vm_rb;
        struct rb_root vm_files;
+       const struct vm_operations_struct *vm_ops;
 };
 
 struct drm_vma_offset_manager {
@@ -244,4 +245,16 @@ static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
        return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
 }
 
+static inline int
+drm_vma_node_set_vm_ops(struct drm_vma_offset_node *node,
+                       const struct vm_operations_struct *vm_ops)
+{
+       if (node->vm_ops && node->vm_ops != vm_ops)
+               return -ENODEV;
+
+       node->vm_ops = vm_ops;
+
+       return 0;
+}
+
 #endif /* __DRM_VMA_MANAGER_H__ */
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to