When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it at the encompassing object), then there is a potential for
obj->vma_list to be modified for other elements upon i915_vma_unbind().
As a result, if we walk over the object's vma list and call
i915_vma_unbind(), we must be prepared for that list to change under us.

Signed-off-by: Chris Wilson <[email protected]>
---
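A sketch of the hazard, simplified from the patch below (declarations,
locking and error propagation elided): even list_for_each_entry_safe()
only caches the *next* element, so when i915_vma_unbind() retires
requests that reshuffle other vmas on obj->vma_list, the cached cursor
can end up pointing at a node that has moved. Detaching each vma onto a
private list before unbinding keeps the walk valid no matter how the
shared list changes:

	/* Unsafe once unbind can modify obj->vma_list: the iterator's
	 * cached next pointer may no longer belong to this list.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	/* Safe: move each vma onto a private list before unbinding,
	 * then splice the survivors back once the walk is complete.
	 */
	LIST_HEAD(still_in_list);
	while (!list_empty(&obj->vma_list)) {
		vma = list_first_entry(&obj->vma_list,
				       struct i915_vma, obj_link);
		list_move_tail(&vma->obj_link, &still_in_list);
		if (i915_vma_unbind(vma))
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);
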
 drivers/gpu/drm/i915/i915_drv.h          |  2 ++
 drivers/gpu/drm/i915/i915_gem.c          | 54 ++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  6 +---
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  4 +--
 4 files changed, 45 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8f5cf244094e..9fa925389332 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2707,6 +2707,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ed3f306af42f..95e69dc47fc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -254,18 +254,38 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+       struct list_head still_in_list;
+       int ret = 0;
+
+       INIT_LIST_HEAD(&still_in_list);
+       while (!list_empty(&obj->vma_list)) {
+               struct i915_vma *vma =
+                       list_first_entry(&obj->vma_list,
+                                        struct i915_vma,
+                                        obj_link);
+
+               list_move_tail(&vma->obj_link, &still_in_list);
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       break;
+       }
+       list_splice(&still_in_list, &obj->vma_list);
+
+       return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-       struct i915_vma *vma, *next;
        int ret;
 
        drm_gem_object_reference(&obj->base);
-       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-               if (i915_vma_unbind(vma))
-                       break;
-
-       ret = i915_gem_object_put_pages(obj);
+       ret = i915_gem_object_unbind(obj);
+       if (ret == 0)
+               ret = i915_gem_object_put_pages(obj);
        drm_gem_object_unreference(&obj->base);
 
        return ret;
@@ -3038,7 +3058,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       struct i915_vma *vma, *next;
+       struct i915_vma *vma;
        int ret = 0;
 
        if (obj->cache_level == cache_level)
@@ -3049,7 +3069,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * catch the issue of the CS prefetch crossing page boundaries and
         * reading an invalid PTE on older architectures.
         */
-       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
@@ -3058,11 +3079,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                        return -EBUSY;
                }
 
-               if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-                       ret = i915_vma_unbind(vma);
-                       if (ret)
-                               return ret;
-               }
+               if (i915_gem_valid_gtt_space(vma, cache_level))
+                       continue;
+
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       return ret;
+
+               /* As unbinding may affect other elements in the
+                * obj->vma_list (due to side-effects from retiring
+                * an active vma), play safe and restart the iterator.
+                */
+               goto restart;
        }
 
        /* We can reuse the existing drm_mm nodes but need to change the
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index fa190ef3f727..e15fc7531f08 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -141,7 +141,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                INIT_LIST_HEAD(&still_in_list);
                while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
-                       struct i915_vma *vma, *v;
 
                        obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
@@ -160,10 +159,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        drm_gem_object_reference(&obj->base);
 
                        /* For the unbound phase, this should be a no-op! */
-                       list_for_each_entry_safe(vma, v,
-                                                &obj->vma_list, obj_link)
-                               if (i915_vma_unbind(vma))
-                                       break;
+                       i915_gem_object_unbind(obj);
 
                        if (i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2f3638d02bdd..a90392246471 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -75,14 +75,12 @@ static void __cancel_userptr__worker(struct work_struct *work)
 
        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
-               struct i915_vma *vma, *tmp;
                bool was_interruptible;
 
                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;
 
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-                       WARN_ON(i915_vma_unbind(vma));
+               WARN_ON(i915_gem_object_unbind(obj));
                WARN_ON(i915_gem_object_put_pages(obj));
 
                dev_priv->mm.interruptible = was_interruptible;
-- 
2.7.0.rc3
