Simply iterating over one inactive list is no longer sufficient now that
we track inactive objects per address space (one inactive list per VM).
We could alternatively walk the bound + unbound lists with an inactive
check, but to me this way is a bit easier to understand.
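
For illustration only, that alternative might look roughly like the
sketch below. The list and field names (mm.bound_list, mm.unbound_list,
obj->global_list, obj->active) are assumptions about the surrounding
driver code, not something this patch adds or relies on:

	/* Hypothetical sketch of the bound + unbound variant. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (!obj->active)
			obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (!obj->active)
			obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;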

Signed-off-by: Ben Widawsky <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b4c35f0..8ce3545 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2282,7 +2282,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct i915_address_space *vm;
        struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        int i;
@@ -2293,8 +2293,9 @@ void i915_gem_reset(struct drm_device *dev)
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
-       list_for_each_entry(obj, &vm->inactive_list, mm_list)
-               obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               list_for_each_entry(obj, &vm->inactive_list, mm_list)
+                       obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 
        i915_gem_restore_fences(dev);
 }
-- 
1.8.3.4

_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to