Due to the move of the active/inactive lists to be per-VM, it no longer
makes sense to use them for shrinking, since shrinking isn't VM specific
(such a need may arise, but doesn't exist yet).

What we can do instead is use the global bound list to find all objects
which aren't active.
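
To illustrate the idea outside the driver, here is a minimal userspace
sketch (not i915 code; the struct, field names, and list layout are
simplified stand-ins) of the counting logic the diff below adds: walk a
single global bound list, skip anything still active, and count pages
only for objects with no pin and no pinned backing pages.

/*
 * Simplified model of the shrinker's "count shrinkable pages" pass
 * over a global bound list. PAGE_SHIFT and the list representation
 * are assumptions for the sake of a standalone example.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_obj {
	int active;              /* still in use by the GPU */
	int pin_count;           /* binding is pinned */
	int pages_pin_count;     /* backing pages are pinned */
	size_t size;             /* object size in bytes */
	struct fake_obj *next;   /* stand-in for the global bound list */
};

/* Count pages the shrinker could reclaim from bound, inactive objects. */
static unsigned long count_shrinkable(struct fake_obj *bound_list)
{
	unsigned long cnt = 0;
	struct fake_obj *obj;

	for (obj = bound_list; obj; obj = obj->next) {
		if (obj->active)
			continue;

		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			cnt += obj->size >> PAGE_SHIFT;
	}

	return cnt;
}

int main(void)
{
	struct fake_obj a = { .active = 1, .size = 1 << 20 };                /* skipped: active */
	struct fake_obj b = { .pin_count = 1, .size = 1 << 20, .next = &a }; /* skipped: pinned */
	struct fake_obj c = { .size = 1 << 20, .next = &b };                 /* counted */

	printf("shrinkable pages: %lu\n", count_shrinkable(&c));
	return 0;
}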

Signed-off-by: Ben Widawsky <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 586172a..6f08303 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4614,7 +4614,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
        bool unlock = true;
@@ -4643,9 +4642,14 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &vm->inactive_list, mm_list)
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (obj->active)
+                       continue;
+
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
+       }
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
-- 
1.8.3.4
