This converts all of the execbuf code to speak in vmas. Since the
execbuf code is very self-contained, it makes for a nicely isolated
conversion.

The meat of the change is turning eb_objects into eb_vmas, and then
wiring up the rest of the code to use vmas instead of (obj, vm) pairs.
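
To give the flavor of it (sketch only, not part of the diff below), a
relocation target lookup used to hand back an object and leave every
helper juggling the (obj, vm) pair:

    obj = eb_get_object(eb, reloc->target_handle);
    offset = i915_gem_obj_offset(obj, vm);

whereas with vmas the binding itself is what gets passed around, and
the address space and GTT node fall out of it directly:

    vma = eb_get_vma(eb, reloc->target_handle);
    offset = vma->node.start;   /* vma->vm is the address space */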

Unfortunately, to do this, we must move the exec_list link out of the
obj structure. This list is reused in the eviction code, so we must
also modify the eviction code to make this work.
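
Concretely (again just a sketch), the eviction unwind list now strings
vmas together instead of objects, so the i915_gem_obj_to_vma() lookup
in the unwind loops goes away:

    list_add(&vma->exec_list, unwind);  /* was: &vma->obj->exec_list */

    vma = list_first_entry(&unwind_list, struct i915_vma, exec_list);
    drm_mm_scan_remove_block(&vma->node);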

Signed-off-by: Ben Widawsky <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h            |  20 +-
 drivers/gpu/drm/i915/i915_gem.c            |   2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c      |  31 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 295 ++++++++++++++---------------
 4 files changed, 171 insertions(+), 177 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 59a8c03..fe41a3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -465,6 +465,16 @@ struct i915_vma {
        struct list_head mm_list;
 
        struct list_head vma_link; /* Link in the object's VMA list */
+
+       /** This vma's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
+
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
 };
 
 struct i915_address_space {
@@ -1278,8 +1289,6 @@ struct drm_i915_gem_object {
        struct list_head global_list;
 
        struct list_head ring_list;
-       /** This object's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
 
        /**
         * This is set if the object has been written to since last bound
@@ -1357,13 +1366,6 @@ struct drm_i915_gem_object {
        void *dma_buf_vmapping;
        int vmapping_count;
 
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
-
        struct intel_ring_buffer *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0fa6667..397a4b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3922,7 +3922,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
-       INIT_LIST_HEAD(&obj->exec_list);
        INIT_LIST_HEAD(&obj->vma_list);
 
        obj->ops = ops;
@@ -4057,6 +4056,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
 
        INIT_LIST_HEAD(&vma->vma_link);
        INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->exec_list);
        vma->vm = vm;
        vma->obj = obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 18a44a9..c860c5b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,7 +37,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
        if (vma->obj->pin_count)
                return false;
 
-       list_add(&vma->obj->exec_list, unwind);
+       list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
 }
 
@@ -49,7 +49,6 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
-       struct drm_i915_gem_object *obj;
        int ret = 0;
 
        trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -104,14 +103,13 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
-               obj = list_first_entry(&unwind_list,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&unwind_list,
+                                      struct i915_vma,
                                       exec_list);
-               vma = i915_gem_obj_to_vma(obj, vm);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);
 
-               list_del_init(&obj->exec_list);
+               list_del_init(&vma->exec_list);
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
@@ -125,28 +123,27 @@ found:
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
-               obj = list_first_entry(&unwind_list,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&unwind_list,
+                                      struct i915_vma,
                                       exec_list);
-               vma = i915_gem_obj_to_vma(obj, vm);
                if (drm_mm_scan_remove_block(&vma->node)) {
-                       list_move(&obj->exec_list, &eviction_list);
-                       drm_gem_object_reference(&obj->base);
+                       list_move(&vma->exec_list, &eviction_list);
+                       drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
-               list_del_init(&obj->exec_list);
+               list_del_init(&vma->exec_list);
        }
 
        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
-               obj = list_first_entry(&eviction_list,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&eviction_list,
+                                      struct i915_vma,
                                       exec_list);
                if (ret == 0)
-                       ret = i915_gem_object_unbind(obj, vm);
+                       ret = i915_gem_object_unbind(vma->obj, vm);
 
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
 
        return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1f82a04..75325c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,24 +33,24 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-       struct list_head objects;
+struct eb_vmas {
+       struct list_head vmas;
        int and;
        union {
-               struct drm_i915_gem_object *lut[0];
+               struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-       struct eb_objects *eb = NULL;
+       struct eb_vmas *eb = NULL;
 
        if (args->flags & I915_EXEC_HANDLE_LUT) {
                int size = args->buffer_count;
-               size *= sizeof(struct drm_i915_gem_object *);
-               size += sizeof(struct eb_objects);
+               size *= sizeof(struct i915_vma *);
+               size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }
 
@@ -61,7 +61,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
-                            sizeof(struct eb_objects),
+                            sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;
@@ -70,23 +70,23 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        } else
                eb->and = -args->buffer_count;
 
-       INIT_LIST_HEAD(&eb->objects);
+       INIT_LIST_HEAD(&eb->vmas);
        return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-                 struct drm_i915_gem_exec_object2 *exec,
-                 const struct drm_i915_gem_execbuffer2 *args,
-                 struct i915_address_space *vm,
-                 struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+              struct drm_i915_gem_exec_object2 *exec,
+              const struct drm_i915_gem_execbuffer2 *args,
+              struct i915_address_space *vm,
+              struct drm_file *file)
 {
        int i;
 
@@ -103,7 +103,13 @@ eb_lookup_objects(struct eb_objects *eb,
                        return -ENOENT;
                }
 
-               if (!list_empty(&obj->exec_list)) {
+               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               if (IS_ERR(vma)) {
+                       spin_unlock(&file->table_lock);
+                       return PTR_ERR(vma);
+               }
+
+               if (!list_empty(&vma->exec_list)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more 
than once in object list\n",
                                   obj, exec[i].handle, i);
@@ -111,19 +115,16 @@ eb_lookup_objects(struct eb_objects *eb,
                }
 
                drm_gem_object_reference(&obj->base);
-               list_add_tail(&obj->exec_list, &eb->objects);
+               list_add_tail(&vma->exec_list, &eb->vmas);
 
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-               if (IS_ERR(vma))
-                       return PTR_ERR(vma);
+               vma->exec_entry = &exec[i];
 
-               obj->exec_entry = &exec[i];
                if (eb->and < 0) {
-                       eb->lut[i] = obj;
+                       eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-                       obj->exec_handle = handle;
-                       hlist_add_head(&obj->exec_node,
+                       vma->exec_handle = handle;
+                       hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
        }
@@ -132,8 +133,7 @@ eb_lookup_objects(struct eb_objects *eb,
        return 0;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
        if (eb->and < 0) {
                if (handle >= -eb->and)
@@ -145,27 +145,26 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
-                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma;
 
-                       obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-                       if (obj->exec_handle == handle)
-                               return obj;
+                       vma = hlist_entry(node, struct i915_vma, exec_node);
+                       if (vma->exec_handle == handle)
+                               return vma;
                }
                return NULL;
        }
 }
 
-static void
-eb_destroy(struct eb_objects *eb, struct i915_address_space *vm)
-{
-       while (!list_empty(&eb->objects)) {
-               struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb)
+{
+       while (!list_empty(&eb->vmas)) {
+               struct i915_vma *vma;
 
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&eb->vmas,
+                                      struct i915_vma,
                                       exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
 }
@@ -179,22 +177,24 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-                                  struct eb_objects *eb,
+                                  struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc,
                                   struct i915_address_space *vm)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
+       struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret = -EINVAL;
 
        /* we've already hold a reference to all valid objects */
-       target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-       if (unlikely(target_obj == NULL))
+       target_vma = eb_get_vma(eb, reloc->target_handle);
+       if (unlikely(target_vma == NULL))
                return -ENOENT;
+       target_i915_obj = target_vma->obj;
+       target_obj = &target_vma->obj->base;
 
-       target_i915_obj = to_intel_bo(target_obj);
        target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -304,14 +304,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-                                   struct eb_objects *eb,
-                                   struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+                                struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;
 
        user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -330,8 +329,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-                                                                vm);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+                                                                vma->vm);
                        if (ret)
                                return ret;
 
@@ -352,17 +351,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-                                        struct eb_objects *eb,
-                                        struct drm_i915_gem_relocation_entry *relocs,
-                                        struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+                                     struct eb_vmas *eb,
+                                     struct drm_i915_gem_relocation_entry *relocs)
 {
-       const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;
 
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-                                                        vm);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+                                                        vma->vm);
                if (ret)
                        return ret;
        }
@@ -371,10 +369,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
                             struct i915_address_space *vm)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int ret = 0;
 
        /* This is the fast path and we cannot handle a pagefault whilst
@@ -385,8 +383,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
         * lockdep complains vehemently.
         */
        pagefault_disable();
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
@@ -399,38 +397,36 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-       return entry->relocation_count && !use_cpu_reloc(obj);
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       return entry->relocation_count && !use_cpu_reloc(vma->obj);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-                                  struct intel_ring_buffer *ring,
-                                  struct i915_address_space *vm,
-                                  bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+                               struct intel_ring_buffer *ring,
+                               bool *need_reloc)
 {
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
        u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-               !obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
-       struct i915_vma *vma;
+               !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+       struct drm_i915_gem_object *obj = vma->obj;
        int ret;
 
        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(obj);
+       need_mappable = need_fence || need_reloc_mappable(vma);
 
-       ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
                                  false);
        if (ret)
                return ret;
 
-       vma = i915_gem_obj_to_vma(obj, vm);
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
        if (has_fenced_gpu_access) {
@@ -446,8 +442,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                }
        }
 
-       if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-               entry->offset = i915_gem_obj_offset(obj, vm);
+       if (entry->offset != vma->node.start) {
+               entry->offset = vma->node.start;
                *need_reloc = true;
        }
 
@@ -456,67 +452,66 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       vm->map_vma(vma, obj->cache_level, flags);
+       vma->vm->map_vma(vma, obj->cache_level, flags);
 
        return 0;
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry;
 
-       if (!i915_gem_obj_bound_any(obj))
+       if (!drm_mm_node_allocated(&vma->node))
                return;
 
-       entry = obj->exec_entry;
+       entry = vma->exec_entry;
 
        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
+               i915_gem_object_unpin_fence(vma->obj);
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
+               i915_gem_object_unpin(vma->obj);
 
        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-                           struct list_head *objects,
-                           struct i915_address_space *vm,
+                           struct list_head *vmas,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
-       struct list_head ordered_objects;
+       struct i915_vma *vma;
+       struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;
 
-       INIT_LIST_HEAD(&ordered_objects);
-       while (!list_empty(objects)) {
+       INIT_LIST_HEAD(&ordered_vmas);
+       while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;
 
-               obj = list_first_entry(objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               entry = obj->exec_entry;
+               vma = list_first_entry(vmas, struct i915_vma, exec_list);
+               obj = vma->obj;
+               entry = vma->exec_entry;
 
                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
-               need_mappable = need_fence || need_reloc_mappable(obj);
+               need_mappable = need_fence || need_reloc_mappable(vma);
 
                if (need_mappable)
-                       list_move(&obj->exec_list, &ordered_objects);
+                       list_move(&vma->exec_list, &ordered_vmas);
                else
-                       list_move_tail(&obj->exec_list, &ordered_objects);
+                       list_move_tail(&vma->exec_list, &ordered_vmas);
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
-       list_splice(&ordered_objects, objects);
+       list_splice(&ordered_vmas, vmas);
 
        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
@@ -535,47 +530,47 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                int ret = 0;
 
                /* Unbind any ill-fitting objects or pin. */
-               list_for_each_entry(obj, objects, exec_list) {
-                       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+               list_for_each_entry(vma, vmas, exec_list) {
+                       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                        bool need_fence, need_mappable;
-                       u32 obj_offset;
 
-                       if (!i915_gem_obj_bound(obj, vm))
+                       obj = vma->obj;
+
+                       if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       obj_offset = i915_gem_obj_offset(obj, vm);
                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
-                       need_mappable = need_fence || need_reloc_mappable(obj);
+                       need_mappable = need_fence || need_reloc_mappable(vma);
 
                        BUG_ON((need_mappable || need_fence) &&
-                              !i915_is_ggtt(vm));
+                              !i915_is_ggtt(vma->vm));
 
                        if ((entry->alignment &&
-                            obj_offset & (entry->alignment - 1)) ||
+                            vma->node.start & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
-                               ret = i915_gem_object_unbind(obj, vm);
+                               ret = i915_gem_object_unbind(obj, vma->vm);
                        else
-                               ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+                               ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
                /* Bind fresh objects */
-               list_for_each_entry(obj, objects, exec_list) {
-                       if (i915_gem_obj_bound(obj, vm))
+               list_for_each_entry(vma, vmas, exec_list) {
+                       if (drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+                       ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
 err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(obj, objects, exec_list)
-                       i915_gem_execbuffer_unreserve_object(obj);
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
 
                if (ret != -ENOSPC || retry++)
                        return ret;
@@ -591,24 +586,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
-                                 struct eb_objects *eb,
-                                 struct drm_i915_gem_exec_object2 *exec,
-                                 struct i915_address_space *vm)
+                                 struct eb_vmas *eb,
+                                 struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_gem_relocation_entry *reloc;
-       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
        int count = args->buffer_count;
 
+       if (WARN_ON(list_empty(&eb->vmas)))
+               return 0;
+
+       vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
+
        /* We may process another execbuffer during the unlock... */
-       while (!list_empty(&eb->objects)) {
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+       while (!list_empty(&eb->vmas)) {
+               vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -672,20 +670,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
        /* reacquire the objects */
        eb_reset(eb);
-       ret = eb_lookup_objects(eb, exec, args, vm, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               int offset = obj->exec_entry - exec;
-               ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-                                                              reloc + reloc_offset[offset],
-                                                              vm);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               int offset = vma->exec_entry - exec;
+               ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+                                                           reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }
@@ -704,21 +701,21 @@ err:
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-                               struct list_head *objects)
+                               struct list_head *vmas)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        uint32_t flush_domains = 0;
        int ret;
 
-       list_for_each_entry(obj, objects, exec_list) {
-               ret = i915_gem_object_sync(obj, ring);
+       list_for_each_entry(vma, vmas, exec_list) {
+               ret = i915_gem_object_sync(vma->obj, ring);
                if (ret)
                        return ret;
 
-               if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-                       i915_gem_clflush_object(obj);
+               if (vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+                       i915_gem_clflush_object(vma->obj);
 
-               flush_domains |= obj->base.write_domain;
+               flush_domains |= vma->obj->base.write_domain;
        }
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
@@ -785,13 +782,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 }
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-                                  struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 
-       list_for_each_entry(obj, objects, exec_list) {
+       list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
@@ -801,7 +798,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-               i915_gem_object_move_to_active(obj, vm, ring);
+               i915_gem_object_move_to_active(obj, vma->vm, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -859,7 +856,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct i915_address_space *vm)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct eb_objects *eb;
+       struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
@@ -999,7 +996,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       eb = eb_create(args);
+       eb = eb_create(args, vm);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
@@ -1007,18 +1004,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Look up object handles */
-       ret = eb_lookup_objects(eb, exec, args, vm, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       batch_obj = list_entry(eb->objects.prev,
-                              struct drm_i915_gem_object,
-                              exec_list);
+       batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
@@ -1028,7 +1023,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-                                                               eb, exec, vm);
+                                                               eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
@@ -1053,7 +1048,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                vm->map_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
        }
 
-       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;
 
@@ -1108,11 +1103,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-       i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+       i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
-       eb_destroy(eb, vm);
+       eb_destroy(eb);
 
        mutex_unlock(&dev->struct_mutex);
 
-- 
1.8.3.3
