---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 38 ++++----
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  6 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  1 -
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  2 -
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    | 12 ++-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  1 -
 drivers/gpu/drm/i915/i915_debugfs.c           |  6 +-
 drivers/gpu/drm/i915/i915_drv.h               |  2 -
 drivers/gpu/drm/i915/i915_gem.c               |  4 +-
 drivers/gpu/drm/i915/i915_request.c           | 89 ++++++++-----------
 drivers/gpu/drm/i915/i915_request.h           |  3 -
 .../gpu/drm/i915/selftests/igt_flush_test.c   |  5 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  3 -
 13 files changed, 74 insertions(+), 98 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 68faf1a71c97..bd01b7d95d0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -751,8 +751,11 @@ static int eb_select_context(struct i915_execbuffer *eb)
 
 static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
 {
+       struct list_head *requests = &ring->timeline->requests;
        struct i915_request *rq;
 
+       lockdep_assert_held(&ring->timeline->mutex);
+
        /*
         * Completely unscientific finger-in-the-air estimates for suitable
         * maximum user request size (to avoid blocking) and then backoff.
@@ -767,12 +770,15 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
         * claiming our resources, but not so long that the ring completely
         * drains before we can submit our next request.
         */
-       list_for_each_entry(rq, &ring->request_list, ring_link) {
+       list_for_each_entry(rq, requests, link) {
+               if (rq->ring != ring)
+                       continue;
+
                if (__intel_ring_space(rq->postfix,
                                       ring->emit, ring->size) > ring->size / 2)
                        break;
        }
-       if (&rq->ring_link == &ring->request_list)
+       if (&rq->link == requests)
                return NULL; /* weird, we will check again later for real */
 
        return i915_request_get(rq);
@@ -780,8 +786,12 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
 
 static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 {
+       struct intel_ring *ring = eb->context->ring;
        struct i915_request *rq;
-       int ret = 0;
+       int ret;
+
+       if (READ_ONCE(ring->space) >= PAGE_SIZE)
+               return 0;
 
        /*
         * Apply a light amount of backpressure to prevent excessive hogs
@@ -789,18 +799,20 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
         * keeping all of their resources pinned.
         */
 
-       rq = __eb_wait_for_ring(eb->context->ring);
-       if (rq) {
-               mutex_unlock(&eb->i915->drm.struct_mutex);
+       ret = mutex_lock_interruptible(&ring->timeline->mutex);
+       if (ret)
+               return ret;
 
+       rq = __eb_wait_for_ring(ring);
+       mutex_unlock(&ring->timeline->mutex);
+
+       if (rq) {
                if (i915_request_wait(rq,
                                      I915_WAIT_INTERRUPTIBLE,
                                      MAX_SCHEDULE_TIMEOUT) < 0)
                        ret = -EINTR;
 
                i915_request_put(rq);
-
-               mutex_lock(&eb->i915->drm.struct_mutex);
        }
 
        return ret;
@@ -2447,15 +2459,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         */
        intel_gt_pm_get(eb.i915);
 
-       err = i915_mutex_lock_interruptible(dev);
-       if (err)
-               goto err_rpm;
-
        err = eb_select_engine(&eb, file, args);
        if (unlikely(err))
-               goto err_unlock;
+               goto err_rpm;
 
-       err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+       err = eb_wait_for_ring(&eb);
        if (unlikely(err))
                goto err_engine;
 
@@ -2612,8 +2620,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                eb_release_vmas(&eb);
 err_engine:
        eb_unpin_context(&eb);
-err_unlock:
-       mutex_unlock(&dev->struct_mutex);
 err_rpm:
        intel_gt_pm_put(eb.i915);
        i915_gem_context_put(eb.gem_context);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 35b73c347887..e7134cb38425 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -68,11 +68,7 @@ static void retire_work_handler(struct work_struct *work)
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), gem.retire_work.work);
 
-       /* Come back later if the device is busy... */
-       if (mutex_trylock(&i915->drm.struct_mutex)) {
-               i915_retire_requests(i915);
-               mutex_unlock(&i915->drm.struct_mutex);
-       }
+       i915_retire_requests(i915);
 
        queue_delayed_work(i915->wq,
                           &i915->gem.retire_work,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 85eb5c99d657..85bbe2055ea0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -745,7 +745,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
                               engine->status_page.vma))
                goto out_frame;
 
-       INIT_LIST_HEAD(&frame->ring.request_list);
        frame->ring.timeline = &frame->timeline;
        frame->ring.vaddr = frame->cs;
        frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 96994a55804a..367e687b5601 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,8 +68,6 @@ struct intel_ring {
        void *vaddr;
 
        struct i915_timeline *timeline;
-       struct list_head request_list;
-       struct list_head active_link;
 
        u32 head;
        u32 tail;
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 95d9adc7e2e9..546f27b81b4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1267,7 +1267,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
                return ERR_PTR(-ENOMEM);
 
        kref_init(&ring->ref);
-       INIT_LIST_HEAD(&ring->request_list);
        ring->timeline = i915_timeline_get(timeline);
 
        ring->size = size;
@@ -1787,21 +1786,26 @@ static int ring_request_alloc(struct i915_request *request)
 
 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
+       struct list_head *requests = &ring->timeline->requests;
        struct i915_request *target;
        long timeout;
 
+       lockdep_assert_held(&ring->timeline->mutex);
        if (intel_ring_update_space(ring) >= bytes)
                return 0;
 
-       GEM_BUG_ON(list_empty(&ring->request_list));
-       list_for_each_entry(target, &ring->request_list, ring_link) {
+       GEM_BUG_ON(list_empty(requests));
+       list_for_each_entry(target, requests, link) {
+               if (target->ring != ring)
+                       continue;
+
                /* Would completion of this request free enough space? */
                if (bytes <= __intel_ring_space(target->postfix,
                                                ring->emit, ring->size))
                        break;
        }
 
-       if (WARN_ON(&target->ring_link == &ring->request_list))
+       if (GEM_WARN_ON(&target->link == requests))
                return -ENOSPC;
 
        timeout = i915_request_wait(target,
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index b9c2764beca3..fb19a5290abc 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -67,7 +67,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
        ring->base.vaddr = (void *)(ring + 1);
        ring->base.timeline = &ring->timeline;
 
-       INIT_LIST_HEAD(&ring->base.request_list);
        intel_ring_update_space(&ring->base);
 
        return &ring->base;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 657e8fb18e38..a8a114c6a839 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3695,12 +3695,12 @@ i915_drop_caches_set(void *data, u64 val)
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);
 
-               if (val & DROP_RETIRE)
-                       i915_retire_requests(i915);
-
                mutex_unlock(&i915->drm.struct_mutex);
        }
 
+       if (val & DROP_RETIRE)
+               i915_retire_requests(i915);
+
        if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
                i915_handle_error(i915, ALL_ENGINES, 0, NULL);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0ad9522425e..621f13191e01 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1863,8 +1863,6 @@ struct drm_i915_private {
                        struct list_head hwsp_free_list;
                } timelines;
 
-               struct list_head active_rings;
-
                struct intel_wakeref wakeref;
 
                struct list_head closed_vma;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1571c707ad15..91a21672461f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1017,9 +1017,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
                if (err)
                        return err;
 
-               i915_retire_requests(i915);
        }
 
+       i915_retire_requests(i915);
+
        return 0;
 }
 
@@ -1772,7 +1773,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 
        intel_gt_pm_init(dev_priv);
 
-       INIT_LIST_HEAD(&dev_priv->gt.active_rings);
        INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
        spin_lock_init(&dev_priv->gt.closed_lock);
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2c2624d7e18e..7e1c699d21c9 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -182,9 +182,6 @@ i915_request_remove_from_client(struct i915_request *request)
 
 static void advance_ring(struct i915_request *request)
 {
-       struct intel_ring *ring = request->ring;
-       unsigned int tail;
-
        /*
         * We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
@@ -194,24 +191,7 @@ static void advance_ring(struct i915_request *request)
         * Note this requires that we are always called in request
         * completion order.
         */
-       GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
-       if (list_is_last(&request->ring_link, &ring->request_list)) {
-               /*
-                * We may race here with execlists resubmitting this request
-                * as we retire it. The resubmission will move the ring->tail
-                * forwards (to request->wa_tail). We either read the
-                * current value that was written to hw, or the value that
-                * is just about to be. Either works, if we miss the last two
-                * noops - they are safe to be replayed on a reset.
-                */
-               tail = READ_ONCE(request->tail);
-               list_del(&ring->active_link);
-       } else {
-               tail = request->postfix;
-       }
-       list_del_init(&request->ring_link);
-
-       ring->head = tail;
+       request->ring->head = request->postfix;
 }
 
 static void free_capture_list(struct i915_request *request)
@@ -290,7 +270,7 @@ static bool i915_request_retire(struct i915_request *rq)
 
 void i915_request_retire_upto(struct i915_request *rq)
 {
-       struct intel_ring *ring = rq->ring;
+       struct list_head *requests = &rq->timeline->requests;
        struct i915_request *tmp;
 
        GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -300,11 +280,9 @@ void i915_request_retire_upto(struct i915_request *rq)
 
        lockdep_assert_held(&rq->timeline->mutex);
        GEM_BUG_ON(!i915_request_completed(rq));
-       GEM_BUG_ON(list_empty(&rq->ring_link));
 
        do {
-               tmp = list_first_entry(&ring->request_list,
-                                      typeof(*tmp), ring_link);
+               tmp = list_first_entry(requests, typeof(*tmp), link);
        } while (i915_request_retire(tmp) && tmp != rq);
 }
 
@@ -535,12 +513,12 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct i915_timeline *tl)
 {
        struct i915_request *rq, *rn;
 
-       lockdep_assert_held(&ring->timeline->mutex);
-       list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+       lockdep_assert_held(&tl->mutex);
+       list_for_each_entry_safe(rq, rn, &tl->requests, link)
                if (!i915_request_retire(rq))
                        break;
 }
@@ -548,17 +526,17 @@ static void ring_retire_requests(struct intel_ring *ring)
 static noinline struct i915_request *
 request_alloc_slow(struct intel_context *ce, gfp_t gfp)
 {
-       struct intel_ring *ring = ce->ring;
+       struct i915_timeline *tl = ce->ring->timeline;
        struct i915_request *rq;
 
-       if (list_empty(&ring->request_list))
+       if (list_empty(&tl->requests))
                goto out;
 
        if (!gfpflags_allow_blocking(gfp))
                goto out;
 
        /* Move our oldest request to the slab-cache (if not in use!) */
-       rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+       rq = list_first_entry(&tl->requests, typeof(*rq), link);
        i915_request_retire(rq);
 
        rq = kmem_cache_alloc(global.slab_requests,
@@ -567,11 +545,11 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
                return rq;
 
        /* Ratelimit ourselves to prevent oom from malicious clients */
-       rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+       rq = list_last_entry(&tl->requests, typeof(*rq), link);
        cond_synchronize_rcu(rq->rcustate);
 
        /* Retire our old requests in the hope that we free some */
-       ring_retire_requests(ring);
+       retire_requests(tl);
 
 out:
        return kmem_cache_alloc(global.slab_requests, gfp);
@@ -711,6 +689,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 struct i915_request *
 i915_request_create(struct intel_context *ce)
 {
+       struct i915_timeline *tl = ce->ring->timeline;
        struct i915_request *rq;
        int err;
 
@@ -719,8 +698,8 @@ i915_request_create(struct intel_context *ce)
                return ERR_PTR(err);
 
        /* Move our oldest request to the slab-cache (if not in use!) */
-       rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
-       if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
+       rq = list_first_entry(&tl->requests, typeof(*rq), link);
+       if (!list_is_last(&rq->link, &tl->requests) &&
            i915_request_completed(rq))
                i915_request_retire(rq);
 
@@ -731,7 +710,7 @@ i915_request_create(struct intel_context *ce)
                goto err_unlock;
 
        /* Check that we do not interrupt ourselves with a new request */
-       rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+       rq->cookie = lockdep_pin_lock(&tl->mutex);
 
        return rq;
 
@@ -743,10 +722,10 @@ i915_request_create(struct intel_context *ce)
 static int
 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 {
-       if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+       if (list_is_first(&signal->link, &signal->ring->timeline->requests))
                return 0;
 
-       signal = list_prev_entry(signal, ring_link);
+       signal = list_prev_entry(signal, link);
        if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
                return 0;
 
@@ -1143,6 +1122,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
         */
        GEM_BUG_ON(rq->reserved_space > ring->space);
        rq->reserved_space = 0;
+       rq->emitted_jiffies = jiffies;
 
        /*
         * Record the position of the start of the breadcrumb so that
@@ -1156,11 +1136,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 
        prev = __i915_request_add_to_timeline(rq);
 
-       list_add_tail(&rq->ring_link, &ring->request_list);
-       if (list_is_first(&rq->ring_link, &ring->request_list))
-               list_add(&ring->active_link, &rq->i915->gt.active_rings);
-       rq->emitted_jiffies = jiffies;
-
        /*
         * Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
@@ -1459,22 +1434,30 @@ long i915_request_wait(struct i915_request *rq,
 
 bool i915_retire_requests(struct drm_i915_private *i915)
 {
-       struct intel_ring *ring, *tmp;
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+       struct i915_timeline *tl, *tn;
+
+       mutex_lock(&gt->mutex);
+       list_for_each_entry_safe(tl, tn, &gt->active_list, link) {
+               if (!i915_active_fence_isset(&tl->last_request))
+                       continue;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
+               i915_timeline_get(tl);
 
-       list_for_each_entry_safe(ring, tmp,
-                                &i915->gt.active_rings, active_link) {
-               intel_ring_get(ring); /* last rq holds reference! */
-               mutex_lock(&ring->timeline->mutex);
+               mutex_unlock(&gt->mutex);
+               if (!mutex_trylock(&tl->mutex))
+                       goto relock;
 
-               ring_retire_requests(ring);
+               retire_requests(tl);
 
-               mutex_unlock(&ring->timeline->mutex);
-               intel_ring_put(ring);
+               mutex_unlock(&tl->mutex);
+relock:
+               mutex_lock(&gt->mutex);
+               i915_timeline_put(tl);
        }
+       mutex_unlock(&gt->mutex);
 
-       return !list_empty(&i915->gt.active_rings);
+       return !list_empty(&gt->active_list);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8277cff0df70..d63b7f69cd87 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -220,9 +220,6 @@ struct i915_request {
        /** timeline->request entry for this request */
        struct list_head link;
 
-       /** ring->request_list entry for this request */
-       struct list_head ring_link;
-
        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_link;
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 5bfd1b2626a2..0a10351c3c64 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@
 int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 {
        int ret = i915_terminally_wedged(i915) ? -EIO : 0;
-       int repeat = !!(flags & I915_WAIT_LOCKED);
+       int repeat = 1;
 
        cond_resched();
 
@@ -33,8 +33,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
                }
 
                /* Ensure we also flush after wedging. */
-               if (flags & I915_WAIT_LOCKED)
-                       i915_retire_requests(i915);
+               i915_retire_requests(i915);
        } while (repeat--);
 
        return ret;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a96d0c012d46..1b66f0d95a0f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -40,8 +40,6 @@ void mock_device_flush(struct drm_i915_private *i915)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
        do {
                for_each_engine(engine, i915, id)
                        mock_engine_flush(engine);
@@ -200,7 +198,6 @@ struct drm_i915_private *mock_gem_device(void)
 
        i915_timelines_init(i915);
 
-       INIT_LIST_HEAD(&i915->gt.active_rings);
        INIT_LIST_HEAD(&i915->gt.closed_vma);
        spin_lock_init(&i915->gt.closed_lock);
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to