Using intel_ring_* to refer to functions that operate on an intel_engine_cs
is most confusing! Rename them to intel_engine_* so that the prefix matches
the structure they actually take.
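
As a short illustration (lifted verbatim from the debugfs conversion in this
patch), the hangcheck sampling loop now reads:

        for_each_ring(ring, dev_priv, i) {
                acthd[i] = intel_engine_get_active_head(ring);
                seqno[i] = intel_engine_get_seqno(ring);
        }

instead of the old intel_ring_get_*() spelling, which suggested the helpers
acted on the ringbuffer rather than on the engine.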

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 10 +++----
 drivers/gpu/drm/i915/i915_dma.c            |  8 +++---
 drivers/gpu/drm/i915/i915_drv.h            |  4 +--
 drivers/gpu/drm/i915/i915_gem.c            | 22 +++++++-------
 drivers/gpu/drm/i915/i915_gem_context.c    |  8 +++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  6 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 +++---
 drivers/gpu/drm/i915/i915_gem_request.h    |  4 +--
 drivers/gpu/drm/i915/i915_gpu_error.c      |  8 +++---
 drivers/gpu/drm/i915/i915_guc_submission.c |  6 ++--
 drivers/gpu/drm/i915/i915_irq.c            | 18 ++++++------
 drivers/gpu/drm/i915/i915_trace.h          |  2 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c   |  4 +--
 drivers/gpu/drm/i915/intel_lrc.c           | 17 +++++------
 drivers/gpu/drm/i915/intel_mocs.c          |  6 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 46 ++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 36 +++++++++++------------
 17 files changed, 104 insertions(+), 109 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6e91726db8d3..dec10784c2bc 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -599,7 +599,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          intel_ring_get_seqno(engine),
+                                          intel_engine_get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
@@ -732,7 +732,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
        struct rb_node *rb;
 
        seq_printf(m, "Current sequence (%s): %x\n",
-                  ring->name, intel_ring_get_seqno(ring));
+                  ring->name, intel_engine_get_seqno(ring));
 
        seq_printf(m, "Current user interrupts (%s): %x\n",
                   ring->name, READ_ONCE(ring->user_interrupts));
@@ -1354,8 +1354,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        intel_runtime_pm_get(dev_priv);
 
        for_each_ring(ring, dev_priv, i) {
-               acthd[i] = intel_ring_get_active_head(ring);
-               seqno[i] = intel_ring_get_seqno(ring);
+               acthd[i] = intel_engine_get_active_head(ring);
+               seqno[i] = intel_engine_get_seqno(ring);
        }
 
        i915_get_extra_instdone(dev, instdone);
@@ -2496,7 +2496,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
        struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *ring;
-       enum intel_ring_id i;
+       enum intel_engine_id i;
        u64 total = 0;
 
        if (!HAS_GUC_SCHED(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4c72c83cfa28..c0242ce45e43 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -87,16 +87,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
-               value = intel_ring_initialized(&dev_priv->ring[VCS]);
+               value = intel_engine_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
-               value = intel_ring_initialized(&dev_priv->ring[BCS]);
+               value = intel_engine_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
-               value = intel_ring_initialized(&dev_priv->ring[VECS]);
+               value = intel_engine_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
-               value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+               value = intel_engine_initialized(&dev_priv->ring[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9f06dd19bfb2..466adc6617f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -520,7 +520,7 @@ struct drm_i915_error_state {
                /* Software tracked state */
                bool waiting;
                int hangcheck_score;
-               enum intel_ring_hangcheck_action hangcheck_action;
+               enum intel_engine_hangcheck_action hangcheck_action;
                int num_requests;
 
                /* our own tracking of ring head and tail */
@@ -1973,7 +1973,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-               for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+               for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_engine_initialized((ring__))))
 
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 430c439ece26..a81cad666d3a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2067,7 +2067,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
                drm_gem_object_reference(&obj->base);
-       obj->active |= intel_ring_flag(engine);
+       obj->active |= intel_engine_flag(engine);
 
        list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
        i915_gem_request_assign(&obj->last_read_req[engine->id], req);
@@ -2079,7 +2079,7 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(obj->last_write_req == NULL);
-       GEM_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
+       GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2273,7 +2273,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                intel_ring_update_space(buffer);
        }
 
-       intel_ring_init_seqno(ring, ring->last_submitted_seqno);
+       intel_engine_init_seqno(ring, ring->last_submitted_seqno);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2576,7 +2576,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
                i915_gem_object_retire_request(obj, from_req);
        } else {
-               int idx = intel_ring_sync_index(from, to);
+               int idx = intel_engine_sync_index(from, to);
                u32 seqno = i915_gem_request_get_seqno(from_req);
 
                WARN_ON(!to_req);
@@ -2794,7 +2794,7 @@ int i915_gpu_idle(struct drm_device *dev)
                                return ret;
                }
 
-               ret = intel_ring_idle(ring);
+               ret = intel_engine_idle(ring);
                if (ret)
                        return ret;
        }
@@ -4180,13 +4180,13 @@ int i915_gem_init_rings(struct drm_device *dev)
        return 0;
 
 cleanup_vebox_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+       intel_engine_cleanup(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+       intel_engine_cleanup(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+       intel_engine_cleanup(&dev_priv->ring[VCS]);
 cleanup_render_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+       intel_engine_cleanup(&dev_priv->ring[RCS]);
 
        return ret;
 }
@@ -4341,8 +4341,8 @@ int i915_gem_init(struct drm_device *dev)
        if (!i915.enable_execlists) {
                dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
                dev_priv->gt.init_rings = i915_gem_init_rings;
-               dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-               dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+               dev_priv->gt.cleanup_ring = intel_engine_cleanup;
+               dev_priv->gt.stop_ring = intel_engine_stop;
        } else {
                dev_priv->gt.execbuf_submit = intel_execlists_submission;
                dev_priv->gt.init_rings = intel_logical_rings_init;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5b4e77a80c19..ac2e205fe3b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -610,7 +610,7 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
                return false;
 
        if (to->ppgtt && from == to &&
-           !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+           !(intel_engine_flag(ring) & to->ppgtt->pd_dirty_rings))
                return true;
 
        return false;
@@ -691,7 +691,7 @@ static int do_switch(struct drm_i915_gem_request *req)
                        goto unpin_out;
 
                /* Doing a PD load always reloads the page dirs */
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
+               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
        }
 
        if (engine->id != RCS) {
@@ -719,9 +719,9 @@ static int do_switch(struct drm_i915_gem_request *req)
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
        } else if (to->ppgtt &&
-                  (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
+                  (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
                hw_flags |= MI_FORCE_RESTORE;
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
+               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
        }
 
        /* We should never emit switch_mm more than once */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a0f5a997c2f2..b7c90072f7d4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -942,7 +942,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(req->engine);
+       const unsigned other_rings = ~intel_engine_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -972,7 +972,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return intel_ring_invalidate_all_caches(req);
+       return intel_engine_invalidate_all_caches(req);
 }
 
 static bool
@@ -1443,7 +1443,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        } else
                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
 
-       if (!intel_ring_initialized(ring)) {
+       if (!intel_engine_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 4cc64d9cca12..54834ad1bf5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -90,7 +90,7 @@ static void i915_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
 {
        snprintf(str, size, "%u",
-                intel_ring_get_seqno(to_i915_request(fence)->engine));
+                intel_engine_get_seqno(to_i915_request(fence)->engine));
 }
 
 static void i915_fence_release(struct fence *fence)
@@ -136,7 +136,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Carefully retire all requests without writing to the rings */
        for_each_ring(ring, dev_priv, i) {
-               ret = intel_ring_idle(ring);
+               ret = intel_engine_idle(ring);
                if (ret)
                        return ret;
        }
@@ -144,7 +144,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
-               intel_ring_init_seqno(ring, seqno);
+               intel_engine_init_seqno(ring, seqno);
 
                for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
                        ring->semaphore.sync_seqno[j] = 0;
@@ -429,7 +429,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                if (i915.enable_execlists)
                        ret = logical_ring_flush_all_caches(request);
                else
-                       ret = intel_ring_flush_all_caches(request);
+                       ret = intel_engine_flush_all_caches(request);
                /* Not allowed to fail! */
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index bd17e3a9a71d..cd4412f6e7e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -198,13 +198,13 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(intel_ring_get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(intel_ring_get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->fence.seqno);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b47ca1b7041f..f27d6d1b64d6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
        }
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
        switch (a) {
        case HANGCHECK_IDLE:
@@ -841,7 +841,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
                                / 4;
                tmp = error->semaphore_obj->pages[0];
-               idx = intel_ring_sync_index(ring, to);
+               idx = intel_engine_sync_index(ring, to);
 
                ering->semaphore_mboxes[idx] = tmp[signal_offset];
                ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
@@ -901,8 +901,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 
        ering->waiting = intel_engine_has_waiter(ring);
        ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-       ering->acthd = intel_ring_get_active_head(ring);
-       ering->seqno = intel_ring_get_seqno(ring);
+       ering->acthd = intel_engine_get_active_head(ring);
+       ering->seqno = intel_engine_get_seqno(ring);
        ering->start = I915_READ_START(ring);
        ering->head = I915_READ_HEAD(ring);
        ering->tail = I915_READ_TAIL(ring);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index b47e630e048a..39ccfa8934e3 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -510,7 +510,7 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
                                  struct drm_i915_gem_request *rq)
 {
-       enum intel_ring_id ring_id = rq->engine->id;
+       enum intel_engine_id ring_id = rq->engine->id;
        struct guc_wq_item *wqi;
        void *base;
        u32 tail, wq_len, wq_off, space;
@@ -565,7 +565,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 /* Update the ringbuffer pointer in a saved context image */
 static void lr_context_update(struct drm_i915_gem_request *rq)
 {
-       enum intel_ring_id ring_id = rq->engine->id;
+       enum intel_engine_id ring_id = rq->engine->id;
        struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
        struct drm_i915_gem_object *rb_obj = rq->ring->obj;
        struct page *page;
@@ -594,7 +594,7 @@ int i915_guc_submit(struct i915_guc_client *client,
                    struct drm_i915_gem_request *rq)
 {
        struct intel_guc *guc = client->guc;
-       enum intel_ring_id ring_id = rq->engine->id;
+       enum intel_engine_id ring_id = rq->engine->id;
        int q_ret, b_ret;
 
        /* Need this because of the deferred pin ctx and ring */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ce52d7d9ad91..ce047ac84f5f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2896,7 +2896,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       if (i915_seqno_passed(intel_ring_get_seqno(signaller), seqno))
+       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -2945,7 +2945,7 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
        return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *ring, u64 acthd)
 {
        if (acthd != ring->hangcheck.acthd) {
@@ -2968,12 +2968,12 @@ head_stuck(struct intel_engine_cs *ring, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *ring, u64 acthd)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum intel_ring_hangcheck_action ha;
+       enum intel_engine_hangcheck_action ha;
        u32 tmp;
 
        ha = head_stuck(ring, acthd);
@@ -3053,8 +3053,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
                semaphore_clear_deadlocks(dev_priv);
 
-               acthd = intel_ring_get_active_head(ring);
-               seqno = intel_ring_get_seqno(ring);
+               acthd = intel_engine_get_active_head(ring);
+               seqno = intel_engine_get_seqno(ring);
                user_interrupts = READ_ONCE(ring->user_interrupts);
 
                if (ring->hangcheck.seqno == seqno) {
@@ -3091,8 +3091,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                 * being repeatedly kicked and so responsible
                                 * for stalling the machine.
                                 */
-                               ring->hangcheck.action = ring_stuck(ring,
-                                                                   acthd);
+                               ring->hangcheck.action =
+                                       engine_stuck(ring, acthd);
 
                                switch (ring->hangcheck.action) {
                                case HANGCHECK_IDLE:
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 0204ff72b3e4..95cab4776401 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -569,7 +569,7 @@ TRACE_EVENT(i915_gem_request_notify,
            TP_fast_assign(
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->seqno = intel_ring_get_seqno(ring);
+                          __entry->seqno = intel_engine_get_seqno(ring);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5ba8b4cd8a18..b9366e6ca5ad 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -141,7 +141,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       u32 seqno = intel_ring_get_seqno(engine);
+       u32 seqno = intel_engine_get_seqno(engine);
        struct rb_node **p, *parent, *completed;
        bool first;
 
@@ -283,7 +283,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
-                       u32 seqno = intel_ring_get_seqno(engine);
+                       u32 seqno = intel_engine_get_seqno(engine);
                        while (i915_seqno_passed(seqno,
                                                 to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 65beb7267d1a..92ae7bc532ed 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -637,7 +637,7 @@ static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(req->engine);
+       const unsigned other_rings = ~intel_engine_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -843,10 +843,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(ring))
                return;
 
-       ret = intel_ring_idle(ring);
+       ret = intel_engine_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);
@@ -1455,7 +1455,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
        if (req->ctx->ppgtt &&
-           (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
+           (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
                if (!USES_FULL_48BIT_PPGTT(req->i915) &&
                    !intel_vgpu_active(req->i915->dev)) {
                        ret = intel_logical_ring_emit_pdps(req);
@@ -1463,7 +1463,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                                return ret;
                }
 
-               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
+               req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
        }
 
        ret = intel_ring_begin(req, 4);
@@ -1714,14 +1714,11 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv;
-
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(ring))
                return;
 
-       dev_priv = ring->dev->dev_private;
-
        if (ring->buffer) {
+               struct drm_i915_private *dev_priv = ring->i915;
                intel_logical_ring_stop(ring);
                WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
        }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 039c7405f640..61e1704d7313 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -159,7 +159,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
        return result;
 }
 
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
 {
        switch (ring) {
        case RCS:
@@ -191,7 +191,7 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                                   const struct drm_i915_mocs_table *table,
-                                  enum intel_ring_id id)
+                                  enum intel_engine_id id)
 {
        struct intel_ringbuffer *ring = req->ring;
        unsigned int index;
@@ -318,7 +318,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 
        if (get_mocs_settings(req->i915, &t)) {
                struct intel_engine_cs *ring;
-               enum intel_ring_id ring_id;
+               enum intel_engine_id ring_id;
 
                /* Program the control registers */
                for_each_ring(ring, req->i915, ring_id) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c437b61ac1d0..1bb9f376aa0b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -425,16 +425,16 @@ static void ring_write_tail(struct intel_engine_cs *ring,
        I915_WRITE_TAIL(ring, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;
 
-       if (INTEL_INFO(ring->dev)->gen >= 8)
-               acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-                                        RING_ACTHD_UDW(ring->mmio_base));
-       else if (INTEL_INFO(ring->dev)->gen >= 4)
-               acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+       if (INTEL_INFO(dev_priv)->gen >= 8)
+               acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+                                        RING_ACTHD_UDW(engine->mmio_base));
+       else if (INTEL_INFO(dev_priv)->gen >= 4)
+               acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
 
@@ -697,7 +697,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
                return 0;
 
        req->engine->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(req);
+       ret = intel_engine_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -715,7 +715,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        intel_ring_advance(ring);
 
        req->engine->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(req);
+       ret = intel_engine_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -2028,21 +2028,19 @@ static int intel_init_engine(struct drm_device *dev,
        return 0;
 
 error:
-       intel_cleanup_ring_buffer(engine);
+       intel_engine_cleanup(engine);
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_engine_cleanup(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv;
-
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(ring))
                return;
 
-       dev_priv = to_i915(ring->dev);
-
        if (ring->buffer) {
-               intel_stop_ring_buffer(ring);
+               struct drm_i915_private *dev_priv = ring->i915;
+
+               intel_engine_stop(ring);
                WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
                intel_unpin_ringbuffer_obj(ring->buffer);
@@ -2062,7 +2060,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
        ring->dev = NULL;
 }
 
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *req;
 
@@ -2265,7 +2263,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
        return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2834,7 +2832,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 }
 
 int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
        int ret;
@@ -2853,7 +2851,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
@@ -2874,14 +2872,14 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_engine_stop(struct intel_engine_cs *ring)
 {
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(ring))
                return;
 
-       ret = intel_ring_idle(ring);
+       ret = intel_engine_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6bd9b356c95d..6803e4820688 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -75,7 +75,7 @@ struct  intel_hw_status_page {
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while(0)
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
@@ -86,13 +86,13 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        unsigned user_interrupts;
        int score;
-       enum intel_ring_hangcheck_action action;
+       enum intel_engine_hangcheck_action action;
        int deadlock;
        u32 instdone[I915_NUM_INSTDONE_REG];
 };
@@ -148,9 +148,9 @@ struct  i915_ctx_workarounds {
 
 struct drm_i915_gem_request;
 
-struct  intel_engine_cs {
+struct intel_engine_cs {
        const char      *name;
-       enum intel_ring_id {
+       enum intel_engine_id {
                RCS = 0x0,
                VCS,
                BCS,
@@ -337,7 +337,7 @@ struct  intel_engine_cs {
        struct intel_context *default_context;
        struct intel_context *last_context;
 
-       struct intel_ring_hangcheck hangcheck;
+       struct intel_engine_hangcheck hangcheck;
 
        struct {
                struct drm_i915_gem_object *obj;
@@ -380,20 +380,20 @@ struct  intel_engine_cs {
 };
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_engine_initialized(struct intel_engine_cs *ring)
 {
        return ring->dev != NULL;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_engine_flag(struct intel_engine_cs *ring)
 {
        return 1 << ring->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
-                     struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *ring,
+                       struct intel_engine_cs *other)
 {
        int idx;
 
@@ -461,8 +461,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_engine_stop(struct intel_engine_cs *ring);
+void intel_engine_cleanup(struct intel_engine_cs *ring);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
@@ -487,10 +487,10 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+int __must_check intel_engine_idle(struct intel_engine_cs *ring);
+void intel_engine_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);
 int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -501,8 +501,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
+u64 intel_engine_get_active_head(struct intel_engine_cs *ring);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *ring)
 {
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
-- 
2.7.0.rc3
