From: Matthew Brost <[email protected]>

Preempt, TLB invalidation, and OA fences now use the embedded fence lock:
drop the driver-side spinlocks and pass NULL to dma_fence_init() instead.

Signed-off-by: Matthew Brost <[email protected]>
---
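A minimal usage sketch of the pattern this patch relies on (not part of
the patch itself; 'my_fence' and 'my_fence_ops' are made-up names, and
this assumes the embedded-lock behavior where a NULL lock passed to
dma_fence_init() selects the fence's internal lock):

  #include <linux/dma-fence.h>

  static const char *my_fence_driver_name(struct dma_fence *fence)
  {
          return "my_driver";
  }

  static const char *my_fence_timeline_name(struct dma_fence *fence)
  {
          return "my_timeline";
  }

  /* Assumed minimal ops; real drivers add signaling callbacks */
  static const struct dma_fence_ops my_fence_ops = {
          .get_driver_name = my_fence_driver_name,
          .get_timeline_name = my_fence_timeline_name,
  };

  struct my_fence {
          struct dma_fence base;
          /* No driver-side spinlock_t for @base needed anymore */
  };

  static void my_fence_arm(struct my_fence *f, u64 context, u64 seqno)
  {
          /*
           * Previously a driver had to supply its own lock:
           *   spin_lock_init(&f->lock);
           *   dma_fence_init(&f->base, &my_fence_ops, &f->lock,
           *                  context, seqno);
           * With the embedded fence lock, pass NULL instead:
           */
          dma_fence_init(&f->base, &my_fence_ops, NULL, context, seqno);
  }
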
 drivers/gpu/drm/xe/xe_oa.c                  | 5 +----
 drivers/gpu/drm/xe/xe_preempt_fence.c       | 3 +--
 drivers/gpu/drm/xe/xe_preempt_fence_types.h | 2 --
 drivers/gpu/drm/xe/xe_tlb_inval.c           | 5 +----
 drivers/gpu/drm/xe/xe_tlb_inval_types.h     | 2 --
 5 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 7a13a7bd99a6..41d081781060 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -112,8 +112,6 @@ struct xe_oa_config_bo {
 struct xe_oa_fence {
        /* @base: dma fence base */
        struct dma_fence base;
-       /* @lock: lock for the fence */
-       spinlock_t lock;
        /* @work: work to signal @base */
        struct delayed_work work;
        /* @cb: callback to schedule @work */
@@ -1017,8 +1015,7 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
        }
 
        /* Point of no return: initialize and set fence to signal */
-       spin_lock_init(&ofence->lock);
-       dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+       dma_fence_init(&ofence->base, &xe_oa_fence_ops, NULL, 0, 0);
 
        for (i = 0; i < stream->num_syncs; i++) {
                if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7f587ca3947d..75f433aee747 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -145,9 +145,8 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
 {
        list_del_init(&pfence->link);
        pfence->q = xe_exec_queue_get(q);
-       spin_lock_init(&pfence->lock);
        dma_fence_init(&pfence->base, &preempt_fence_ops,
-                     &pfence->lock, context, seqno);
+                      NULL, context, seqno);
 
        return &pfence->base;
 }
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
index ac125c697a41..a98de8d1c723 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
@@ -25,8 +25,6 @@ struct xe_preempt_fence {
        struct xe_exec_queue *q;
        /** @preempt_work: work struct which issues preemption */
        struct work_struct preempt_work;
-       /** @lock: dma-fence fence lock */
-       spinlock_t lock;
        /** @error: preempt fence is in error state */
        int error;
 };
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
index 918a59e686ea..5c23e76b0241 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval.c
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -133,7 +133,6 @@ int xe_gt_tlb_inval_init_early(struct xe_gt *gt)
        tlb_inval->seqno = 1;
        INIT_LIST_HEAD(&tlb_inval->pending_fences);
        spin_lock_init(&tlb_inval->pending_lock);
-       spin_lock_init(&tlb_inval->lock);
        INIT_DELAYED_WORK(&tlb_inval->fence_tdr, xe_tlb_inval_fence_timeout);
 
        err = drmm_mutex_init(&xe->drm, &tlb_inval->seqno_lock);
@@ -420,10 +419,8 @@ void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
 {
        xe_pm_runtime_get_noresume(tlb_inval->xe);
 
-       spin_lock_irq(&tlb_inval->lock);
-       dma_fence_init(&fence->base, &inval_fence_ops, &tlb_inval->lock,
+       dma_fence_init(&fence->base, &inval_fence_ops, NULL,
                       dma_fence_context_alloc(1), 1);
-       spin_unlock_irq(&tlb_inval->lock);
        INIT_LIST_HEAD(&fence->link);
        if (stack)
                set_bit(FENCE_STACK_BIT, &fence->base.flags);
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_types.h b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
index 8f8b060e9005..80e893950099 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval_types.h
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
@@ -104,8 +104,6 @@ struct xe_tlb_inval {
        struct delayed_work fence_tdr;
        /** @job_wq: schedules TLB invalidation jobs */
        struct workqueue_struct *job_wq;
-       /** @tlb_inval.lock: protects TLB invalidation fences */
-       spinlock_t lock;
 };
 
 /**
-- 
2.43.0
