There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes the MIPS implementation of the
underlying arch-specific arch_spin_unlock_wait().
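
For example, a call site that previously waited for the lock to be
released:

	spin_unlock_wait(&mylock);

can instead briefly acquire and release that lock (a sketch only;
"mylock" stands in for the caller's actual lock):

	spin_lock(&mylock);
	spin_unlock(&mylock);

Per the reasoning above, the lock/unlock pair is at least as strong as
any of the proposed spin_unlock_wait() semantics, at the cost of a
short critical section.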

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Ralf Baechle <r...@linux-mips.org>
Cc: <linux-m...@linux-mips.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Alan Stern <st...@rowland.harvard.edu>
Cc: Andrea Parri <parri.and...@gmail.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
---
 arch/mips/include/asm/spinlock.h | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a8df44d60607..81b4945031ee 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -50,22 +50,6 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       u16 owner = READ_ONCE(lock->h.serving_now);
-       smp_rmb();
-       for (;;) {
-               arch_spinlock_t tmp = READ_ONCE(*lock);
-
-               if (tmp.h.serving_now == tmp.h.ticket ||
-                   tmp.h.serving_now != owner)
-                       break;
-
-               cpu_relax();
-       }
-       smp_acquire__after_ctrl_dep();
-}
-
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
        u32 counters = ACCESS_ONCE(lock->lock);
-- 
2.5.2
