There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes the underlying arch-specific
arch_spin_unlock_wait() from ia64.
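
As a rough sketch of the conversion (the lock name below is purely
illustrative), a caller that today does

	spin_unlock_wait(&some_lock);	/* wait for any current holder */

could instead do

	spin_lock(&some_lock);
	spin_unlock(&some_lock);

which likewise waits for any current holder to release the lock, and
supplies full acquire/release ordering instead of relying on
spin_unlock_wait()'s ill-defined semantics.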

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: <linux-i...@vger.kernel.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Alan Stern <st...@rowland.harvard.edu>
Cc: Andrea Parri <parri.and...@gmail.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
---
 arch/ia64/include/asm/spinlock.h | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index ca9e76149a4a..df2c121164b8 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
        ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       int     *p = (int *)&lock->lock, ticket;
-
-       ia64_invala();
-
-       for (;;) {
-               asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
-               if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
-                       return;
-               cpu_relax();
-       }
-
-       smp_acquire__after_ctrl_dep();
-}
-
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
        long tmp = ACCESS_ONCE(lock->lock);
@@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
        arch_spin_lock(lock);
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       __ticket_spin_unlock_wait(lock);
-}
-
 #define arch_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
 #define arch_write_can_lock(rw)        (*(volatile int *)(rw) == 0)
 
-- 
2.5.2
