There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore replaces the spin_unlock_wait() calls
in nf_conntrack_lock() and nf_conntrack_all_lock() with spin_lock()
followed immediately by spin_unlock().  These functions do not appear
to be invoked on any fastpaths.
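
For illustration only, here is a minimal sketch (not part of the patch) of
the idiom the patch open-codes in nf_conntrack_lock() and
nf_conntrack_all_lock(): acquiring and immediately releasing the lock is
used purely as a "wait for any current holder to release" step.  The
helper name below is hypothetical.

	#include <linux/spinlock.h>

	/* Hypothetical helper, shown only to illustrate the idiom. */
	static void spin_wait_for_release(spinlock_t *lock)
	{
		spin_lock(lock);	/* blocks until any current holder releases */
		spin_unlock(lock);	/* the lock itself is not needed, only the wait */
	}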

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Pablo Neira Ayuso <pa...@netfilter.org>
Cc: Jozsef Kadlecsik <kad...@blackhole.kfki.hu>
Cc: Florian Westphal <f...@strlen.de>
Cc: "David S. Miller" <da...@davemloft.net>
Cc: <netfilter-de...@vger.kernel.org>
Cc: <coret...@netfilter.org>
Cc: <netdev@vger.kernel.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Alan Stern <st...@rowland.harvard.edu>
Cc: Andrea Parri <parri.and...@gmail.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
---
 net/netfilter/nf_conntrack_core.c | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e847dbaa0c6b..9f997859d160 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -99,15 +99,11 @@ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
        spin_lock(lock);
        while (unlikely(nf_conntrack_locks_all)) {
                spin_unlock(lock);
-
-               /*
-                * Order the 'nf_conntrack_locks_all' load vs. the
-                * spin_unlock_wait() loads below, to ensure
-                * that 'nf_conntrack_locks_all_lock' is indeed held:
-                */
-               smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-               spin_unlock_wait(&nf_conntrack_locks_all_lock);
+               /* Wait for nf_conntrack_locks_all_lock holder to release ... */
+               spin_lock(&nf_conntrack_locks_all_lock);
+               spin_unlock(&nf_conntrack_locks_all_lock);
                spin_lock(lock);
+               /* ... and retry. */
        }
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
@@ -150,17 +146,11 @@ static void nf_conntrack_all_lock(void)
 
        spin_lock(&nf_conntrack_locks_all_lock);
        nf_conntrack_locks_all = true;
-
-       /*
-        * Order the above store of 'nf_conntrack_locks_all' against
-        * the spin_unlock_wait() loads below, such that if
-        * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
-        * we must observe nf_conntrack_locks[] held:
-        */
-       smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-
        for (i = 0; i < CONNTRACK_LOCKS; i++) {
-               spin_unlock_wait(&nf_conntrack_locks[i]);
+               /* Wait for any current holder to release lock. */
+               spin_lock(&nf_conntrack_locks[i]);
+               spin_unlock(&nf_conntrack_locks[i]);
+               /* Next acquisition will see nf_conntrack_locks_all == true. */
        }
 }
 
-- 
2.5.2
