Some arm64 platforms have slow per-CPU atomic operations, for example,
the Neoverse V2.  This commit therefore moves SRCU-fast-updown from
per-CPU atomic operations to interrupt-disabled non-read-modify-write-atomic
atomic_long_read()/atomic_long_set() operations.  This works because
SRCU-fast-updown is not invoked from NMI handlers, which means that
srcu_read_lock_fast_updown() and srcu_read_unlock_fast_updown() can
exclude themselves and each other by simply disabling interrupts across
the counter updates.
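As a rough standalone sketch (not part of this patch; "demo_ctr",
"counter_inc_rmw", and "counter_inc_irqoff" are made-up illustration
names), the change amounts to replacing a read-modify-write per-CPU
atomic with a plain load/store pair that interrupt disabling makes
effectively atomic on the local CPU:

#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(atomic_long_t, demo_ctr);

/* Old approach: full read-modify-write atomic, slow on some arm64 CPUs. */
static inline void counter_inc_rmw(void)
{
	atomic_long_inc(raw_cpu_ptr(&demo_ctr));
}

/*
 * New approach: with interrupts disabled, nothing else on this CPU can
 * run between the read and the set, so a non-RMW load/store suffices.
 * This would be unsafe if NMI handlers also updated the counter, which
 * is why it is limited to SRCU-fast-updown.
 */
static inline void counter_inc_irqoff(void)
{
	unsigned long flags;
	atomic_long_t *ctr;

	local_irq_save(flags);
	ctr = raw_cpu_ptr(&demo_ctr);
	atomic_long_set(ctr, atomic_long_read(ctr) + 1);
	local_irq_restore(flags);
}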
This reduces the overhead of calls to srcu_read_lock_fast_updown()
and srcu_read_unlock_fast_updown() from about 100ns to about 12ns on
an ARM Neoverse V2.  Although this is not excellent compared to about
2ns on x86, it sure beats 100ns.

This command was used to measure the overhead:

tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale \
	--allcpus --duration 5 --configs NOPREEMPT \
	--kconfig "CONFIG_NR_CPUS=64 CONFIG_TASKS_TRACE_RCU=y" \
	--bootargs "refscale.loops=100000 refscale.guest_os_delay=5 \
		refscale.nreaders=64 refscale.holdoff=30 \
		torture.disable_onoff_at_boot \
		refscale.scale_type=srcu-fast-updown \
		refscale.verbose_batched=8 \
		torture.verbose_sleep_frequency=8 \
		torture.verbose_sleep_duration=8 refscale.nruns=100" \
	--trust-make

Signed-off-by: Paul E. McKenney <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
---
 include/linux/srcutree.h | 56 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 51 insertions(+), 5 deletions(-)

diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..70560dc4636c 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -253,6 +253,34 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
 	return &ssp->sda->srcu_ctrs[idx];
 }
 
+/*
+ * Non-atomic manipulation of SRCU lock counters.
+ */
+static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_na(struct srcu_struct *ssp)
+{
+	atomic_long_t *scnp;
+	struct srcu_ctr __percpu *scp;
+
+	lockdep_assert_preemption_disabled();
+	scp = READ_ONCE(ssp->srcu_ctrp);
+	scnp = raw_cpu_ptr(&scp->srcu_locks);
+	atomic_long_set(scnp, atomic_long_read(scnp) + 1);
+	return scp;
+}
+
+/*
+ * Non-atomic manipulation of SRCU unlock counters.
+ */
+static inline void notrace
+__srcu_read_unlock_fast_na(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+	atomic_long_t *scnp;
+
+	lockdep_assert_preemption_disabled();
+	scnp = raw_cpu_ptr(&scp->srcu_unlocks);
+	atomic_long_set(scnp, atomic_long_read(scnp) + 1);
+}
+
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.  Returns a pointer that must be passed to the matching
@@ -327,12 +355,23 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
 
 static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
 {
-	struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
+	struct srcu_ctr __percpu *scp;
 
-	if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+	if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ARM64_USE_LSE_PERCPU_ATOMICS)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		scp = __srcu_read_lock_fast_na(ssp);
+		local_irq_restore(flags); /* Avoids leaking the critical section. */
+		return scp;
+	}
+
+	scp = READ_ONCE(ssp->srcu_ctrp);
+	if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE)) {
 		this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
-	else
+	} else {
 		atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+	}
 	barrier(); /* Avoid leaking the critical section. */
 	return scp;
 }
@@ -350,10 +389,17 @@ static inline void notrace
 __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
 {
 	barrier(); /* Avoid leaking the critical section. */
-	if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+	if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ARM64_USE_LSE_PERCPU_ATOMICS)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__srcu_read_unlock_fast_na(ssp, scp);
+		local_irq_restore(flags);
+	} else if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE)) {
 		this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
-	else
+	} else {
 		atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
+	}
 }
 
 void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
-- 
2.40.1
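For context, a reader built on the primitives this patch speeds up
would look roughly like the following sketch.  This is a hypothetical
illustration, not part of the patch: "my_srcu" and "my_reader" are
made-up names, and the cookie-returning signature of the
srcu_read_lock_fast_updown()/srcu_read_unlock_fast_updown() pair named
in the commit log is inferred from the double-underscore functions in
the diff above.

#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

static void my_reader(void)
{
	struct srcu_ctr __percpu *scp;

	/* Enter the read-side critical section; keep the cookie. */
	scp = srcu_read_lock_fast_updown(&my_srcu);

	/* Read-side critical section: SRCU readers may sleep here. */

	/* Exit, passing back the cookie from the matching lock. */
	srcu_read_unlock_fast_updown(&my_srcu, scp);
}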

