Hi Paul,

On 8/5/24 12:21, Paul E. McKenney wrote:
> Use the new cmpxchg_emu_u8() to emulate one-byte cmpxchg() on arc.
>
> [ paulmck: Drop two-byte support per Arnd Bergmann feedback. ]
> [ paulmck: Apply feedback from Naresh Kamboju. ]
> [ paulmck: Apply kernel test robot feedback. ]
>
> Signed-off-by: Paul E. McKenney <paul...@kernel.org>
> Cc: Vineet Gupta <vgu...@kernel.org>
> Cc: Andi Shyti <andi.sh...@linux.intel.com>
> Cc: Andrzej Hajda <andrzej.ha...@intel.com>
> Cc: Arnd Bergmann <a...@arndb.de>
> Cc: Palmer Dabbelt <pal...@rivosinc.com>
> Cc: <linux-snps-arc@lists.infradead.org>
> ---
>  arch/arc/Kconfig               |  1 +
>  arch/arc/include/asm/cmpxchg.h | 33 ++++++++++++++++++++++++---------
>  2 files changed, 25 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
> index fd0b0a0d4686a..163608fd49d18 100644
> --- a/arch/arc/Kconfig
> +++ b/arch/arc/Kconfig
> @@ -13,6 +13,7 @@ config ARC
>       select ARCH_HAS_SETUP_DMA_OPS
>       select ARCH_HAS_SYNC_DMA_FOR_CPU
>       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> +     select ARCH_NEED_CMPXCHG_1_EMU
>       select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
>       select ARCH_32BIT_OFF_T
>       select BUILDTIME_TABLE_SORT
> diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
> index e138fde067dea..2102ce076f28b 100644
> --- a/arch/arc/include/asm/cmpxchg.h
> +++ b/arch/arc/include/asm/cmpxchg.h
> @@ -8,6 +8,7 @@
>  
>  #include <linux/build_bug.h>
>  #include <linux/types.h>
> +#include <linux/cmpxchg-emu.h>
>  
>  #include <asm/barrier.h>
>  #include <asm/smp.h>
> @@ -46,6 +47,9 @@
>       __typeof__(*(ptr)) _prev_;                                      \
>                                                                       \
>       switch(sizeof((_p_))) {                                         \
> +     case 1:                                                         \
> +		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
> +             break;                                                  \
>       case 4:                                                         \
>               _prev_ = __cmpxchg(_p_, _o_, _n_);                      \
>               break;                                                  \
> @@ -65,16 +69,27 @@
>       __typeof__(*(ptr)) _prev_;                                      \
>       unsigned long __flags;                                          \
>                                                                       \
> -     BUILD_BUG_ON(sizeof(_p_) != 4);                                 \

Is this alone not sufficient? I.e., for !LLSC, let the atomic op happen
under the spin-lock for non-4-byte quantities as well (see the sketch below).
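
Something like this, say (a minimal, untested sketch of the !LLSC
arch_cmpxchg() body; macro line continuations dropped for readability,
and it assumes the existing atomic_ops_lock()/atomic_ops_unlock()
helpers):

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after;
	 * a plain load/store under the lock works for any size the
	 * compiler can access directly, so 1, 2 and 4 bytes all take
	 * the same path.
	 */
	BUILD_BUG_ON(sizeof(*(_p_)) != 1 &&
		     sizeof(*(_p_)) != 2 &&
		     sizeof(*(_p_)) != 4);
	atomic_ops_lock(__flags);
	_prev_ = *_p_;
	if (_prev_ == _o_)
		*_p_ = _n_;
	atomic_ops_unlock(__flags);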

> +     switch(sizeof((_p_))) {                                         \
> +     case 1:                                                         \
> +		__flags = cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
> +             _prev_ = (__typeof__(*(ptr)))__flags;                   \
> +             break;                                                  \
> +             break;                                                  \

FWIW, the second break here looks extraneous.

> +     case 4:                                                         \
> +             /*                                                      \
> +              * spin lock/unlock provide the needed smp_mb()         \
> +              * before/after                                         \
> +              */                                                     \
> +             atomic_ops_lock(__flags);                               \
> +             _prev_ = *_p_;                                          \
> +             if (_prev_ == _o_)                                      \
> +                     *_p_ = _n_;                                     \
> +             atomic_ops_unlock(__flags);                             \
> +             break;                                                  \
> +     default:                                                        \
> +             BUILD_BUG();                                            \
> +     }                                                               \
>                                                                       \
> -     /*                                                              \
> -      * spin lock/unlock provide the needed smp_mb() before/after    \
> -      */                                                             \
> -     atomic_ops_lock(__flags);                                       \
> -     _prev_ = *_p_;                                                  \
> -     if (_prev_ == _o_)                                              \
> -             *_p_ = _n_;                                             \
> -     atomic_ops_unlock(__flags);                                     \
>       _prev_;                                                         \
>  })
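
Also, FWIW, my mental model of cmpxchg_emu_u8() is a 4-byte cmpxchg
loop on the containing aligned word. A from-memory sketch (not the
actual lib/cmpxchg-emu.c source, so treat the details as illustrative):

	union u8_32 {
		u8 b[4];
		u32 w;
	};

	u8 cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
	{
		/* Round down to the aligned 32-bit word containing *p. */
		u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
		int i = ((uintptr_t)p) & 0x3;	/* byte index within that word */
		union u8_32 old32, new32;
		u32 ret;

		ret = READ_ONCE(*p32);
		do {
			old32.w = ret;
			if (old32.b[i] != old)
				return old32.b[i];	/* byte mismatch: fail */
			new32.w = old32.w;
			new32.b[i] = new;		/* splice in the new byte */
		} while (!try_cmpxchg(p32, &ret, new32.w));
		return old32.b[i];
	}

If that is indeed the shape of it, then on !LLSC the one-byte case ends
up taking the 4-byte spin-locked path anyway, just with extra masking
work, which is why handling all sizes directly under the lock seems
simpler.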

-Vineet
