On Wed, 3 May 2023 at 08:17, Richard Henderson
<[email protected]> wrote:
>
> Signed-off-by: Richard Henderson <[email protected]>
> ---
> +/**
> + * load_atom_16:
> + * @pv: host address
> + * @memop: the full memory op
> + *
> + * Load 16 bytes from @pv, honoring the atomicity of @memop.
> + */
> +static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
> +                           void *pv, MemOp memop)
> +{
> +    uintptr_t pi = (uintptr_t)pv;
> +    int atmax;
> +    Int128 r;
> +    uint64_t a, b;
> +
> +    /*
> +     * If the host does not support 8-byte atomics, wait until we have
> +     * examined the atomicity parameters below.
> +     */
> +    if (HAVE_al16_fast && likely((pi & 15) == 0)) {
> +        return load_atomic16(pv);
> +    }
The comment says "8-byte atomics", but the code is testing for the
existence of 16-byte atomics?
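If the check really is about the 16-byte fast path, presumably the
comment wants to read something like this (guessing at the intent):

    /*
     * If the host does not support 16-byte atomics, wait until we have
     * examined the atomicity parameters below.
     */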
> +
> +    atmax = required_atomicity(env, pi, memop);
> +    switch (atmax) {
> +    case MO_8:
> +        memcpy(&r, pv, 16);
> +        return r;
> +    case MO_16:
> +        a = load_atom_8_by_2(pv);
> +        b = load_atom_8_by_2(pv + 8);
> +        break;
> +    case MO_32:
> +        a = load_atom_8_by_4(pv);
> +        b = load_atom_8_by_4(pv + 8);
> +        break;
> +    case MO_64:
> +        if (!HAVE_al8) {
> +            cpu_loop_exit_atomic(env_cpu(env), ra);
> +        }
> +        a = load_atomic8(pv);
> +        b = load_atomic8(pv + 8);
> +        break;
> +    case -MO_64:
> +        if (!HAVE_al8) {
> +            cpu_loop_exit_atomic(env_cpu(env), ra);
> +        }
> +        a = load_atom_extract_al8x2(pv);
> +        b = load_atom_extract_al8x2(pv + 8);
> +        break;
> +    case MO_128:
> +        return load_atomic16_or_exit(env, ra, pv);
> +    default:
> +        g_assert_not_reached();
> +    }
> +    return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? a : b);
> +}
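The int128_make128() tail took me a second: if I'm reading
int128_make128() right its arguments are (lo, hi), so this is
equivalent to (just restating for my own benefit, not asking for a
change):

    if (HOST_BIG_ENDIAN) {
        return int128_make128(b, a);    /* a holds the high 8 bytes */
    } else {
        return int128_make128(a, b);    /* a holds the low 8 bytes */
    }

which looks right.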
> +/**
> + * store_atomic16:
> + * @pv: host address
> + * @val: value to store
> + *
> + * Atomically store 16 aligned bytes to @pv.
> + */
> +static inline void store_atomic16(void *pv, Int128 val)
> +{
> +#if defined(CONFIG_ATOMIC128)
> +    __uint128_t *pu = __builtin_assume_aligned(pv, 16);
> +    Int128Alias new;
> +
> +    new.s = val;
> +    qatomic_set__nocheck(pu, new.u);
> +#elif defined(CONFIG_CMPXCHG128)
> +    __uint128_t *pu = __builtin_assume_aligned(pv, 16);
> +    __uint128_t o;
> +    Int128Alias n;
> +
> +    /*
> +     * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
> +     * defer to libatomic, so we must use __sync_val_compare_and_swap_16
> +     * and accept the sequential consistency that comes with it.
> +     */
> +    n.s = val;
> +    do {
> +        o = *pu;
> +    } while (!__sync_bool_compare_and_swap_16(pu, o, n.u));
Same __sync_val vs __sync_bool mismatch as in the other patch: the
comment says __sync_val_compare_and_swap_16 but the code calls
__sync_bool_compare_and_swap_16.
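If you wanted the code to match the comment instead, the
val-returning form would be something like this (untested sketch,
reusing the same variables):

    __uint128_t c;

    n.s = val;
    o = *pu;
    do {
        c = o;
        /*
         * On failure the builtin returns the value it actually saw,
         * so we can retry without a separate reload of *pu.
         */
        o = __sync_val_compare_and_swap_16(pu, c, n.u);
    } while (o != c);

but just fixing the comment to say __sync_bool_compare_and_swap_16
would be fine too.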
Otherwise
Reviewed-by: Peter Maydell <[email protected]>
thanks
-- PMM