You are right. I will fix this case as in gen_ldst_i32(), passing the address either as a single operand or split into low/high halves depending on TARGET_LONG_BITS and the host register width; a rough sketch of what I have in mind is below.
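Something along these lines, mirroring gen_ldst_i32(). This is only a sketch: the op-emission helpers used here (tcg_gen_op5i_i32, tcg_gen_op4, GET_TCGV_I32/GET_TCGV_I64, tcg_ctx) are assumed from the current tcg-op.c, so the final patch may differ.

/* Sketch only: emit the stcond op with the guest address handled exactly as
 * gen_ldst_i32() does, so 64-bit guest addresses work on both 32-bit and
 * 64-bit hosts.  Helper names are assumptions, not final code. */
static void gen_stcond_i32(TCGOpcode opc, TCGv_i32 is_dirty, TCGv_i32 val,
                           TCGv addr, TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    /* 32-bit guest address: TCGv is a TCGv_i32, pass it directly. */
    tcg_gen_op4i_i32(opc, is_dirty, val, addr, oi);
#else
    if (TCG_TARGET_REG_BITS == 32) {
        /* 64-bit guest address on a 32-bit host: pass low/high halves. */
        tcg_gen_op5i_i32(opc, is_dirty, val, TCGV_LOW(addr),
                         TCGV_HIGH(addr), oi);
    } else {
        /* 64-bit guest address on a 64-bit host: one 64-bit operand. */
        tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(is_dirty), GET_TCGV_I32(val),
                    GET_TCGV_I64(addr), oi);
    }
#endif
}

Thanks,

alvise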
On Fri, Jul 17, 2015 at 11:49 AM, Alex Bennée <alex.ben...@linaro.org> wrote:
>
> Alvise Rigo <a.r...@virtualopensystems.com> writes:
>
>> Create a new pair of instructions that implement a LoadLink/StoreConditional
>> mechanism.
>>
>> It has not been possible to completely include the two new opcodes
>> in the plain variants, since the StoreConditional will always require
>> one more argument to store the success of the operation.
>>
>> Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
>> Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
>> Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
>> ---
>>  tcg/tcg-be-ldst.h |  1 +
>>  tcg/tcg-op.c      | 23 +++++++++++++++++++++++
>>  tcg/tcg-op.h      |  3 +++
>>  tcg/tcg-opc.h     |  4 ++++
>>  tcg/tcg.c         |  2 ++
>>  tcg/tcg.h         | 18 ++++++++++--------
>>  6 files changed, 43 insertions(+), 8 deletions(-)
>>
>> diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
>> index 40a2369..b3f9c51 100644
>> --- a/tcg/tcg-be-ldst.h
>> +++ b/tcg/tcg-be-ldst.h
>> @@ -24,6 +24,7 @@
>>
>>  typedef struct TCGLabelQemuLdst {
>>      bool is_ld;             /* qemu_ld: true, qemu_st: false */
>> +    TCGReg llsc_success;    /* reg index for qemu_stcond outcome */
>>      TCGMemOpIdx oi;
>>      TCGType type;           /* result type of a load */
>>      TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
>> diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
>> index 45098c3..a73b522 100644
>> --- a/tcg/tcg-op.c
>> +++ b/tcg/tcg-op.c
>> @@ -1885,6 +1885,15 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
>>  #endif
>>  }
>>
>> +/* An output operand to return the StoreConditional result */
>> +static void gen_stcond_i32(TCGOpcode opc, TCGv_i32 is_dirty, TCGv_i32 val,
>> +                           TCGv addr, TCGMemOp memop, TCGArg idx)
>> +{
>> +    TCGMemOpIdx oi = make_memop_idx(memop, idx);
>> +
>> +    tcg_gen_op4i_i32(opc, is_dirty, val, addr, oi);
>
> This breaks on 64 bit builds as TCGv addr can be 64 bit.
>
>> +}
>> +
>>  static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
>>                           TCGMemOp memop, TCGArg idx)
>>  {
>> @@ -1911,12 +1920,26 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
>>      gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
>>  }
>>
>> +void tcg_gen_qemu_ldlink_i32(TCGv_i32 val, TCGv addr, TCGArg idx,
>> +                             TCGMemOp memop)
>> +{
>> +    memop = tcg_canonicalize_memop(memop, 0, 0);
>> +    gen_ldst_i32(INDEX_op_qemu_ldlink_i32, val, addr, memop, idx);
>> +}
>> +
>>  void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
>>  {
>>      memop = tcg_canonicalize_memop(memop, 0, 1);
>>      gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
>>  }
>>
>> +void tcg_gen_qemu_stcond_i32(TCGv_i32 is_dirty, TCGv_i32 val, TCGv addr,
>> +                             TCGArg idx, TCGMemOp memop)
>> +{
>> +    memop = tcg_canonicalize_memop(memop, 0, 1);
>> +    gen_stcond_i32(INDEX_op_qemu_stcond_i32, is_dirty, val, addr, memop, idx);
>> +}
>> +
>>  void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
>>  {
>>      if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
>> diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
>> index d1d763f..f183169 100644
>> --- a/tcg/tcg-op.h
>> +++ b/tcg/tcg-op.h
>> @@ -754,6 +754,9 @@ void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
>>  void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
>>  void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
>>
>> +void tcg_gen_qemu_ldlink_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
>> +void tcg_gen_qemu_stcond_i32(TCGv_i32, TCGv_i32, TCGv, TCGArg, TCGMemOp);
>> +
>>  static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
>>  {
>>      tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
>> diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
>> index 13ccb60..d6c0454 100644
>> --- a/tcg/tcg-opc.h
>> +++ b/tcg/tcg-opc.h
>> @@ -183,6 +183,10 @@ DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
>>      TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
>>  DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
>>      TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
>> +DEF(qemu_ldlink_i32, 1, TLADDR_ARGS, 2,
>> +    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
>> +DEF(qemu_stcond_i32, 1, TLADDR_ARGS + 1, 2,
>> +    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
>>  DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
>>      TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
>>  DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
>> diff --git a/tcg/tcg.c b/tcg/tcg.c
>> index 7e088b1..8a2265e 100644
>> --- a/tcg/tcg.c
>> +++ b/tcg/tcg.c
>> @@ -1068,6 +1068,8 @@ void tcg_dump_ops(TCGContext *s)
>>              i = 1;
>>              break;
>>          case INDEX_op_qemu_ld_i32:
>> +        case INDEX_op_qemu_ldlink_i32:
>> +        case INDEX_op_qemu_stcond_i32:
>>          case INDEX_op_qemu_st_i32:
>>          case INDEX_op_qemu_ld_i64:
>>          case INDEX_op_qemu_st_i64:
>> diff --git a/tcg/tcg.h b/tcg/tcg.h
>> index 8ca85ab..d41a18c 100644
>> --- a/tcg/tcg.h
>> +++ b/tcg/tcg.h
>> @@ -282,6 +282,8 @@ typedef enum TCGMemOp {
>>      MO_TEQ = MO_TE | MO_Q,
>>
>>      MO_SSIZE = MO_SIZE | MO_SIGN,
>> +
>> +    MO_EXCL = 32, /* Set for exclusive memory access */
>>  } TCGMemOp;
>>
>>  typedef tcg_target_ulong TCGArg;
>> @@ -964,13 +966,13 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
>>                             TCGMemOpIdx oi, uintptr_t retaddr);
>>  /* Exclusive variants */
>>  tcg_target_ulong helper_ret_ldlinkub_mmu(CPUArchState *env, target_ulong addr,
>> -                                         int mmu_idx, uintptr_t retaddr);
>> +                                         TCGMemOpIdx oi, uintptr_t retaddr);
>>  tcg_target_ulong helper_le_ldlinkuw_mmu(CPUArchState *env, target_ulong addr,
>> -                                        int mmu_idx, uintptr_t retaddr);
>> +                                        TCGMemOpIdx oi, uintptr_t retaddr);
>>  tcg_target_ulong helper_le_ldlinkul_mmu(CPUArchState *env, target_ulong addr,
>> -                                        int mmu_idx, uintptr_t retaddr);
>> +                                        TCGMemOpIdx oi, uintptr_t retaddr);
>>  uint64_t helper_le_ldlinkq_mmu(CPUArchState *env, target_ulong addr,
>> -                               int mmu_idx, uintptr_t retaddr);
>> +                               TCGMemOpIdx oi, uintptr_t retaddr);
>>
>>  /* Value sign-extended to tcg register size. */
>>  tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
>> @@ -1000,13 +1002,13 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
>>                         TCGMemOpIdx oi, uintptr_t retaddr);
>>  /* Exclusive variants */
>>  tcg_target_ulong helper_ret_stcondb_mmu(CPUArchState *env, target_ulong addr,
>> -                                        uint8_t val, int mmu_idx, uintptr_t retaddr);
>> +                                        uint8_t val, TCGMemOpIdx oi, uintptr_t retaddr);
>>  tcg_target_ulong helper_le_stcondw_mmu(CPUArchState *env, target_ulong addr,
>> -                                       uint16_t val, int mmu_idx, uintptr_t retaddr);
>> +                                       uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr);
>>  tcg_target_ulong helper_le_stcondl_mmu(CPUArchState *env, target_ulong addr,
>> -                                       uint32_t val, int mmu_idx, uintptr_t retaddr);
>> +                                       uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr);
>>  uint64_t helper_le_stcondq_mmu(CPUArchState *env, target_ulong addr,
>> -                               uint64_t val, int mmu_idx, uintptr_t retaddr);
>> +                               uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr);
>>
>>  /* Temporary aliases until backends are converted. */
>>  #ifdef TARGET_WORDS_BIGENDIAN
>
> --
> Alex Bennée
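
As a side note for anyone following the series, below is a purely hypothetical example of how a target frontend could pair the two new ops. DisasContext, get_mem_index() and the function names are placeholders rather than part of this patch, and whether the frontend itself needs to OR MO_EXCL into the memop is an assumption here.

/* Hypothetical frontend use of the new ops: a load-link/store-conditional
 * pair on a 32-bit word.  All names below are illustrative placeholders. */
static void gen_load_exclusive_word(DisasContext *s, TCGv_i32 dest, TCGv addr)
{
    /* Load-link: load the word and mark the address as exclusive. */
    tcg_gen_qemu_ldlink_i32(dest, addr, get_mem_index(s), MO_TEUL | MO_EXCL);
}

static void gen_store_exclusive_word(DisasContext *s, TCGv_i32 success,
                                     TCGv_i32 val, TCGv addr)
{
    /* Store-conditional: 'success' receives the outcome through the extra
     * output operand of qemu_stcond_i32. */
    tcg_gen_qemu_stcond_i32(success, val, addr, get_mem_index(s),
                            MO_TEUL | MO_EXCL);
}

The point is simply that the store side reports its outcome through the extra output operand, so the translator can branch on success or failure directly, which is why stcond could not reuse the plain qemu_st_i32 variant.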