@@ -24,6 +24,7 @@
typedef struct TCGLabelQemuLdst {
bool is_ld; /* qemu_ld: true, qemu_st: false */
+ TCGReg llsc_success; /* reg index for qemu_stcond outcome */
TCGMemOpIdx oi;
TCGType type; /* result type of a load */
TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
@@ -1885,6 +1885,15 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
#endif
}
+/* Like gen_ldst_i32, but with an extra output operand that receives the StoreConditional success/failure result. */
+static void gen_stcond_i32(TCGOpcode opc, TCGv_i32 is_dirty, TCGv_i32 val,
+ TCGv addr, TCGMemOp memop, TCGArg idx)
+{
+ TCGMemOpIdx oi = make_memop_idx(memop, idx);
+
+ tcg_gen_op4i_i32(opc, is_dirty, val, addr, oi);
+}
+
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
TCGMemOp memop, TCGArg idx)
{
@@ -1911,12 +1920,26 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
}
+void tcg_gen_qemu_ldlink_i32(TCGv_i32 val, TCGv addr, TCGArg idx,
+ TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+ gen_ldst_i32(INDEX_op_qemu_ldlink_i32, val, addr, memop, idx);
+}
+
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
memop = tcg_canonicalize_memop(memop, 0, 1);
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
}
+void tcg_gen_qemu_stcond_i32(TCGv_i32 is_dirty, TCGv_i32 val, TCGv addr,
+ TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 1);
+ gen_stcond_i32(INDEX_op_qemu_stcond_i32, is_dirty, val, addr, memop, idx);
+}
+
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
@@ -754,6 +754,9 @@ void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_ldlink_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_stcond_i32(TCGv_i32, TCGv_i32, TCGv, TCGArg, TCGMemOp);
+
static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
@@ -183,6 +183,10 @@ DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ldlink_i32, 1, TLADDR_ARGS, 2,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_stcond_i32, 1, TLADDR_ARGS + 1, 2,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
@@ -1068,6 +1068,8 @@ void tcg_dump_ops(TCGContext *s)
i = 1;
break;
case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ldlink_i32:
+ case INDEX_op_qemu_stcond_i32:
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_ld_i64:
case INDEX_op_qemu_st_i64:
@@ -282,6 +282,8 @@ typedef enum TCGMemOp {
MO_TEQ = MO_TE | MO_Q,
MO_SSIZE = MO_SIZE | MO_SIGN,
+
+ MO_EXCL = 32, /* Flag bit marking an exclusive (LoadLink/StoreConditional) access; must not collide with the other MO_* bits — verify against current TCGMemOp layout */
} TCGMemOp;
typedef tcg_target_ulong TCGArg;
@@ -957,13 +959,13 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
/* Exclusive variants */
tcg_target_ulong helper_ret_ldlinkub_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+ TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldlinkuw_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+ TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldlinkul_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+ TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldlinkq_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+ TCGMemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
@@ -993,13 +995,13 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);
/* Exclusive variants */
tcg_target_ulong helper_ret_stcondb_mmu(CPUArchState *env, target_ulong addr,
- uint8_t val, int mmu_idx, uintptr_t retaddr);
+ uint8_t val, TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_stcondw_mmu(CPUArchState *env, target_ulong addr,
- uint16_t val, int mmu_idx, uintptr_t retaddr);
+ uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_stcondl_mmu(CPUArchState *env, target_ulong addr,
- uint32_t val, int mmu_idx, uintptr_t retaddr);
+ uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_stcondq_mmu(CPUArchState *env, target_ulong addr,
- uint64_t val, int mmu_idx, uintptr_t retaddr);
+ uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
Create a new pair of TCG instructions that implement a LoadLink/StoreConditional mechanism. It was not possible to fold the two new opcodes into the existing plain load/store variants, because StoreConditional always requires one additional output operand in which to report the success or failure of the store. Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com> --- tcg/tcg-be-ldst.h | 1 + tcg/tcg-op.c | 23 +++++++++++++++++++++++ tcg/tcg-op.h | 3 +++ tcg/tcg-opc.h | 4 ++++ tcg/tcg.c | 2 ++ tcg/tcg.h | 18 ++++++++++-------- 6 files changed, 43 insertions(+), 8 deletions(-)