@@ -534,6 +534,8 @@ DEF_HELPER_4(atomic_cmpxchg64, i32, env, i32, i64, i32)
DEF_HELPER_1(atomic_clear, void, env)
DEF_HELPER_3(atomic_claim, void, env, i32, i64)
+DEF_HELPER_4(stcond_aa32_i32, i32, env, i32, i32, i32)
+
#ifdef TARGET_AARCH64
#include "helper-a64.h"
#endif
@@ -1095,3 +1095,14 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
+
+uint32_t HELPER(stcond_aa32_i32)(CPUARMState *env, uint32_t val, uint32_t addr,
+ uint32_t index)
+{
+ CPUArchState *state = env;
+ TCGMemOpIdx op;
+
+ op = make_memop_idx(MO_LEUL, index);
+
+ return helper_le_stcondl_mmu(state, addr, val, op, 0);
+}
@@ -1006,6 +1006,17 @@ static inline void gen_aa32_stex64(TCGv_i32 is_dirty, TCGv_i64 val,
#endif
+/* Use the runtime helper for 32bit exclusive stores. */
+static inline void gen_aa32_stex32(TCGv_i32 is_dirty, TCGv_i32 val,
+ TCGv_i32 addr, int index)
+{
+    TCGv_i32 index_tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(index_tmp, index);
+ gen_helper_stcond_aa32_i32(is_dirty, cpu_env, val, addr, index_tmp);
+ tcg_temp_free_i32(index_tmp);
+}
+
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(8uex, MO_UB | MO_EXCL)
@@ -1021,7 +1032,6 @@ DO_GEN_ST(32, MO_TEUL)
/* Load/Store exclusive generators (always unsigned) */
DO_GEN_STEX(8, MO_UB)
DO_GEN_STEX(16, MO_TEUW)
-DO_GEN_STEX(32, MO_TEUL)
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
Instead of using TCG's load and store instructions, use a runtime helper
as a hook for the slow path. This is a proof of concept to verify that
the approach actually works. At the moment only the 32-bit STREX relies
on this new code path, and it is working as expected.

Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 target-arm/helper.h    |  2 ++
 target-arm/op_helper.c | 11 +++++++++++
 target-arm/translate.c | 12 +++++++++++-
 3 files changed, 24 insertions(+), 1 deletion(-)
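
Note (below the "---" line, so not part of the commit message): in the new
helper, make_memop_idx() packs the memory access flags and the softmmu MMU
index into a single TCGMemOpIdx, which is why the translator only has to pass
the MMU index and the helper rebuilds the combined value at run time before
calling helper_le_stcondl_mmu(). The stand-alone sketch below is not QEMU
code; the field width and every name in it are hypothetical and serve only to
illustrate that packing idea:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: pack a memory-operation value and an MMU index
 * into one integer, and unpack them again.  Names and the 4-bit index field
 * are made up for this example.
 */
#define MMU_IDX_BITS 4u
#define MMU_IDX_MASK ((1u << MMU_IDX_BITS) - 1u)

static inline uint32_t pack_memop_idx(uint32_t memop, unsigned mmu_idx)
{
    return (memop << MMU_IDX_BITS) | (mmu_idx & MMU_IDX_MASK);
}

static inline uint32_t unpack_memop(uint32_t oi)
{
    return oi >> MMU_IDX_BITS;
}

static inline unsigned unpack_mmu_idx(uint32_t oi)
{
    return oi & MMU_IDX_MASK;
}

int main(void)
{
    /* 0x2 stands in for a MO_LEUL-like opcode value, 1 for the MMU index. */
    uint32_t oi = pack_memop_idx(0x2, 1);

    printf("memop=0x%x mmu_idx=%u\n", unpack_memop(oi), unpack_mmu_idx(oi));
    return 0;
}

The point of the single packed argument is that the runtime helper gets both
pieces of information in one register-sized value, mirroring what the real
make_memop_idx()/TCGMemOpIdx pair does for helper_le_stcondl_mmu() above.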