diff --git a/cputlb.c b/cputlb.c
--- a/cputlb.c
+++ b/cputlb.c
@@ -394,6 +394,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
#define MMUSUFFIX _mmu
+/* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
+#define GEN_EXCLUSIVE_HELPERS
#define SHIFT 0
#include "softmmu_template.h"
@@ -406,6 +408,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX
+#undef GEN_EXCLUSIVE_HELPERS
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
new file mode 100644
--- /dev/null
+++ b/softmmu_llsc_template.h
@@ -0,0 +1,155 @@
+/*
+ * Software MMU support (exclusive load/store operations)
+ *
+ * Generate helpers used by TCG for qemu_ldlink/stcond ops.
+ *
+ * Included from softmmu_template.h only.
+ *
+ * Copyright (c) 2015 Virtual Open Systems
+ *
+ * Authors:
+ * Alvise Rigo <a.rigo@virtualopensystems.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * TODO configurations not implemented:
+ * - Signed/Unsigned Big-endian
+ * - Signed Little-endian
+ */
+
+#if DATA_SIZE > 1
+#define helper_le_ldlink_name glue(glue(helper_le_ldlink, USUFFIX), MMUSUFFIX)
+#define helper_le_stcond_name glue(glue(helper_le_stcond, SUFFIX), MMUSUFFIX)
+#else
+#define helper_le_ldlink_name glue(glue(helper_ret_ldlink, USUFFIX), MMUSUFFIX)
+#define helper_le_stcond_name glue(glue(helper_ret_stcond, SUFFIX), MMUSUFFIX)
+#endif
+
+/* helpers from cpu_ldst.h, byte-order independent versions */
+#if DATA_SIZE > 1
+#define helper_ld_legacy glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+#define helper_st_legacy glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+#else
+#define helper_ld_legacy glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+#define helper_st_legacy glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+#endif
+
+/* True if the TLB entry at 'index' already allows writes to 'page';
+ * note that 'mmu_idx' is taken from the caller's scope. */
+#define is_write_tlb_entry_set(env, page, index)                            \
+    (((page) & TARGET_PAGE_MASK)                                            \
+     == ((env)->tlb_table[mmu_idx][index].addr_write &                      \
+         (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+
+#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+
+WORD_TYPE helper_le_ldlink_name(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ WORD_TYPE ret;
+ int index;
+ CPUState *cpu;
+ hwaddr hw_addr;
+ unsigned mmu_idx = get_mmuidx(oi);
+
+    /* Use the proper load helper from cpu_ldst.h */
+    ret = helper_ld_legacy(env, addr, oi, retaddr);
+
+    /* The legacy access above ensures that the TLB and IOTLB entries
+     * for 'addr' have been created. */
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+
+ /* hw_addr = hwaddr of the page (i.e. section->mr->ram_addr + xlat)
+ * plus the offset (i.e. addr & ~TARGET_PAGE_MASK) */
+ hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
+
+ /* Set the exclusive-protected hwaddr. */
+ env->excl_protected_hwaddr = hw_addr;
+ env->ll_sc_context = true;
+
+ /* No need to mask hw_addr with TARGET_PAGE_MASK since
+ * cpu_physical_memory_excl_is_dirty() will take care of that. */
+ if (cpu_physical_memory_excl_is_dirty(hw_addr)) {
+ cpu_physical_memory_clear_excl_dirty(hw_addr);
+
+        /* Flush the TLB of the other vCPUs; their next TLB entries for
+         * this page will have the TLB_EXCL flag set. */
+ CPU_FOREACH(cpu) {
+ if (cpu != current_cpu) {
+ tlb_flush(cpu, 1);
+ }
+ }
+ }
+
+ /* For this vCPU, just update the TLB entry, no need to flush. */
+ env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
+
+ return ret;
+}
+
+WORD_TYPE helper_le_stcond_name(CPUArchState *env, target_ulong addr,
+ DATA_TYPE val, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ WORD_TYPE ret;
+ int index;
+ hwaddr hw_addr;
+ unsigned mmu_idx = get_mmuidx(oi);
+
+ /* If the TLB entry is not the right one, create it. */
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ if (!is_write_tlb_entry_set(env, addr, index)) {
+ tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ }
+
+ /* hw_addr = hwaddr of the page (i.e. section->mr->ram_addr + xlat)
+ * plus the offset (i.e. addr & ~TARGET_PAGE_MASK) */
+ hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
+
+ if (!env->ll_sc_context) {
+        /* No LoadLink has been set, so the StoreConditional has to fail. */
+ return 1;
+ }
+
+    env->ll_sc_context = false;
+
+ if (cpu_physical_memory_excl_is_dirty(hw_addr)) {
+        /* Another vCPU has stored to the memory since the LoadLink. */
+ ret = 1;
+ } else {
+        helper_st_legacy(env, addr, val, oi, retaddr);
+
+ /* The StoreConditional succeeded */
+ ret = 0;
+ }
+
+ env->tlb_table[mmu_idx][index].addr_write &= ~TLB_EXCL;
+ env->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;
+    /* The page is likely to be used again for exclusive accesses; for this
+     * reason we don't flush any TLB here, at the price of some additional
+     * slow-path accesses, and we don't mark the page as dirty.
+     * The TLB_EXCL entries will not remain there forever: they are
+     * eventually evicted to serve another guest page, and when that
+     * happens the dirty bit is removed as well (see cputlb.c).
+     */
+
+ return ret;
+}
+
+#undef helper_le_ldlink_name
+#undef helper_le_stcond_name
+#undef helper_ld_legacy
+#undef helper_st_legacy
diff --git a/softmmu_template.h b/softmmu_template.h
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -601,6 +601,10 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+#ifdef GEN_EXCLUSIVE_HELPERS
+#include "softmmu_llsc_template.h"
+#endif
+
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
diff --git a/tcg/tcg.h b/tcg/tcg.h
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -955,6 +955,15 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
+/* Exclusive variants */
+tcg_target_ulong helper_ret_ldlinkub_mmu(CPUArchState *env, target_ulong addr,
+                                         TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldlinkuw_mmu(CPUArchState *env, target_ulong addr,
+                                        TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldlinkul_mmu(CPUArchState *env, target_ulong addr,
+                                        TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldlinkq_mmu(CPUArchState *env, target_ulong addr,
+                               TCGMemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
@@ -982,6 +991,15 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);
+/* Exclusive variants */
+tcg_target_ulong helper_ret_stcondb_mmu(CPUArchState *env, target_ulong addr,
+                                        uint8_t val, TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_stcondw_mmu(CPUArchState *env, target_ulong addr,
+                                       uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_stcondl_mmu(CPUArchState *env, target_ulong addr,
+                                       uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_stcondq_mmu(CPUArchState *env, target_ulong addr,
+                               uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
The new helpers rely on the legacy ones to perform the actual read/write.

The StoreConditional helper (helper_le_stcond_name) returns 1 if the
store has to fail due to a concurrent access to the same page by
another vCPU. A 'concurrent access' can be a store made by *any* vCPU
(although some implementations allow stores made by the CPU that issued
the LoadLink).

These helpers also update the TLB entry of the page involved in the
LL/SC, so that all subsequent accesses made by any vCPU will follow the
slow path.

In real multi-threading, these helpers will need to temporarily pause
the execution of the other vCPUs in order to update (flush) the TLB
caches accordingly.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                |   3 +
 softmmu_llsc_template.h | 155 ++++++++++++++++++++++++++++++++++++++++++++++++
 softmmu_template.h      |   4 ++
 tcg/tcg.h               |  18 ++++++
 4 files changed, 180 insertions(+)
 create mode 100644 softmmu_llsc_template.h
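For readers unfamiliar with LL/SC, the following standalone program is a
minimal model of the protocol implemented above. It only mirrors the failure
rule described in the commit message (a StoreConditional fails if any vCPU
stored to the monitored location after the LoadLink); all names in it
(model_cpu, model_ldlink, model_stcond, excl_dirty) are made up for
illustration and are not QEMU APIs. The real helpers track this state per
physical page via cpu_physical_memory_excl_is_dirty() /
cpu_physical_memory_clear_excl_dirty() and force contending vCPUs through
the slow path with TLB_EXCL.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESET_ADDR UINT64_MAX   /* plays the role of EXCLUSIVE_RESET_ADDR */

typedef struct {
    uint64_t excl_protected_addr;   /* like env->excl_protected_hwaddr */
    bool ll_sc_context;             /* like env->ll_sc_context */
} model_cpu;

static uint8_t ram[64];
static bool excl_dirty[64];         /* per-location "someone stored here" flag */

/* LoadLink: read the value and start monitoring the location. */
static uint8_t model_ldlink(model_cpu *cpu, uint64_t addr)
{
    cpu->excl_protected_addr = addr;
    cpu->ll_sc_context = true;
    excl_dirty[addr] = false;
    return ram[addr];
}

/* Plain store by any vCPU: breaks every reservation on the location. */
static void model_store(uint64_t addr, uint8_t val)
{
    ram[addr] = val;
    excl_dirty[addr] = true;
}

/* StoreConditional: 0 on success, 1 on failure, as in the helpers above.
 * As in the patch, success hinges on the dirty flag, not on comparing
 * addr against excl_protected_addr. */
static int model_stcond(model_cpu *cpu, uint64_t addr, uint8_t val)
{
    int fail = 1;

    if (cpu->ll_sc_context && !excl_dirty[addr]) {
        ram[addr] = val;
        fail = 0;
    }
    cpu->ll_sc_context = false;
    cpu->excl_protected_addr = RESET_ADDR;
    return fail;
}

int main(void)
{
    model_cpu cpu0 = { RESET_ADDR, false };

    model_ldlink(&cpu0, 8);
    printf("stcond, undisturbed:        %d\n", model_stcond(&cpu0, 8, 1));

    model_ldlink(&cpu0, 8);
    model_store(8, 42);             /* another vCPU writes in between */
    printf("stcond after foreign store: %d\n", model_stcond(&cpu0, 8, 2));

    return 0;
}

Running it prints 0 for the undisturbed LoadLink/StoreConditional pair and 1
when a store intervenes, matching the return convention of the helpers.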