@@ -151,10 +151,7 @@ static const int tcg_target_call_oarg_regs[1] = {
* If the guest base gets placed in high memory, it's more efficient
* to use a register to hold the address.
*/
-#ifndef CONFIG_USE_GUEST_BASE
-#define GUEST_BASE 0
-#endif
-#define USE_GUEST_BASE_REG (GUEST_BASE > 0x7fff0000)
+#define USE_GUEST_BASE_REG (guest_base > 0x7fff0000)
#define TCG_GUEST_BASE_REG TCG_REG_S5
/*
@@ -258,6 +255,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
const char *ct_str = *pct_str;
switch (ct_str[0]) {
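+ /* 'R' is currently handled the same as 'r'. */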
+ case 'R':
case 'r':
/* Constraint 'r' means any register is okay. */
ct->ct |= TCG_CT_REG;
@@ -360,14 +358,14 @@ static int tcg_match_andi(tcg_target_long val)
}
}
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
if (ct & TCG_CT_CONST) {
return 1;
}
- if (ct & TCG_CT_CONST_IS32) {
+ if (type == TCG_TYPE_I32) {
val = (int32_t)val;
}
if ((ct & TCG_CT_CONST_U8) && val == (uint8_t)val) {
@@ -718,20 +716,17 @@ static void tcg_out_st_sz(TCGContext *s, TCGMemOp memop, TCGReg ra, TCGReg rb,
tcg_out_mem_long(s, st_opc[memop & MO_SIZE], ra, rb, disp);
}
-static void patch_reloc(uint8_t *x_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
+ intptr_t value, intptr_t addend)
{
- uint32_t *code_ptr = (uint32_t *)x_ptr;
- uint32_t insn = *code_ptr;
-
- value += addend;
switch (type) {
case R_ALPHA_BRADDR:
- value -= (intptr_t)x_ptr + 4;
+ value += addend;
+ value -= (intptr_t)code_ptr + 4;
if ((value & 3) || value < -0x400000 || value >= 0x400000) {
tcg_abort();
}
- *code_ptr = (insn & ~0x1fffff) | INSN_DISP21(value >> 2);
+ *code_ptr = (*code_ptr & ~0x1fffff) | INSN_DISP21(value >> 2);
break;
default:
@@ -756,13 +751,12 @@ static inline void tcg_out_br_direct(TCGContext *s, AlphaOpcode opc, TCGReg ra,
}
static inline void tcg_out_br_label(TCGContext *s, AlphaOpcode opc, TCGReg ra,
- int label_index)
+ TCGLabel *l)
{
- TCGLabel *l = &s->labels[label_index];
if (l->has_value) {
tcg_out_br_direct(s, opc, ra, l->u.value);
} else {
- tcg_out_reloc(s, s->code_ptr, R_ALPHA_BRADDR, label_index, 0);
+ tcg_out_reloc(s, s->code_ptr, R_ALPHA_BRADDR, l, 0);
tcg_out_br_noaddr(s, opc, ra);
}
}
@@ -788,8 +782,9 @@ static inline void tcg_out_reset_tb(TCGContext *s, TCGReg reg)
}
}
-static void tcg_out_const_call(TCGContext *s, intptr_t dest)
+static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
+ intptr_t dest = (intptr_t)target;
const uint16_t *check = (const uint16_t *) dest;
uint16_t check1 = check[1];
uint16_t check3 = check[3];
@@ -930,7 +925,7 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg dest,
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGArg arg2, int const_arg2, int label_index)
+ TCGArg arg2, int const_arg2, TCGLabel *l)
{
/* Note that unsigned comparisons are not present here, which means
that their entries will contain zeros. */
@@ -964,7 +959,7 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
arg1 = TMP_REG1;
}
- tcg_out_br_label(s, opc, arg1, label_index);
+ tcg_out_br_label(s, opc, arg1, l);
}
/* Note that these functions don't have normal C calling conventions. */
@@ -1069,37 +1064,38 @@ static TCGReg tcg_out_tlb_cmp(TCGContext *s, TCGMemOp memop, TCGReg addr_reg,
/* Record the context of a call to the out of line helper code for the slow
path for a load or store, so that we can later generate the correct
helper code. */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
- TCGReg data_reg, TCGReg addr_reg, int mem_idx,
- uint8_t *raddr, uint8_t *label_ptr)
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+ TCGType ext, TCGReg data_reg, TCGReg addr_reg,
+ tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
TCGLabelQemuLdst *label = new_ldst_label(s);
label->is_ld = is_ld;
- label->opc = opc;
+ label->oi = oi;
+ label->type = ext;
label->datalo_reg = data_reg;
label->addrlo_reg = addr_reg;
- label->mem_index = mem_idx;
label->raddr = raddr;
label->label_ptr[0] = label_ptr;
}
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
- TCGMemOp memop = lb->opc;
+ TCGMemOpIdx oi = lb->oi;
+ TCGMemOp opc = get_memop(oi);
patch_reloc(lb->label_ptr[0], R_ALPHA_BRADDR, (uintptr_t)s->code_ptr, 0);
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, lb->addrlo_reg);
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_A2, lb->mem_index);
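+ /* Pass the complete TCGMemOpIdx to the helper, not just the mmu index. */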
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_A2, oi);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (uintptr_t)lb->raddr);
- tcg_out_const_call(s, (uintptr_t)qemu_ld_helpers[memop & ~MO_SIGN]);
+ tcg_out_call(s, (tcg_insn_unit *)qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
/* Note that the chosen helpers zero-extend. */
- if (memop & MO_SIGN) {
- tgen_extend(s, memop, TCG_REG_V0, lb->datalo_reg);
+ if (opc & MO_SIGN) {
+ tgen_extend(s, opc, TCG_REG_V0, lb->datalo_reg);
} else {
tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_V0);
}
@@ -1109,28 +1105,31 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
- TCGMemOp memop = lb->opc;
+ TCGMemOpIdx oi = lb->oi;
+ TCGMemOp opc = get_memop(oi);
patch_reloc(lb->label_ptr[0], R_ALPHA_BRADDR, (uintptr_t)s->code_ptr, 0);
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A0, TCG_AREG0);
tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, lb->addrlo_reg);
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, lb->datalo_reg);
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_A3, lb->mem_index);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_A3, oi);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (uintptr_t)lb->raddr);
- tcg_out_const_call(s, (uintptr_t)qemu_st_helpers[memop]);
+ tcg_out_call(s, (tcg_insn_unit *)qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
tcg_out_br_direct(s, INSN_BR, TCG_REG_ZERO, (uintptr_t)lb->raddr);
}
#endif /* SOFTMMU */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGReg data,
- const TCGReg addr, TCGMemOp memop, int mem_index)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGReg data, const TCGReg addr,
+ TCGMemOpIdx oi, TCGType ext)
{
+ TCGMemOp memop = get_memop(oi);
TCGReg base;
long ofs;
#if defined(CONFIG_SOFTMMU)
+ unsigned mem_index = get_mmuidx(oi);
- uint8_t *label_ptr;
+ tcg_insn_unit *label_ptr;
#endif
@@ -1149,7 +1148,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGReg data,
base = TCG_REG_A1;
ofs = 0;
} else {
- ofs = GUEST_BASE;
+ ofs = guest_base;
}
#endif
@@ -1157,17 +1156,18 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGReg data,
tcg_out_ld_sz(s, memop, data, base, ofs);
#if defined(CONFIG_SOFTMMU)
- add_qemu_ldst_label(s, 1, memop, data, addr, mem_index,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, 1, oi, ext, data, addr, s->code_ptr, label_ptr);
#endif
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGReg data,
- const TCGReg addr, TCGMemOp memop, int mem_index)
+static void tcg_out_qemu_st(TCGContext *s, const TCGReg data, const TCGReg addr,
+ TCGMemOpIdx oi, TCGType ext)
{
+ TCGMemOp memop = get_memop(oi);
TCGReg base, out;
long ofs;
#if defined(CONFIG_SOFTMMU)
+ unsigned mem_index = get_mmuidx(oi);
- uint8_t *label_ptr;
+ tcg_insn_unit *label_ptr;
#endif
@@ -1186,7 +1186,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGReg data,
base = TCG_REG_A1;
ofs = 0;
} else {
- ofs = GUEST_BASE;
+ ofs = guest_base;
}
#endif
@@ -1201,8 +1201,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGReg data,
tcg_out_st_sz(s, memop, out, base, ofs);
#if defined(CONFIG_SOFTMMU)
- add_qemu_ldst_label(s, 0, memop, data, addr, mem_index,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, 0, oi, ext, data, addr, s->code_ptr, label_ptr);
#endif
}
@@ -1240,7 +1239,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
if ((uintptr_t)s->code_ptr & 7) {
tcg_out32(s, INSN_NOP);
}
- s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ s->tb_jmp_offset[arg0] = tcg_current_code_size(s);
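+ /* Reserve 8 bytes, i.e. 2 insn units, for the direct jump to be patched. */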
- s->code_ptr += 8;
+ s->code_ptr += 2;
} else {
/* Indirect jump method. */
@@ -1248,24 +1247,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
(intptr_t)(s->tb_next + arg0));
}
tcg_out_fmt_jmp(s, INSN_JMP, TCG_REG_ZERO, TCG_REG_TB, 0);
- s->tb_next_offset[arg0] = s->code_ptr - s->code_buf;
+ s->tb_next_offset[arg0] = tcg_current_code_size(s);
/* The "unlinked" state of a TB has the jump fall through.
Therefore we need to reset TCG_REG_TB to our top. */
tcg_out_reset_tb(s, TCG_REG_TB);
break;
- case INDEX_op_call:
- if (const_args[0]) {
- tcg_out_const_call(s, arg0);
- } else {
- tcg_out_fmt_jmp(s, INSN_JSR, TCG_REG_RA, TCG_REG_PV, 0);
- tcg_out_reset_tb(s, TCG_REG_RA);
- }
- break;
-
case INDEX_op_br:
- tcg_out_br_label(s, INSN_BR, TCG_REG_ZERO, arg0);
+ tcg_out_br_label(s, INSN_BR, TCG_REG_ZERO, arg_label(arg0));
break;
case INDEX_op_ld8u_i32:
@@ -1526,7 +1516,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
arg1 = (int32_t)arg1;
/* FALLTHRU */
case INDEX_op_brcond_i64:
- tcg_out_brcond(s, arg2, arg0, arg1, const_args[1], args[3]);
+ tcg_out_brcond(s, arg2, arg0, arg1, const_args[1], arg_label(args[3]));
break;
case INDEX_op_setcond_i32:
@@ -1553,18 +1543,20 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext16s_i64:
tgen_ext16s(s, arg1, arg0);
break;
+ case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
tgen_ext32s(s, arg1, arg0);
break;
- case INDEX_op_trunc_i32:
- if (arg2 > 0) {
- tcg_out_fmt_opi(s, INSN_SRA, arg1, arg2, arg0);
- arg1 = arg0;
- }
- if (arg2 < 32) {
- tgen_ext32s(s, arg1, arg0);
- }
+ case INDEX_op_extu_i32_i64:
+ tgen_ext32u(s, arg1, arg0);
+ break;
+
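+ /* 32-bit values are kept sign-extended in registers. */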
+ case INDEX_op_extrl_i64_i32:
+ tgen_ext32s(s, arg1, arg0);
+ break;
+ case INDEX_op_extrh_i64_i32:
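+ /* Use an arithmetic shift so the high word is itself sign-extended. */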
+ tcg_out_fmt_opi(s, INSN_SRA, arg1, 32, arg0);
break;
case INDEX_op_div_i32:
@@ -1610,8 +1602,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_ld_i32:
/* Make sure 32-bit data stays sign-extended. */
- if ((arg2 & MO_SIZE) == MO_32) {
- arg2 |= MO_SIGN;
+ if ((get_memop(arg2) & MO_SIZE) == MO_32) {
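+ /* OR MO_SIGN into the memop part of the index; the mmu index is unchanged. */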
+ arg2 |= make_memop_idx(MO_SIGN, 0);
}
/* FALLTHRU */
case INDEX_op_qemu_ld_i64:
@@ -1627,6 +1619,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_movi_i32:
case INDEX_op_movi_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
- /* These four are handled by tcg.c directly. */
+ /* These are handled by tcg.c directly. */
default:
tcg_abort();
@@ -1636,12 +1629,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
static const TCGTargetOpDef alpha_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
- { INDEX_op_call, { "ci" } },
{ INDEX_op_br, { } },
- { INDEX_op_mov_i32, { "r", "r" } },
- { INDEX_op_movi_i32, { "r" } },
-
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
{ INDEX_op_ld16u_i32, { "r", "r" } },
@@ -1677,10 +1666,8 @@ static const TCGTargetOpDef alpha_op_defs[] = {
{ INDEX_op_setcond_i32, { "r", "rJ", "rWI" } },
{ INDEX_op_movcond_i32, { "r", "rJ", "rWI", "rWI", "0" } },
- { INDEX_op_trunc_i32, { "r", "r" } },
-
- { INDEX_op_mov_i64, { "r", "r" } },
- { INDEX_op_movi_i64, { "r" } },
+ { INDEX_op_extrl_i64_i32, { "r", "R" } },
+ { INDEX_op_extrh_i64_i32, { "r", "R" } },
{ INDEX_op_ld8u_i64, { "r", "r" } },
{ INDEX_op_ld8s_i64, { "r", "r" } },
@@ -1726,6 +1713,8 @@ static const TCGTargetOpDef alpha_op_defs[] = {
{ INDEX_op_ext8s_i64, { "r", "r" } },
{ INDEX_op_ext16s_i64, { "r", "r" } },
{ INDEX_op_ext32s_i64, { "r", "r" } },
+ { INDEX_op_ext_i32_i64, { "r", "r" } },
+ { INDEX_op_extu_i32_i64, { "r", "r" } },
{ INDEX_op_bswap16_i32, { "r", "r" } },
{ INDEX_op_bswap32_i32, { "r", "r" } },
@@ -1796,7 +1785,7 @@ void tcg_target_qemu_prologue(TCGContext *s)
/* Setup TCG_GUEST_BASE_REG if desired. */
if (USE_GUEST_BASE_REG) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
@@ -21,8 +21,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#ifndef TCG_TARGET_ALPHA
#define TCG_TARGET_ALPHA 1
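+/* All Alpha instructions are 4 bytes wide. */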
+#define TCG_TARGET_INSN_UNIT_SIZE 4
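+/* Full 32-bit displacements are available when addressing the TLB. */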
+#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
#define TCG_TARGET_NB_REGS 32
/* Having the zero register ($31) == 0 within TCG simplifies a few things.
@@ -127,7 +130,8 @@ typedef enum TCGReg {
(((ofs) & 7) == 0 && ((len) == 8 || (len) == 16 || (len) == 32))
/* We require special attention for truncation. */
-#define TCG_TARGET_HAS_trunc_i32 1
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
/* The default implementations of these are fine. */
#define TCG_TARGET_HAS_neg_i32 0
@@ -161,3 +165,5 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
__asm__ __volatile__ ("call_pal 0x86");
}
+
+#endif /* TCG_TARGET_ALPHA */