@@ -36,6 +36,9 @@
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_N32 0x200
+#define TCG_TMP0 TCG_REG_R13
+
+
/* All of the following instructions are prefixed with their instruction
format, and are defined as 8- or 16-bit quantities, even when the two
halves of the 16-bit quantity may appear 32 bits apart in the insn.
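(For orientation, a minimal sketch of what this convention implies for an RXY-format insn, where the two bytes packed into the 16-bit opcode value end up 32 bits apart; emit16() is a hypothetical stand-in for the backend's halfword output routine:

    #include <stdint.h>

    /* Sketch only, not part of the patch: emit an RXY-format insn.
       First halfword: opcode high byte, R1, X2.
       Second halfword: B2 and the low 12 displacement bits.
       Third halfword: the high 8 displacement bits and opcode low byte. */
    static void emit_rxy_sketch(void (*emit16)(uint16_t), uint16_t op,
                                int r1, int x2, int b2, int32_t d2)
    {
        emit16((op & 0xff00) | (r1 << 4) | x2);
        emit16((b2 << 12) | (d2 & 0xfff));
        emit16(((d2 & 0xff000) >> 4) | (op & 0xff));
    }
)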
@@ -491,7 +494,7 @@ static void tcg_out_ldst(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
if (ofs < -0x80000 || ofs >= 0x80000) {
/* Combine the low 16 bits of the offset with the actual load insn;
the high 48 bits must come from an immediate load. */
- index = TCG_REG_R13;
+ index = TCG_TMP0;
tcg_out_movi(s, TCG_TYPE_PTR, index, ofs & ~0xffff);
ofs &= 0xffff;
}
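(A standalone spot check of the offset split performed above; the sample value is illustrative:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t ofs  = 0x123456789abcll;   /* any ofs outside +/-0x80000 */
        int64_t high = ofs & ~0xffffll;    /* movi into TCG_TMP0 (index) */
        int64_t low  = ofs & 0xffffll;     /* left in the displacement */
        assert(high + low == ofs);         /* base+index+disp == base+ofs */
        assert(low >= 0 && low < 0x80000); /* fits RXY's signed 20-bit disp */
        return 0;
    }
)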
@@ -658,8 +661,8 @@ static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
for (i = 0; i < 4; i++) {
tcg_target_ulong mask = ~(0xffffull << i*16);
if ((val & mask) == 0) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, val);
- tcg_out_insn(s, RRE, NGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
+ tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
return;
}
}
@@ -667,8 +670,8 @@ static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
for (i = 0; i < 2; i++) {
tcg_target_ulong mask = ~(0xffffffffull << i*32);
if ((val & mask) == 0) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, val);
- tcg_out_insn(s, RRE, NGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
+ tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
return;
}
}
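(The two loops classify the constant: (val & mask) == 0 holds exactly when every set bit of val falls inside one aligned 16-bit, respectively 32-bit, chunk. A standalone spot check of that predicate:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t v = 0x00ff000000000000ull;       /* bits only in halfword 3 */
        assert((v & ~(0xffffull << 3*16)) == 0);  /* matches chunk i = 3 */
        assert((v & ~(0xffffull << 2*16)) != 0);  /* does not match i = 2 */
        return 0;
    }
)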
@@ -734,8 +737,8 @@ static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
value first and perform the xor via registers. This is true for
any 32-bit negative value, where the high 32-bits get flipped too. */
if (sval < 0 && sval == (int32_t)sval) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, sval);
- tcg_out_insn(s, RRE, XGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, sval);
+ tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
return;
}
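(A standalone check of the property the comment relies on: a negative value equal to its own int32 sign extension has all-ones in its high half, so xoring with it flips the destination's upper 32 bits too:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t sval = -2;                           /* any 32-bit negative */
        assert(sval == (int32_t)sval);               /* the guard used above */
        assert(((uint64_t)sval >> 32) == 0xffffffffull);
        return 0;
    }
)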
@@ -792,8 +795,8 @@ static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
} else if (off == (int32_t)off) {
tcg_out_insn(s, RIL, BRCL, cc, off);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
- tcg_out_insn(s, RR, BCR, cc, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+ tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
}
}
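(The reachability test mirrors BRCL's encoding: the offset is a signed 32-bit count of 2-byte halfwords, giving roughly +/-4GB of PC-relative range. A sketch of the test; the function name is hypothetical and pc stands for the address of the branch insn:

    #include <stdint.h>

    static int brcl_reaches(int64_t dest, int64_t pc)
    {
        int64_t off = (dest - pc) >> 1;   /* halfword units */
        return off == (int32_t)off;       /* the guard used above */
    }
)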
@@ -815,8 +818,8 @@ static void tgen_calli(TCGContext *s, tcg_target_long dest)
if (off == (int32_t)off) {
tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
- tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+ tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
}
}
@@ -852,13 +855,13 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
tgen64_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
if (is_store) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
offsetof(CPUState, tlb_table[mem_index][0].addr_write));
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
offsetof(CPUState, tlb_table[mem_index][0].addr_read));
}
- tcg_out_insn(s, RRE, AGR, arg1, TCG_REG_R13);
+ tcg_out_insn(s, RRE, AGR, arg1, TCG_TMP0);
tcg_out_insn(s, RRE, AGR, arg1, TCG_AREG0);
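(Taken together with the shift assumed to be emitted just before these lines, the usual TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS right shift, the sequence computes the address of the TLB comparator field. A sketch with illustrative constants; the real values come from the QEMU headers:

    #include <stddef.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS    12
    #define CPU_TLB_SIZE        (1 << 8)
    #define CPU_TLB_ENTRY_BITS  5

    static uintptr_t tlb_cmp_addr(uintptr_t env, uint64_t vaddr, size_t cmp_ofs)
    {
        /* Scale the page number to entry-sized units, then apply the
           mask from the tgen64_andi call above. */
        uint64_t idx = vaddr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        idx &= (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS;
        return env + idx + cmp_ofs;   /* the movi plus the two AGRs above */
    }
)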
@@ -1103,16 +1106,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
(tcg_target_long)s->code_ptr) >> 1;
if (off == (int32_t)off) {
/* load address relative to PC */
- tcg_out_insn(s, RIL, LARL, TCG_REG_R13, off);
+ tcg_out_insn(s, RIL, LARL, TCG_TMP0, off);
} else {
/* too far for larl */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
(tcg_target_long)(s->tb_next + args[0]));
}
/* load address stored at s->tb_next + args[0] */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R13, TCG_REG_R13, 0);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_TMP0, 0);
/* and go there */
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R13);
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
}
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
break;
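(In C terms, the emitted sequence amounts to an indirect jump through the tb_next slot; a sketch, with hypothetical helper and type names:

    #include <stdint.h>

    typedef void (*tb_entry_fn)(void);

    /* Sketch only: fetch the chained-TB pointer stored in the slot
       (the load through TCG_TMP0), then transfer control (the BCR). */
    static void goto_tb_sketch(uintptr_t *tb_next_slot)
    {
        ((tb_entry_fn)*tb_next_slot)();
    }
)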
@@ -1353,8 +1356,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_sh64(s, RSY_RLL, args[0], args[1],
SH32_REG_NONE, (32 - args[2]) & 31);
} else {
- tcg_out_insn(s, RR, LCR, TCG_REG_R13, args[2]);
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_R13, 0);
+ tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
+ tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
}
break;
@@ -1373,8 +1376,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
} else {
/* We can use the smaller 32-bit negate because only the
low 6 bits are examined for the rotate. */
- tcg_out_insn(s, RR, LCR, TCG_REG_R13, args[2]);
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_REG_R13, 0);
+ tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
+ tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
}
break;
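(Both fallbacks rely on rol(x, -n) == ror(x, n), and on RLL/RLLG reading only the low 6 bits of the count register, which is why the 32-bit LCR negate suffices even for the 64-bit rotate. A standalone check for the 32-bit case:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << (n & 31)) | (x >> (-n & 31));
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        unsigned n = 13;
        /* rotate left by the negated count == rotate right by n */
        assert(rol32(x, 32 - n) == ((x >> n) | (x << (32 - n))));
        return 0;
    }
)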
@@ -1638,7 +1641,7 @@ void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
/* frequently used as a temporary */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
+ tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
/* another temporary */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);
/* XXX many insns can't be used with R0, so we better avoid it for now */
Use a define for the temp register instead of hard-coding it.

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/s390/tcg-target.c | 49 ++++++++++++++++++++++-----------------------
 1 files changed, 26 insertions(+), 23 deletions(-)