Message ID | 1275678883-7082-13-git-send-email-rth@twiddle.net |
---|---|
State | New |
On Fri, Jun 04, 2010 at 12:14:20PM -0700, Richard Henderson wrote:
> Use a define for the temp register instead of hard-coding it.
> 
> Signed-off-by: Richard Henderson <rth@twiddle.net>
> ---
>  tcg/s390/tcg-target.c |   54 ++++++++++++++++++++++++++----------------------
>  1 files changed, 29 insertions(+), 25 deletions(-)

This patch looks ok.

> diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
> index 5b2134b..2b80c02 100644
> --- a/tcg/s390/tcg-target.c
> +++ b/tcg/s390/tcg-target.c
> @@ -40,6 +40,10 @@
>     rather than TCG_REG_R0.  */
>  #define TCG_REG_NONE    0
>  
> +/* A scratch register that may be used throughout the backend.  */
> +#define TCG_TMP0        TCG_REG_R13
> +
> +
>  /* All of the following instructions are prefixed with their instruction
>     format, and are defined as 8- or 16-bit quantities, even when the two
>     halves of the 16-bit quantity may appear 32 bits apart in the insn.
> @@ -376,12 +380,12 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type,
>          tcg_out_insn(s, RI, IILH, ret, arg >> 16);
>      } else {
>          /* branch over constant and store its address in R13 */
> -        tcg_out_insn(s, RIL, BRASL, TCG_REG_R13, (6 + 8) >> 1);
> +        tcg_out_insn(s, RIL, BRASL, TCG_TMP0, (6 + 8) >> 1);
>          /* 64-bit constant */
>          tcg_out32(s, arg >> 32);
>          tcg_out32(s, arg);
>          /* load constant to ret */
> -        tcg_out_insn(s, RXY, LG, ret, TCG_REG_R13, 0, 0);
> +        tcg_out_insn(s, RXY, LG, ret, TCG_TMP0, 0, 0);
>      }
>  }
>  
> @@ -399,14 +403,14 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
>      if (ofs < -0x80000 || ofs >= 0x80000) {
>          /* Combine the low 16 bits of the offset with the actual load insn;
>             the high 48 bits must come from an immediate load.  */
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, ofs & ~0xffff);
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
>          ofs &= 0xffff;
>  
>          /* If we were already given an index register, add it in.  */
>          if (index != TCG_REG_NONE) {
> -            tcg_out_insn(s, RRE, AGR, TCG_REG_R13, index);
> +            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
>          }
> -        index = TCG_REG_R13;
> +        index = TCG_TMP0;
>      }
>  
>      if (opc_rx && ofs >= 0 && ofs < 0x1000) {
> @@ -482,8 +486,8 @@ static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
>      } else if (off == (int32_t)off) {
>          tcg_out_insn(s, RIL, BRCL, cc, off);
>      } else {
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
> -        tcg_out_insn(s, RR, BCR, cc, TCG_REG_R13);
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
> +        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
>      }
>  }
>  
> @@ -505,8 +509,8 @@ static void tgen_calli(TCGContext *s, tcg_target_long dest)
>      if (off == (int32_t)off) {
>          tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
>      } else {
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
> -        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_REG_R13);
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
> +        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
>      }
>  }
>  
> @@ -538,22 +542,22 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
>      tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
>                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
>  
> -    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +    tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>                   TARGET_PAGE_MASK | ((1 << s_bits) - 1));
> -    tcg_out_insn(s, RRE, NGR, arg0, TCG_REG_R13);
> +    tcg_out_insn(s, RRE, NGR, arg0, TCG_TMP0);
>  
> -    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +    tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>                   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
> -    tcg_out_insn(s, RRE, NGR, arg1, TCG_REG_R13);
> +    tcg_out_insn(s, RRE, NGR, arg1, TCG_TMP0);
>  
>      if (is_store) {
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>                       offsetof(CPUState, tlb_table[mem_index][0].addr_write));
>      } else {
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>                       offsetof(CPUState, tlb_table[mem_index][0].addr_read));
>      }
> -    tcg_out_insn(s, RRE, AGR, arg1, TCG_REG_R13);
> +    tcg_out_insn(s, RRE, AGR, arg1, TCG_TMP0);
>  
>      tcg_out_insn(s, RRE, AGR, arg1, TCG_AREG0);
>  
> @@ -688,8 +692,8 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
>  #else
>          /* swapped unsigned halfword load with upper bits zeroed */
>          tcg_out_insn(s, RXY, LRVH, data_reg, arg0, 0, 0);
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, 0xffffL);
> -        tcg_out_insn(s, RRE, NGR, data_reg, 13);
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, 0xffffL);
> +        tcg_out_insn(s, RRE, NGR, data_reg, TCG_TMP0);
>  #endif
>          break;
>      case LD_INT16:
> @@ -802,16 +806,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>                     (tcg_target_long)s->code_ptr) >> 1;
>              if (off == (int32_t)off) {
>                  /* load address relative to PC */
> -                tcg_out_insn(s, RIL, LARL, TCG_REG_R13, off);
> +                tcg_out_insn(s, RIL, LARL, TCG_TMP0, off);
>              } else {
>                  /* too far for larl */
> -                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>                               (tcg_target_long)(s->tb_next + args[0]));
>              }
>              /* load address stored at s->tb_next + args[0] */
> -            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R13, TCG_REG_R13, 0);
> +            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_TMP0, 0);
>              /* and go there */
> -            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R13);
> +            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
>          }
>          s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
>          break;
> @@ -934,9 +938,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          break;
>      case INDEX_op_neg_i64:
>          /* FIXME: optimize args[0] != args[1] case */
> -        tcg_out_mov(s, TCG_REG_R13, args[1]);
> +        tcg_out_mov(s, TCG_TMP0, args[1]);
>          tcg_out_movi(s, TCG_TYPE_I64, args[0], 0);
> -        tcg_out_insn(s, RRE, SGR, args[0], TCG_REG_R13);
> +        tcg_out_insn(s, RRE, SGR, args[0], TCG_TMP0);
>          break;
>  
>      case INDEX_op_mul_i32:
> @@ -1192,7 +1196,7 @@ void tcg_target_init(TCGContext *s)
>  
>      tcg_regset_clear(s->reserved_regs);
>      /* frequently used as a temporary */
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
> +    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
>      /* another temporary */
>      tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);
>      /* XXX many insns can't be used with R0, so we better avoid it for now */
> -- 
> 1.7.0.1
> 
> 
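A note on why the rename is more than cosmetic: the qemu_ld hunk above had already drifted into passing a bare integer 13 (`tcg_out_insn(s, RRE, NGR, data_reg, 13)`) where the register constant was meant, and the patch fixes that call site as a side effect. A single named define makes this class of slip much harder to reintroduce. The sketch below is illustrative only; the emitter stub and register values are stand-ins, not QEMU's actual code.

```c
#include <stdio.h>

/* Illustrative register numbers; the real backend uses a TCGReg enum. */
enum { TCG_REG_R12 = 12, TCG_REG_R13 = 13, TCG_REG_R14 = 14 };

/* The patch's change: one symbolic name for the backend's scratch register. */
#define TCG_TMP0 TCG_REG_R13

/* Stand-in for tcg_out_insn(s, RRE, NGR, ...): just prints the operands. */
static void emit_ngr(int r1, int r2)
{
    printf("ngr %%r%d,%%r%d\n", r1, r2);
}

int main(void)
{
    int data_reg = 2;

    /* Before: a bare 13 that silently breaks if the scratch register moves. */
    emit_ngr(data_reg, 13);

    /* After: retargeting TCG_TMP0 updates every call site at once. */
    emit_ngr(data_reg, TCG_TMP0);
    return 0;
}
```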
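For reference, the `(6 + 8) >> 1` passed to BRASL in the tcg_out_movi hunk encodes the jump over the inline literal: s390 relative-branch offsets are counted in halfwords, BRASL is a 6-byte RIL-format instruction, and the 64-bit constant that follows it occupies 8 bytes. BRASL also stores the address of the next instruction, here the literal itself, in its link register, which the following LG then uses as a base. A minimal check of that arithmetic:

```c
#include <assert.h>

int main(void)
{
    const int brasl_len = 6;  /* RIL-format instruction, 3 halfwords */
    const int const_len = 8;  /* the inline 64-bit constant          */

    /* Branching 6 + 8 bytes = 7 halfwords ahead lands just past the
       literal, whose address BRASL left in TCG_TMP0 for the LG load. */
    int halfword_offset = (brasl_len + const_len) >> 1;
    assert(halfword_offset == 7);
    return 0;
}
```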
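The tcg_out_mem hunk handles offsets outside the 20-bit signed long-displacement range [-0x80000, 0x80000) by routing the high bits through TCG_TMP0 as an extra index register, while only the low 16 bits stay in the instruction; since 0xffff < 0x80000, the remainder always fits. A worked example with an arbitrary illustrative offset:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
    long ofs = 0x12345678;       /* too large for the displacement field */

    long high = ofs & ~0xffffL;  /* materialized into TCG_TMP0 via movi  */
    long low  = ofs & 0xffffL;   /* kept as the insn's displacement      */

    /* The split is exact, so base + index + displacement still adds up. */
    assert(high + low == ofs);

    printf("movi tmp0, 0x%lx ; disp = 0x%lx\n", high, low);
    return 0;
}
```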
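Finally, the two constants materialized into TCG_TMP0 in tcg_prepare_qemu_ldst implement QEMU's software-TLB probe: one mask keeps the bits of the address that must match the stored tag (the page number, plus the low `s_bits` so that an unaligned access misses and falls back to the slow path), and the other scales the extracted TLB index into a byte offset into `tlb_table`. A sketch with assumed values of 4 KiB pages and 256 entries of 16 bytes each; the real constants come from QEMU's build configuration:

```c
#include <inttypes.h>
#include <stdio.h>

/* Assumed, illustrative values; QEMU defines these per target/host. */
#define TARGET_PAGE_BITS   12
#define TARGET_PAGE_MASK   (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
#define CPU_TLB_SIZE       256
#define CPU_TLB_ENTRY_BITS 4

int main(void)
{
    uint64_t addr   = 0x7fff12345678ULL;  /* illustrative guest address */
    int      s_bits = 3;                  /* 8-byte access */

    /* arg1: TLB slot, pre-scaled to a byte offset into the table
       (what the SRLG plus the second NGR compute). */
    uint64_t entry_ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                         & (uint64_t)((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* arg0: the compare tag -- page number plus the access's low
       alignment bits (what the first NGR keeps). */
    uint64_t tag = addr & (TARGET_PAGE_MASK | ((1u << s_bits) - 1));

    printf("entry offset = 0x%" PRIx64 ", compare tag = 0x%" PRIx64 "\n",
           entry_ofs, tag);
    return 0;
}
```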