@@ -772,9 +772,6 @@ extern opt_pass* make_pass_sh_optimize_sett_clrt (gcc::context* ctx,
static void
register_sh_passes (void)
{
- if (!TARGET_SH1)
- return;
-
/* Running the sh_treg_combine pass after ce1 generates better code when
comparisons are combined and reg-reg moves are introduced, because
reg-reg moves will be eliminated afterwards. However, there are quite
@@ -848,36 +845,31 @@ sh_option_override (void)
if (!TARGET_SH3 && TARGET_USERMODE)
TARGET_USERMODE = false;
- if (TARGET_SH1)
+ if (! strcmp (sh_div_str, "call-div1"))
+ sh_div_strategy = SH_DIV_CALL_DIV1;
+ else if (! strcmp (sh_div_str, "call-fp") && TARGET_FPU_ANY)
+ sh_div_strategy = SH_DIV_CALL_FP;
+ else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT)
+ sh_div_strategy = SH_DIV_CALL_TABLE;
+ else
{
- if (! strcmp (sh_div_str, "call-div1"))
- sh_div_strategy = SH_DIV_CALL_DIV1;
- else if (! strcmp (sh_div_str, "call-fp")
- && (TARGET_FPU_DOUBLE || TARGET_FPU_SINGLE_ONLY
- || TARGET_FPU_ANY))
- sh_div_strategy = SH_DIV_CALL_FP;
- else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT)
+ /* Pick one that makes most sense for the target in general.
+ It is not much good to use different functions depending on -Os,
+ since then we'll end up with two different functions when some of
+ the code is compiled for size, and some for speed. */
+
+ /* SH4 tends to emphasize speed. */
+ if (TARGET_HARD_SH4)
sh_div_strategy = SH_DIV_CALL_TABLE;
+      /* SH2A has its own integer division instructions.  */
+ else if (TARGET_SH2A)
+ sh_div_strategy = SH_DIV_INTRINSIC;
+ /* SH1 .. SH3 cores often go into small-footprint systems, so
+ default to the smallest implementation available. */
else
- /* Pick one that makes most sense for the target in general.
- It is not much good to use different functions depending
- on -Os, since then we'll end up with two different functions
- when some of the code is compiled for size, and some for
- speed. */
-
- /* SH4 tends to emphasize speed. */
- if (TARGET_HARD_SH4)
- sh_div_strategy = SH_DIV_CALL_TABLE;
- /* These have their own way of doing things. */
- else if (TARGET_SH2A)
- sh_div_strategy = SH_DIV_INTRINSIC;
- /* SH1 .. SH3 cores often go into small-footprint systems, so
- default to the smallest implementation available. */
- else
- sh_div_strategy = SH_DIV_CALL_DIV1;
+ sh_div_strategy = SH_DIV_CALL_DIV1;
}
- if (!TARGET_SH1)
- TARGET_PRETEND_CMOVE = 0;
+
if (sh_divsi3_libfunc[0])
; /* User supplied - leave it alone. */
else if (TARGET_DIVIDE_CALL_FP)
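
For context, a rough sketch of how the strategy selection above plays out, assuming the usual SH option spellings (-mdiv=, -m2e, -m4); the exact mapping is illustrative only:

  /* sh_div_str is the argument of -mdiv=; an explicit, usable value wins:
       -m4  -mdiv=call-table  ->  SH_DIV_CALL_TABLE  (needs TARGET_DYNSHIFT)
       -m2e -mdiv=call-fp     ->  SH_DIV_CALL_FP     (needs TARGET_FPU_ANY)
     otherwise the default chain applies:
       TARGET_HARD_SH4        ->  SH_DIV_CALL_TABLE  (favor speed)
       TARGET_SH2A            ->  SH_DIV_INTRINSIC   (own divide insns)
       everything else        ->  SH_DIV_CALL_DIV1   (smallest code)  */
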
@@ -1443,8 +1435,7 @@ sh_print_operand (FILE *stream, rtx x, int code)
break;
default:
- if (TARGET_SH1)
- fputc ('#', stream);
+ fputc ('#', stream);
output_addr_const (stream, x);
break;
}
@@ -1618,8 +1609,7 @@ prepare_move_operands (rtx operands[], machine_mode mode)
of a library call to the target. Reject `st r0,@(rX,rY)' because
reload will fail to find a spill register for rX, since r0 is already
being used for the source. */
- else if (TARGET_SH1
- && refers_to_regno_p (R0_REG, operands[1])
+ else if (refers_to_regno_p (R0_REG, operands[1])
&& MEM_P (operands[0])
&& GET_CODE (XEXP (operands[0], 0)) == PLUS
&& REG_P (XEXP (XEXP (operands[0], 0), 1)))
@@ -1639,7 +1629,7 @@ prepare_move_operands (rtx operands[], machine_mode mode)
case. We can pre-allocate R0 for that index term to avoid
the issue. See PR target/66591. */
else if (sh_lra_p ()
- && TARGET_SH1 && ! TARGET_SH2A
+ && ! TARGET_SH2A
&& ((REG_P (operands[0]) && MEM_P (operands[1]))
|| (REG_P (operands[1]) && MEM_P (operands[0]))))
{
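
A short note on why R0 is special here, based on the SH addressing modes (illustrative, not part of the patch):

  /* The only reg+reg memory form on SH is indexed addressing, @(R0,Rn),
     e.g.  mov.l  r2,@(r0,r5),  where the index register is always R0.
     If the value being stored already uses R0, reload would need R0 both
     as the data and as the index, hence the special handling of this
     case.  */
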
@@ -9590,8 +9580,7 @@ sh_can_follow_jump (const rtx_insn *branch1, const rtx_insn *branch2)
{
/* Don't follow if BRANCH2 is possible to be a jump crossing between
hot and cold partitions. */
- if (TARGET_SH1
- && flag_reorder_blocks_and_partition
+ if (flag_reorder_blocks_and_partition
&& simplejump_p (branch2)
&& CROSSING_JUMP_P (branch2))
return false;
@@ -77,7 +77,7 @@ extern int code_for_indirect_jump_scratch;
/* This is not used by the SH2E calling convention */
#define TARGET_VARARGS_PRETEND_ARGS(FUN_DECL) \
- (TARGET_SH1 && ! TARGET_SH2E \
+ (! TARGET_SH2E \
&& ! (TARGET_HITACHI || sh_attr_renesas_p (FUN_DECL)))
#ifndef TARGET_CPU_DEFAULT
@@ -636,7 +636,7 @@ extern char sh_additional_register_names[ADDREGNAMES_SIZE] \
|| XD_REGISTER_P (REGNO) \
|| (REGNO) == AP_REG || (REGNO) == RAP_REG \
|| (REGNO) == FRAME_POINTER_REGNUM \
- || (TARGET_SH1 && (SPECIAL_REGISTER_P (REGNO) || (REGNO) == PR_REG)) \
+   || SPECIAL_REGISTER_P (REGNO) || (REGNO) == PR_REG \
|| (TARGET_SH2E && (REGNO) == FPUL_REG))
/* The mode that should be generally used to store a register by
@@ -1879,8 +1879,7 @@ extern int current_function_interrupt;
#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
if (GET_MODE_CLASS (MODE) == MODE_INT \
&& GET_MODE_SIZE (MODE) < 4/* ! UNITS_PER_WORD */)\
- (UNSIGNEDP) = ((MODE) == SImode ? 0 : (UNSIGNEDP)), \
- (MODE) = (TARGET_SH1 ? SImode : DImode);
+ (UNSIGNEDP) = ((MODE) == SImode ? 0 : (UNSIGNEDP)), (MODE) = SImode;
#define MAX_FIXED_MODE_SIZE (64)
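
A minimal illustration of the promotion above (assuming typical SH code generation; not taken from the patch):

  /* With the PROMOTE_MODE definition above, QImode/HImode values held in
     registers are represented in SImode, so e.g.  */
  short
  bump (short x)
  {
    return x + 1;   /* x lives in a 32-bit register and the add is an
                       SImode add; HImode only reappears at a narrowing
                       store or use.  */
  }
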
@@ -1529,12 +1529,9 @@
(match_operand:DI 2 "arith_operand")))]
""
{
- if (TARGET_SH1)
- {
- operands[2] = force_reg (DImode, operands[2]);
- emit_insn (gen_adddi3_compact (operands[0], operands[1], operands[2]));
- DONE;
- }
+ operands[2] = force_reg (DImode, operands[2]);
+ emit_insn (gen_adddi3_compact (operands[0], operands[1], operands[2]));
+ DONE;
})
(define_insn_and_split "adddi3_compact"
@@ -1780,7 +1777,7 @@
(match_operand:SI 2 "arith_or_int_operand")))]
""
{
- if (TARGET_SH1 && !arith_operand (operands[2], SImode))
+ if (!arith_operand (operands[2], SImode))
{
if (!sh_lra_p () || reg_overlap_mentioned_p (operands[0], operands[1]))
{
@@ -1935,12 +1932,9 @@
(match_operand:DI 2 "arith_reg_operand" "")))]
""
{
- if (TARGET_SH1)
- {
- operands[1] = force_reg (DImode, operands[1]);
- emit_insn (gen_subdi3_compact (operands[0], operands[1], operands[2]));
- DONE;
- }
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_insn (gen_subdi3_compact (operands[0], operands[1], operands[2]));
+ DONE;
})
(define_insn_and_split "subdi3_compact"
@@ -2165,7 +2159,7 @@
(match_operand:SI 2 "arith_reg_operand" "")))]
""
{
- if (TARGET_SH1 && CONST_INT_P (operands[1]))
+ if (CONST_INT_P (operands[1]))
{
emit_insn (gen_negsi2 (operands[0], operands[2]));
emit_insn (gen_addsi3 (operands[0], operands[0], operands[1]));
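
The constant-minuend case expands to a negate followed by an immediate add; a minimal example (the asm shown is an expectation, not taken from the patch):

  /* "c - x" with a constant c becomes neg + add #c, e.g.  */
  int
  rsub5 (int x)
  {
    return 5 - x;   /* expected roughly:  neg  r4,r0 ; add  #5,r0  */
  }
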
@@ -2840,20 +2834,18 @@
Ideally the splitter of *andsi_compact would be enough, if redundant
zero extensions were detected after the combine pass, which does not
happen at the moment. */
- if (TARGET_SH1)
+
+ if (satisfies_constraint_Jmb (operands[2]))
{
- if (satisfies_constraint_Jmb (operands[2]))
- {
- emit_insn (gen_zero_extendqisi2 (operands[0],
- gen_lowpart (QImode, operands[1])));
- DONE;
- }
- else if (satisfies_constraint_Jmw (operands[2]))
- {
- emit_insn (gen_zero_extendhisi2 (operands[0],
- gen_lowpart (HImode, operands[1])));
- DONE;
- }
+ emit_insn (gen_zero_extendqisi2 (operands[0],
+ gen_lowpart (QImode, operands[1])));
+ DONE;
+ }
+ else if (satisfies_constraint_Jmw (operands[2]))
+ {
+ emit_insn (gen_zero_extendhisi2 (operands[0],
+ gen_lowpart (HImode, operands[1])));
+ DONE;
}
})
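
The Jmb and Jmw constraints are the 0xff and 0xffff byte/word masks (an assumption here, based on the SH constraint definitions), so such ANDs become zero extensions; for example:

  /* Masking with 0xff / 0xffff is expected to turn into extu.b / extu.w
     instead of materializing the constant, e.g.  */
  unsigned int
  low_byte (unsigned int x)
  {
    return x & 0xff;   /* expected to compile to a single extu.b  */
  }
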
@@ -5565,23 +5557,19 @@
""
{
prepare_move_operands (operands, DImode);
- if (TARGET_SH1)
- {
- /* When the dest operand is (R0, R1) register pair, split it to
- two movsi of which dest is R1 and R0 so as to lower R0-register
- pressure on the first movsi. Apply only for simple source not
- to make complex rtl here. */
- if (REG_P (operands[0])
- && REGNO (operands[0]) == R0_REG
- && REG_P (operands[1])
- && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
- {
- emit_insn (gen_movsi (gen_rtx_REG (SImode, R1_REG),
- gen_rtx_SUBREG (SImode, operands[1], 4)));
- emit_insn (gen_movsi (gen_rtx_REG (SImode, R0_REG),
- gen_rtx_SUBREG (SImode, operands[1], 0)));
- DONE;
- }
+
+  /* When the destination is the (R0, R1) register pair, split the move
+     into two movsi insns, writing R1 first and R0 second, to lower R0
+     register pressure on the first movsi.  Do this only for a simple
+     source, so as not to create complex RTL here.  */
+ if (REG_P (operands[0]) && REGNO (operands[0]) == R0_REG
+ && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ {
+ emit_insn (gen_movsi (gen_rtx_REG (SImode, R1_REG),
+ gen_rtx_SUBREG (SImode, operands[1], 4)));
+ emit_insn (gen_movsi (gen_rtx_REG (SImode, R0_REG),
+ gen_rtx_SUBREG (SImode, operands[1], 0)));
+ DONE;
}
})