@@ -3859,11 +3859,12 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
/* True if we make a dummy type here. */
bool made_dummy = false;
/* The mode to be used for the pointer type. */
- machine_mode p_mode = mode_for_size (esize, MODE_INT, 0);
+ scalar_int_mode p_mode;
/* The GCC type used for the designated type. */
tree gnu_desig_type = NULL_TREE;
- if (!targetm.valid_pointer_mode (p_mode))
+ if (!int_mode_for_size (esize, 0).exists (&p_mode)
+ || !targetm.valid_pointer_mode (p_mode))
p_mode = ptr_mode;
/* If either the designated type or its full view is an unconstrained
@@ -6113,12 +6114,11 @@ gnat_to_gnu_subprog_type (Entity_Id gnat_subprog, bool definition,
unsigned int size
= TREE_INT_CST_LOW (TYPE_SIZE (gnu_cico_return_type));
unsigned int i = BITS_PER_UNIT;
- machine_mode mode;
+ scalar_int_mode mode;
while (i < size)
i <<= 1;
- mode = mode_for_size (i, MODE_INT, 0);
- if (mode != BLKmode)
+ if (int_mode_for_size (i, 0).exists (&mode))
{
SET_TYPE_MODE (gnu_cico_return_type, mode);
SET_TYPE_ALIGN (gnu_cico_return_type,
@@ -1151,8 +1151,9 @@ make_type_from_size (tree type, tree size_tree, bool for_biased)
may need to return the thin pointer. */
if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
{
- machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
- if (!targetm.valid_pointer_mode (p_mode))
+ scalar_int_mode p_mode;
+ if (!int_mode_for_size (size, 0).exists (&p_mode)
+ || !targetm.valid_pointer_mode (p_mode))
p_mode = ptr_mode;
return
build_pointer_type_for_mode
@@ -10250,9 +10250,9 @@ set_builtin_user_assembler_name (tree decl, const char *asmspec)
if (DECL_FUNCTION_CODE (decl) == BUILT_IN_FFS
&& INT_TYPE_SIZE < BITS_PER_WORD)
{
+ scalar_int_mode mode = *int_mode_for_size (INT_TYPE_SIZE, 0);
set_user_assembler_libfunc ("ffs", asmspec);
- set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
- "ffs");
+ set_optab_libfunc (ffs_optab, mode, "ffs");
}
}
@@ -1025,7 +1025,6 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
if (stack_usage_map[low] != 0)
{
int num_to_save;
- machine_mode save_mode;
int delta;
rtx addr;
rtx stack_area;
@@ -1038,13 +1037,16 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
*high_to_save = high;
num_to_save = high - low + 1;
- save_mode = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
+
+ opt_scalar_int_mode save_mode
+ = int_mode_for_size (num_to_save * BITS_PER_UNIT, 1);
/* If we don't have the required alignment, must do this
in BLKmode. */
- if ((low & (MIN (GET_MODE_SIZE (save_mode),
- BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
- save_mode = BLKmode;
+ if (save_mode.exists ()
+ && (low & (MIN (GET_MODE_SIZE (*save_mode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ save_mode = opt_scalar_int_mode ();
if (ARGS_GROW_DOWNWARD)
delta = -high;
@@ -1052,18 +1054,21 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
delta = low;
addr = plus_constant (Pmode, argblock, delta);
- stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, addr));
- set_mem_align (stack_area, PARM_BOUNDARY);
- if (save_mode == BLKmode)
+ if (!save_mode.exists ())
{
+ stack_area = gen_rtx_MEM (BLKmode, memory_address (BLKmode, addr));
+ set_mem_align (stack_area, PARM_BOUNDARY);
save_area = assign_stack_temp (BLKmode, num_to_save);
emit_block_move (validize_mem (save_area), stack_area,
GEN_INT (num_to_save), BLOCK_OP_CALL_PARM);
}
else
{
- save_area = gen_reg_rtx (save_mode);
+ stack_area = gen_rtx_MEM (*save_mode,
+ memory_address (*save_mode, addr));
+ set_mem_align (stack_area, PARM_BOUNDARY);
+ save_area = gen_reg_rtx (*save_mode);
emit_move_insn (save_area, stack_area);
}
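
The save_fixed_argument_area hunk above shows the pattern this series uses wherever BLKmode used to double as a "no suitable integer mode" sentinel: keep an opt_scalar_int_mode, clear it to the empty value instead of assigning BLKmode, branch on .exists (), and dereference with * only on the path where a mode is known to be present. A condensed sketch of that control flow, with num_to_save, misaligned_p, copy_in_blkmode and copy_in_mode as placeholder names rather than code from the patch:

    opt_scalar_int_mode save_mode
      = int_mode_for_size (num_to_save * BITS_PER_UNIT, 1);
    /* Insufficient alignment forces the BLKmode path below.  */
    if (save_mode.exists () && misaligned_p)
      save_mode = opt_scalar_int_mode ();

    if (!save_mode.exists ())
      copy_in_blkmode ();          /* placeholder for the emit_block_move path */
    else
      copy_in_mode (*save_mode);   /* placeholder for the emit_move_insn path */
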
@@ -7283,19 +7283,16 @@ expand_field_assignment (const_rtx x)
/* Don't attempt bitwise arithmetic on non scalar integer modes. */
if (! SCALAR_INT_MODE_P (compute_mode))
{
- machine_mode imode;
-
/* Don't do anything for vector or complex integral types. */
if (! FLOAT_MODE_P (compute_mode))
break;
/* Try to find an integral mode to pun with. */
- imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
- if (imode == BLKmode)
+ if (!int_mode_for_size (GET_MODE_BITSIZE (compute_mode), 0)
+ .exists (&compute_mode))
break;
- compute_mode = imode;
- inner = gen_lowpart (imode, inner);
+ inner = gen_lowpart (compute_mode, inner);
}
/* Compute a mask of LEN bits, if we can do this on the host machine. */
@@ -7366,7 +7363,6 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
machine_mode wanted_inner_reg_mode = word_mode;
machine_mode pos_mode = word_mode;
machine_mode extraction_mode = word_mode;
- machine_mode tmode = mode_for_size (len, MODE_INT, 1);
rtx new_rtx = 0;
rtx orig_pos_rtx = pos_rtx;
HOST_WIDE_INT orig_pos;
@@ -7414,7 +7410,8 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
For MEM, we can avoid an extract if the field starts on an appropriate
boundary and we can change the mode of the memory reference. */
- if (tmode != BLKmode
+ scalar_int_mode tmode;
+ if (int_mode_for_size (len, 1).exists (&tmode)
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
&& !MEM_P (inner)
&& (pos == 0 || REG_P (inner))
@@ -10418,8 +10415,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
&& ! mode_dependent_address_p (XEXP (varop, 0),
MEM_ADDR_SPACE (varop))
&& ! MEM_VOLATILE_P (varop)
- && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
- MODE_INT, 1)) != BLKmode)
+ && (int_mode_for_size (GET_MODE_BITSIZE (mode) - count, 1)
+ .exists (&tmode)))
{
new_rtx = adjust_address_nv (varop, tmode,
BYTES_BIG_ENDIAN ? 0
@@ -12334,7 +12331,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
& GET_MODE_MASK (mode))
+ 1)) >= 0
&& const_op >> i == 0
- && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+ && int_mode_for_size (i, 1).exists (&tmode))
{
op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
continue;
@@ -12494,8 +12491,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
&& CONST_INT_P (XEXP (op0, 1))
&& GET_CODE (XEXP (op0, 0)) == ASHIFT
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
- && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
- MODE_INT, 1)) != BLKmode
+ && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+ .exists (&tmode))
&& (((unsigned HOST_WIDE_INT) const_op
+ (GET_MODE_MASK (tmode) >> 1) + 1)
<= GET_MODE_MASK (tmode)))
@@ -12513,8 +12510,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
&& CONST_INT_P (XEXP (XEXP (op0, 0), 1))
&& GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
&& XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
- && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
- MODE_INT, 1)) != BLKmode
+ && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+ .exists (&tmode))
&& (((unsigned HOST_WIDE_INT) const_op
+ (GET_MODE_MASK (tmode) >> 1) + 1)
<= GET_MODE_MASK (tmode)))
@@ -598,7 +598,7 @@ do_jump (tree exp, rtx_code_label *if_false_label,
&& TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
&& (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
- && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
+ && int_mode_for_size (i + 1, 0).exists (&mode)
&& (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
&& TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
&& have_insn_for (COMPARE, TYPE_MODE (type)))
@@ -15048,13 +15048,12 @@ mem_loc_descriptor (rtx rtl, machine_mode mode,
|| GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT))
{
dw_die_ref type_die = base_type_for_mode (mode, 1);
- machine_mode amode;
+ scalar_int_mode amode;
if (type_die == NULL)
return NULL;
- amode = mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT,
- MODE_INT, 0);
if (INTVAL (rtl) >= 0
- && amode != BLKmode
+ && (int_mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT, 0)
+ .exists (&amode))
&& trunc_int_for_mode (INTVAL (rtl), amode) == INTVAL (rtl)
/* const DW_OP_convert <XXX> vs.
DW_OP_const_type <XXX, 1, const>. */
@@ -5854,8 +5854,7 @@ init_derived_machine_modes (void)
byte_mode = *opt_byte_mode;
word_mode = *opt_word_mode;
- ptr_mode = as_a <scalar_int_mode> (mode_for_size (POINTER_SIZE,
- MODE_INT, 0));
+ ptr_mode = *int_mode_for_size (POINTER_SIZE, 0);
}
/* Create some permanent unique rtl objects shared between all functions. */
@@ -363,7 +363,7 @@ check_reverse_float_storage_order_support (void)
rtx
flip_storage_order (machine_mode mode, rtx x)
{
- machine_mode int_mode;
+ scalar_int_mode int_mode;
rtx result;
if (mode == QImode)
@@ -383,16 +383,13 @@ flip_storage_order (machine_mode mode, rtx x)
if (__builtin_expect (reverse_storage_order_supported < 0, 0))
check_reverse_storage_order_support ();
- if (SCALAR_INT_MODE_P (mode))
- int_mode = mode;
- else
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
{
if (FLOAT_MODE_P (mode)
&& __builtin_expect (reverse_float_storage_order_supported < 0, 0))
check_reverse_float_storage_order_support ();
- int_mode = mode_for_size (GET_MODE_PRECISION (mode), MODE_INT, 0);
- if (int_mode == BLKmode)
+ if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
{
sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
return x;
@@ -1422,11 +1419,10 @@ convert_extracted_bit_field (rtx x, machine_mode mode,
value via a SUBREG. */
if (!SCALAR_INT_MODE_P (tmode))
{
- machine_mode smode;
-
- smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
- x = convert_to_mode (smode, x, unsignedp);
- x = force_reg (smode, x);
+ scalar_int_mode int_mode
+ = *int_mode_for_size (GET_MODE_BITSIZE (tmode), 0);
+ x = convert_to_mode (int_mode, x, unsignedp);
+ x = force_reg (int_mode, x);
return gen_lowpart (tmode, x);
}
@@ -2664,9 +2664,9 @@ copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
copy_mode = word_mode;
if (MEM_P (target))
{
- machine_mode mem_mode = mode_for_size (bitsize, MODE_INT, 1);
- if (mem_mode != BLKmode)
- copy_mode = mem_mode;
+ opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
+ if (mem_mode.exists ())
+ copy_mode = *mem_mode;
}
else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
copy_mode = tmode;
@@ -61,7 +61,7 @@ along with GCC; see the file COPYING3. If not see
should use isl to derive the optimal type for each subexpression. */
static int max_mode_int_precision =
- GET_MODE_PRECISION (mode_for_size (MAX_FIXED_MODE_SIZE, MODE_INT, 0));
+ GET_MODE_PRECISION (*int_mode_for_size (MAX_FIXED_MODE_SIZE, 0));
static int graphite_expression_type_precision = 128 <= max_mode_int_precision ?
128 : max_mode_int_precision;
@@ -1467,7 +1467,7 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
{
struct separate_ops ops;
int prec = GET_MODE_PRECISION (mode);
- machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
+ scalar_int_mode hmode;
machine_mode wmode;
ops.op0 = make_tree (type, op0);
ops.op1 = make_tree (type, op1);
@@ -1503,7 +1503,8 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
PROB_VERY_LIKELY);
}
}
- else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
+ else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
+ && 2 * GET_MODE_PRECISION (hmode) == prec)
{
rtx_code_label *large_op0 = gen_label_rtx ();
rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
@@ -349,8 +349,7 @@ simple_move (rtx_insn *insn, bool speed_p)
size. */
mode = GET_MODE (SET_DEST (set));
if (!SCALAR_INT_MODE_P (mode)
- && (mode_for_size (GET_MODE_SIZE (mode) * BITS_PER_UNIT, MODE_INT, 0)
- == BLKmode))
+ && !int_mode_for_size (GET_MODE_BITSIZE (mode), 0).exists ())
return NULL_RTX;
/* Reject PARTIAL_INT modes. They are used for processor specific
@@ -558,6 +558,16 @@ extern const unsigned char mode_complex[NUM_MACHINE_MODES];
extern machine_mode mode_for_size (unsigned int, enum mode_class, int);
+/* Return the machine mode to use for a MODE_INT of SIZE bits, if one
+   exists.  If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE
+   will not be used.  */
+
+inline opt_scalar_int_mode
+int_mode_for_size (unsigned int size, int limit)
+{
+ return dyn_cast <scalar_int_mode> (mode_for_size (size, MODE_INT, limit));
+}
+
/* Return the machine mode to use for a MODE_FLOAT of SIZE bits, if one
exists. */
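
The new inline wrapper above supports two calling styles, both of which appear in the hunks throughout this patch: test-and-extract when a MODE_INT of the requested width may not exist, and a direct dereference when the caller already knows it does (for example POINTER_SIZE or INT_TYPE_SIZE). A minimal sketch, with bitsize, use_int_mode and use_blk_fallback as placeholder names:

    /* Conditional use: fall back when no integer mode of BITSIZE bits exists.  */
    scalar_int_mode imode;
    if (int_mode_for_size (bitsize, 0).exists (&imode))
      use_int_mode (imode);
    else
      use_blk_fallback ();

    /* Unconditional use: a mode of this width is known to exist, so the
       opt_scalar_int_mode result can be dereferenced directly.  */
    scalar_int_mode pmode = *int_mode_for_size (POINTER_SIZE, 0);
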
@@ -858,8 +858,10 @@ init_optabs (void)
/* The ffs function operates on `int'. Fall back on it if we do not
have a libgcc2 function for that width. */
if (INT_TYPE_SIZE < BITS_PER_WORD)
- set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
- "ffs");
+ {
+ scalar_int_mode mode = *int_mode_for_size (INT_TYPE_SIZE, 0);
+ set_optab_libfunc (ffs_optab, mode, "ffs");
+ }
/* Explicitly initialize the bswap libfuncs since we need them to be
valid for things other than word_mode. */
@@ -1482,12 +1482,11 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
{
- machine_mode tmode
- = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
- - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ scalar_int_mode tmode;
gcc_assert (GET_MODE_BITSIZE (mode)
> GET_MODE_BITSIZE (GET_MODE (op)));
- if (tmode != BLKmode)
+ if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
@@ -1599,10 +1598,9 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
{
- machine_mode tmode
- = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
- - INTVAL (XEXP (op, 1)), MODE_INT, 1);
- if (tmode != BLKmode)
+ scalar_int_mode tmode;
+ if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
@@ -2444,10 +2444,11 @@ vector_type_mode (const_tree t)
/* For integers, try mapping it to a same-sized scalar mode. */
if (GET_MODE_CLASS (innermode) == MODE_INT)
{
- mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
- * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
-
- if (mode != VOIDmode && have_regs_of_mode[mode])
+ unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode));
+ scalar_int_mode mode;
+ if (int_mode_for_size (size, 0).exists (&mode)
+ && have_regs_of_mode[mode])
return mode;
}
@@ -1989,8 +1989,8 @@ handle_builtin_memcmp (gimple_stmt_iterator *gsi)
unsigned align1 = get_pointer_alignment (arg1);
unsigned align2 = get_pointer_alignment (arg2);
unsigned align = MIN (align1, align2);
- machine_mode mode = mode_for_size (leni, MODE_INT, 1);
- if (mode != BLKmode
+ scalar_int_mode mode;
+ if (int_mode_for_size (leni, 1).exists (&mode)
&& (align >= leni || !SLOW_UNALIGNED_ACCESS (mode, align)))
{
location_t loc = gimple_location (stmt2);
@@ -58,15 +58,14 @@ static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
tree vectype, unsigned HOST_WIDE_INT count)
{
- machine_mode mode, array_mode;
+ machine_mode mode;
+ scalar_int_mode array_mode;
bool limit_p;
mode = TYPE_MODE (vectype);
limit_p = !targetm.array_mode_supported_p (mode, count);
- array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
- MODE_INT, limit_p);
-
- if (array_mode == BLKmode)
+ if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode),
+ limit_p).exists (&array_mode))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -288,7 +288,6 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
enum tree_code code)
{
tree result, compute_type;
- machine_mode mode;
int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD;
location_t loc = gimple_location (gsi_stmt (*gsi));
@@ -312,7 +311,8 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
else
{
/* Use a single scalar operation with a mode no wider than word_mode. */
- mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0);
+ scalar_int_mode mode
+ = *int_mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), 0);
compute_type = lang_hooks.types.type_for_mode (mode, 1);
result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code, type);
warning_at (loc, OPT_Wvector_operation_performance,
@@ -6992,7 +6992,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
to a larger load. */
unsigned lsize
= group_size * TYPE_PRECISION (TREE_TYPE (vectype));
- machine_mode elmode = mode_for_size (lsize, MODE_INT, 0);
+ scalar_int_mode elmode = *int_mode_for_size (lsize, 0);
machine_mode vmode = mode_for_vector (elmode,
nunits / group_size);
/* If we can't construct such a vector fall back to