@@ -1226,7 +1226,6 @@ static bool
vector_mode_valid_p (machine_mode mode)
{
enum mode_class mclass = GET_MODE_CLASS (mode);
- machine_mode innermode;
/* Doh! What's going on? */
if (mclass != MODE_VECTOR_INT
@@ -1241,14 +1240,12 @@ vector_mode_valid_p (machine_mode mode)
if (targetm.vector_mode_supported_p (mode))
return true;
- innermode = GET_MODE_INNER (mode);
-
/* We should probably return 1 if requesting V4DI and we have no DI,
but we have V2DI, but this is probably very unlikely. */
/* If we have support for the inner mode, we can safely emulate it.
     We may not have V2DI, but we can emulate with a pair of DIs.  */
- return targetm.scalar_mode_supported_p (innermode);
+ return targetm.scalar_mode_supported_p (GET_MODE_INNER (mode));
}
@@ -1324,7 +1321,8 @@ handle_mode_attribute (tree *node, tree name, tree args,
case MODE_UFRACT:
case MODE_ACCUM:
case MODE_UACCUM:
- valid_mode = targetm.scalar_mode_supported_p (mode);
+ valid_mode
+ = targetm.scalar_mode_supported_p (as_a <scalar_mode> (mode));
break;
case MODE_COMPLEX_INT:
@@ -1027,8 +1027,8 @@ c_build_vec_perm_expr (location_t loc, tree v0, tree v1, tree mask,
return error_mark_node;
}
- if (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (v0))))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (mask)))))
+ if (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (TREE_TYPE (v0))))
+ != GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (TREE_TYPE (mask)))))
{
if (complain)
error_at (loc, "__builtin_shuffle argument vector(s) inner type "
@@ -2125,12 +2125,12 @@ c_common_fixed_point_type_for_size (unsigned int ibit, unsigned int fbit,
else
mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
- machine_mode mode;
+ opt_scalar_mode mode;
FOR_EACH_MODE_IN_CLASS (mode, mclass)
- if (GET_MODE_IBIT (mode) >= ibit && GET_MODE_FBIT (mode) >= fbit)
+ if (GET_MODE_IBIT (*mode) >= ibit && GET_MODE_FBIT (*mode) >= fbit)
break;
- if (mode == VOIDmode || !targetm.scalar_mode_supported_p (mode))
+ if (!mode.exists () || !targetm.scalar_mode_supported_p (*mode))
{
sorry ("GCC cannot support operators with integer types and "
"fixed-point types that have too many integral and "
@@ -2138,7 +2138,7 @@ c_common_fixed_point_type_for_size (unsigned int ibit, unsigned int fbit,
return 0;
}
- return c_common_type_for_mode (mode, satp);
+ return c_common_type_for_mode (*mode, satp);
}
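
/* Illustrative sketch, not part of the patch: the iteration idiom the
   hunk above introduces.  The iterator is an opt_scalar_mode, so the
   loop body dereferences it with '*' and callers test '.exists ()'
   instead of comparing against VOIDmode.  find_fract_mode is a
   made-up name.  */

static opt_scalar_mode
find_fract_mode (unsigned int fbit)
{
  opt_scalar_mode iter;
  FOR_EACH_MODE_IN_CLASS (iter, MODE_FRACT)
    if (GET_MODE_FBIT (*iter) >= fbit)
      return *iter;
  return opt_scalar_mode ();
}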
/* Used for communication between c_common_type_for_mode and
@@ -1109,7 +1109,7 @@ interpret_fixed (const cpp_token *token, unsigned int flags)
memcpy (copy, token->val.str.text, copylen);
copy[copylen] = '\0';
- fixed_from_string (&fixed, copy, TYPE_MODE (type));
+ fixed_from_string (&fixed, copy, SCALAR_TYPE_MODE (type));
/* Create a node with determined type and value. */
value = build_fixed (type, fixed);
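
/* Illustrative sketch, not part of the patch: SCALAR_TYPE_MODE itself
   is not defined in the hunks shown here.  Presumably it is the
   checked counterpart of TYPE_MODE, asserting that the type's mode
   really is scalar, along the lines of:  */

#define SCALAR_TYPE_MODE(NODE) \
  (as_a <scalar_mode> (TYPE_MODE (NODE)))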
@@ -808,11 +808,11 @@ c_common_type (tree t1, tree t2)
if (code1 == FIXED_POINT_TYPE || code2 == FIXED_POINT_TYPE)
{
unsigned int unsignedp = 0, satp = 0;
- machine_mode m1, m2;
+ scalar_mode m1, m2;
unsigned int fbit1, ibit1, fbit2, ibit2, max_fbit, max_ibit;
- m1 = TYPE_MODE (t1);
- m2 = TYPE_MODE (t2);
+ m1 = SCALAR_TYPE_MODE (t1);
+ m2 = SCALAR_TYPE_MODE (t2);
/* If one input type is saturating, the result type is saturating. */
if (TYPE_SATURATING (t1) || TYPE_SATURATING (t2))
@@ -844,7 +844,8 @@ c_common_type (tree t1, tree t2)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
- m1 = mode_for_size (GET_MODE_PRECISION (m1), mclass, 0);
+ m1 = as_a <scalar_mode>
+ (mode_for_size (GET_MODE_PRECISION (m1), mclass, 0));
}
if (code2 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t2))
{
@@ -855,7 +856,8 @@ c_common_type (tree t1, tree t2)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
- m2 = mode_for_size (GET_MODE_PRECISION (m2), mclass, 0);
+ m2 = as_a <scalar_mode>
+ (mode_for_size (GET_MODE_PRECISION (m2), mclass, 0));
}
}
@@ -11217,7 +11219,8 @@ build_binary_op (location_t location, enum tree_code code,
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
- (TYPE_MODE (TREE_TYPE (type0))), 0);
+ (SCALAR_TYPE_MODE
+ (TREE_TYPE (type0))), 0);
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
@@ -11376,7 +11379,8 @@ build_binary_op (location_t location, enum tree_code code,
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
- (TYPE_MODE (TREE_TYPE (type0))), 0);
+ (SCALAR_TYPE_MODE
+ (TREE_TYPE (type0))), 0);
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
@@ -4812,7 +4812,7 @@ expand_debug_expr (tree exp)
GET_MODE_INNER (mode)));
else
{
- machine_mode imode = GET_MODE_INNER (mode);
+ scalar_mode imode = GET_MODE_INNER (mode);
rtx re, im;
if (MEM_P (op0))
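
/* Illustrative sketch, not part of the patch: the assignment above
   needs no as_a because mode_to_inner (see the machmode.h hunks below)
   now returns scalar_mode.  Complex and vector modes yield their
   element mode; a scalar mode is its own inner mode.  */

scalar_mode elt = GET_MODE_INNER (DCmode);    /* DFmode.  */
scalar_mode self = GET_MODE_INNER (SImode);   /* SImode itself.  */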
@@ -10823,7 +10823,7 @@ aarch64_simd_container_mode (machine_mode mode, unsigned width)
/* Return 128-bit container as the preferred SIMD mode for MODE. */
static machine_mode
-aarch64_preferred_simd_mode (machine_mode mode)
+aarch64_preferred_simd_mode (scalar_mode mode)
{
return aarch64_simd_container_mode (mode, 128);
}
@@ -11555,7 +11555,7 @@ void
aarch64_expand_vector_init (rtx target, rtx vals)
{
machine_mode mode = GET_MODE (target);
- machine_mode inner_mode = GET_MODE_INNER (mode);
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
/* The number of vector elements. */
int n_elts = GET_MODE_NUNITS (mode);
/* The number of vector elements which are not constant. */
@@ -14575,7 +14575,7 @@ aarch64_libgcc_floating_mode_supported_p (scalar_float_mode mode)
if MODE is HFmode, and punt to the generic implementation otherwise. */
static bool
-aarch64_scalar_mode_supported_p (machine_mode mode)
+aarch64_scalar_mode_supported_p (scalar_mode mode)
{
return (mode == HFmode
? true
@@ -688,7 +688,7 @@ resolve_reload_operand (rtx op)
indicates only DFmode. */
static bool
-alpha_scalar_mode_supported_p (machine_mode mode)
+alpha_scalar_mode_supported_p (scalar_mode mode)
{
switch (mode)
{
@@ -273,7 +273,7 @@ arc_vector_mode_supported_p (machine_mode mode)
/* Implements target hook TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
static machine_mode
-arc_preferred_simd_mode (machine_mode mode)
+arc_preferred_simd_mode (scalar_mode mode)
{
switch (mode)
{
@@ -248,7 +248,7 @@ static bool arm_output_addr_const_extra (FILE *, rtx);
static bool arm_allocate_stack_slots_for_args (void);
static bool arm_warn_func_return (tree);
static tree arm_promoted_type (const_tree t);
-static bool arm_scalar_mode_supported_p (machine_mode);
+static bool arm_scalar_mode_supported_p (scalar_mode);
static bool arm_frame_pointer_required (void);
static bool arm_can_eliminate (const int, const int);
static void arm_asm_trampoline_template (FILE *);
@@ -260,7 +260,7 @@ static bool xscale_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int *);
static bool fa726te_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int *);
static bool arm_array_mode_supported_p (machine_mode,
unsigned HOST_WIDE_INT);
-static machine_mode arm_preferred_simd_mode (machine_mode);
+static machine_mode arm_preferred_simd_mode (scalar_mode);
static bool arm_class_likely_spilled_p (reg_class_t);
static HOST_WIDE_INT arm_vector_alignment (const_tree type);
static bool arm_vector_alignment_reachable (const_tree type, bool is_packed);
@@ -23317,7 +23317,7 @@ arm_promoted_type (const_tree t)
special-cased in the default hook. */
static bool
-arm_scalar_mode_supported_p (machine_mode mode)
+arm_scalar_mode_supported_p (scalar_mode mode)
{
if (mode == HFmode)
return (arm_fp16_format != ARM_FP16_FORMAT_NONE);
@@ -26633,7 +26633,7 @@ arm_array_mode_supported_p (machine_mode mode,
widths are supported properly by the middle-end. */
static machine_mode
-arm_preferred_simd_mode (machine_mode mode)
+arm_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_NEON)
switch (mode)
@@ -874,7 +874,7 @@ avr_regno_reg_class (int r)
/* Implement `TARGET_SCALAR_MODE_SUPPORTED_P'. */
static bool
-avr_scalar_mode_supported_p (machine_mode mode)
+avr_scalar_mode_supported_p (scalar_mode mode)
{
if (ALL_FIXED_POINT_MODE_P (mode))
return true;
@@ -8922,7 +8922,7 @@ avr_out_fract (rtx_insn *insn, rtx operands[], bool intsigned, int *plen)
const char*
avr_out_round (rtx_insn *insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
{
- machine_mode mode = GET_MODE (xop[0]);
+ scalar_mode mode = as_a <scalar_mode> (GET_MODE (xop[0]));
scalar_int_mode imode = *int_mode_for_mode (mode);
// The smallest fractional bit not cleared by the rounding is 2^(-RP).
int fbit = (int) GET_MODE_FBIT (mode);
@@ -6236,7 +6236,7 @@ c6x_vector_mode_supported_p (machine_mode mode)
/* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
static machine_mode
-c6x_preferred_simd_mode (machine_mode mode)
+c6x_preferred_simd_mode (scalar_mode mode)
{
switch (mode)
{
@@ -6253,7 +6253,7 @@ c6x_preferred_simd_mode (machine_mode mode)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-c6x_scalar_mode_supported_p (machine_mode mode)
+c6x_scalar_mode_supported_p (scalar_mode mode)
{
if (ALL_FIXED_POINT_MODE_P (mode)
&& GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
@@ -2773,7 +2773,7 @@ epiphany_min_divisions_for_recip_mul (machine_mode mode)
}
static machine_mode
-epiphany_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED)
+epiphany_preferred_simd_mode (scalar_mode mode ATTRIBUTE_UNUSED)
{
return TARGET_VECT_DOUBLE ? DImode : SImode;
}
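
/* Illustrative sketch, not part of the patch: every hook conversion in
   this series has the same shape.  A minimal port under the new
   signatures could simply defer to the documented defaults; the
   example_* names are placeholders.  */

static bool
example_scalar_mode_supported_p (scalar_mode mode)
{
  return default_scalar_mode_supported_p (mode);
}

static machine_mode
example_preferred_simd_mode (scalar_mode)
{
  return word_mode;
}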
@@ -44075,7 +44075,7 @@ ix86_expand_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
/* Target hook for scalar_mode_supported_p. */
static bool
-ix86_scalar_mode_supported_p (machine_mode mode)
+ix86_scalar_mode_supported_p (scalar_mode mode)
{
if (DECIMAL_FLOAT_MODE_P (mode))
return default_decimal_float_supported_p ();
@@ -50266,7 +50266,7 @@ ix86_reassociation_width (unsigned int, machine_mode mode)
place emms and femms instructions. */
static machine_mode
-ix86_preferred_simd_mode (machine_mode mode)
+ix86_preferred_simd_mode (scalar_mode mode)
{
if (!TARGET_SSE)
return word_mode;
@@ -309,7 +309,7 @@ static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
-static bool ia64_scalar_mode_supported_p (machine_mode mode);
+static bool ia64_scalar_mode_supported_p (scalar_mode mode);
static bool ia64_vector_mode_supported_p (machine_mode mode);
static bool ia64_legitimate_constant_p (machine_mode, rtx);
static bool ia64_legitimate_address_p (machine_mode, rtx, bool);
@@ -10963,7 +10963,7 @@ ia64_struct_value_rtx (tree fntype,
}
static bool
-ia64_scalar_mode_supported_p (machine_mode mode)
+ia64_scalar_mode_supported_p (scalar_mode mode)
{
switch (mode)
{
@@ -13304,7 +13304,7 @@ mips_vector_mode_supported_p (machine_mode mode)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-mips_scalar_mode_supported_p (machine_mode mode)
+mips_scalar_mode_supported_p (scalar_mode mode)
{
if (ALL_FIXED_POINT_MODE_P (mode)
&& GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
@@ -13316,7 +13316,7 @@ mips_scalar_mode_supported_p (machine_mode mode)
/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
static machine_mode
-mips_preferred_simd_mode (machine_mode mode)
+mips_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_PAIRED_SINGLE_FLOAT
&& mode == SFmode)
@@ -864,7 +864,7 @@ msp430_option_override (void)
#define TARGET_SCALAR_MODE_SUPPORTED_P msp430_scalar_mode_supported_p
static bool
-msp430_scalar_mode_supported_p (machine_mode m)
+msp430_scalar_mode_supported_p (scalar_mode m)
{
if (m == PSImode && msp430x)
return true;
@@ -142,7 +142,7 @@ static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
-static bool pa_scalar_mode_supported_p (machine_mode);
+static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
@@ -6398,7 +6398,7 @@ hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
static bool
-pa_scalar_mode_supported_p (machine_mode mode)
+pa_scalar_mode_supported_p (scalar_mode mode)
{
int precision = GET_MODE_PRECISION (mode);
@@ -160,7 +160,7 @@ static void pdp11_function_arg_advance (cumulative_args_t,
static void pdp11_conditional_register_usage (void);
static bool pdp11_legitimate_constant_p (machine_mode, rtx);
-static bool pdp11_scalar_mode_supported_p (machine_mode);
+static bool pdp11_scalar_mode_supported_p (scalar_mode);
/* Initialize the GCC target structure. */
#undef TARGET_ASM_BYTE_OP
@@ -1909,7 +1909,7 @@ pdp11_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-pdp11_scalar_mode_supported_p (machine_mode mode)
+pdp11_scalar_mode_supported_p (scalar_mode mode)
{
/* Support SFmode even with -mfloat64. */
if (mode == SFmode)
@@ -5465,7 +5465,7 @@ rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
/* Implement targetm.vectorize.preferred_simd_mode. */
static machine_mode
-rs6000_preferred_simd_mode (machine_mode mode)
+rs6000_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_VSX)
switch (mode)
@@ -37651,7 +37651,7 @@ rs6000_eh_return_filter_mode (void)
/* Target hook for scalar_mode_supported_p. */
static bool
-rs6000_scalar_mode_supported_p (machine_mode mode)
+rs6000_scalar_mode_supported_p (scalar_mode mode)
{
/* -m32 does not support TImode. This is the default, from
default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
@@ -1188,7 +1188,7 @@ s390_unwind_word_mode (void)
/* Return true if the back end supports mode MODE. */
static bool
-s390_scalar_mode_supported_p (machine_mode mode)
+s390_scalar_mode_supported_p (scalar_mode mode)
{
/* In contrast to the default implementation reject TImode constants on 31bit
TARGET_ZARCH for ABI compliance. */
@@ -15204,7 +15204,7 @@ s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
/* Return the vector mode to be used for inner mode MODE when doing
vectorization. */
static machine_mode
-s390_preferred_simd_mode (machine_mode mode)
+s390_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_VX)
switch (mode)
@@ -637,7 +637,7 @@ static void sparc_conditional_register_usage (void);
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
-static machine_mode sparc_preferred_simd_mode (machine_mode);
+static machine_mode sparc_preferred_simd_mode (scalar_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_lra_p (void);
static bool sparc_print_operand_punct_valid_p (unsigned char);
@@ -7549,7 +7549,7 @@ sparc_vector_mode_supported_p (machine_mode mode)
/* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
static machine_mode
-sparc_preferred_simd_mode (machine_mode mode)
+sparc_preferred_simd_mode (scalar_mode mode)
{
if (TARGET_VIS)
switch (mode)
@@ -287,7 +287,7 @@ spu_option_override (void)
be manipulated in non-trivial ways. In particular, this means all
the arithmetic is supported. */
static bool
-spu_scalar_mode_supported_p (machine_mode mode)
+spu_scalar_mode_supported_p (scalar_mode mode)
{
switch (mode)
{
@@ -106,7 +106,7 @@ tilegx_option_override (void)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-tilegx_scalar_mode_supported_p (machine_mode mode)
+tilegx_scalar_mode_supported_p (scalar_mode mode)
{
switch (mode)
{
@@ -83,7 +83,7 @@ tilepro_option_override (void)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-tilepro_scalar_mode_supported_p (machine_mode mode)
+tilepro_scalar_mode_supported_p (scalar_mode mode)
{
switch (mode)
{
@@ -56,9 +56,11 @@ struct rtx_def;
typedef struct rtx_def *rtx;
typedef const struct rtx_def *const_rtx;
class machine_mode;
+class scalar_mode;
class scalar_int_mode;
class scalar_float_mode;
template<typename> class opt_mode;
+typedef opt_mode<scalar_mode> opt_scalar_mode;
typedef opt_mode<scalar_int_mode> opt_scalar_int_mode;
typedef opt_mode<scalar_float_mode> opt_scalar_float_mode;
template<typename> class pod_mode;
@@ -4866,8 +4866,8 @@ cp_build_binary_op (location_t location,
}
/* Always construct signed integer vector type. */
- intt = c_common_type_for_size (GET_MODE_BITSIZE
- (TYPE_MODE (TREE_TYPE (type0))), 0);
+ intt = c_common_type_for_size
+ (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type0))), 0);
if (!intt)
{
if (complain & tf_error)
@@ -4204,7 +4204,7 @@ hook returns true for both @code{ptr_mode} and @code{Pmode}.
Define this to return nonzero if the memory reference @var{ref} may alias with the system C library errno location. The default version of this hook assumes the system C library errno location is either a declaration of type int or accessed by dereferencing a pointer to int.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_SCALAR_MODE_SUPPORTED_P (machine_mode @var{mode})
+@deftypefn {Target Hook} bool TARGET_SCALAR_MODE_SUPPORTED_P (scalar_mode @var{mode})
Define this to return nonzero if the port is prepared to handle
insns involving scalar mode @var{mode}. For a scalar mode to be
considered supported, all the basic arithmetic and comparisons
@@ -5787,7 +5787,7 @@ the elements in the vectors should be of type @var{type}. @var{is_packed}
parameter is true if the memory access is defined in a packed struct.
@end deftypefn
-@deftypefn {Target Hook} machine_mode TARGET_VECTORIZE_PREFERRED_SIMD_MODE (machine_mode @var{mode})
+@deftypefn {Target Hook} machine_mode TARGET_VECTORIZE_PREFERRED_SIMD_MODE (scalar_mode @var{mode})
This hook should return the preferred mode for vectorizing scalar
mode @var{mode}. The default is
equal to @code{word_mode}, because the vectorizer can do some
@@ -15665,10 +15665,11 @@ loc_descriptor (rtx rtl, machine_mode mode,
or a floating-point constant. A CONST_DOUBLE is used whenever
the constant requires more than one word in order to be
adequately represented. We output CONST_DOUBLEs as blocks. */
+ scalar_mode smode = as_a <scalar_mode> (mode);
loc_result = new_loc_descr (DW_OP_implicit_value,
- GET_MODE_SIZE (mode), 0);
+ GET_MODE_SIZE (smode), 0);
#if TARGET_SUPPORTS_WIDE_INT == 0
- if (!SCALAR_FLOAT_MODE_P (mode))
+ if (!SCALAR_FLOAT_MODE_P (smode))
{
loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double;
loc_result->dw_loc_oprnd2.v.val_double
@@ -15677,7 +15678,7 @@ loc_descriptor (rtx rtl, machine_mode mode,
else
#endif
{
- unsigned int length = GET_MODE_SIZE (mode);
+ unsigned int length = GET_MODE_SIZE (smode);
unsigned char *array = ggc_vec_alloc<unsigned char> (length);
insert_float (rtl, array);
@@ -599,7 +599,8 @@ rtx
immed_wide_int_const (const wide_int_ref &v, machine_mode mode)
{
unsigned int len = v.get_len ();
- unsigned int prec = GET_MODE_PRECISION (mode);
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
/* Allow truncation but not extension since we do not know if the
number is signed or unsigned. */
@@ -659,18 +660,10 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, machine_mode mode)
(i.e., i1 consists only from copies of the sign bit, and sign
of i0 and i1 are the same), then we return a CONST_INT for i0.
3) Otherwise, we create a CONST_DOUBLE for i0 and i1. */
- if (mode != VOIDmode)
- {
- gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
- || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
- /* We can get a 0 for an error mark. */
- || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
- || GET_MODE_CLASS (mode) == MODE_POINTER_BOUNDS);
-
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- return gen_int_mode (i0, mode);
- }
+ scalar_mode smode;
+ if (is_a <scalar_mode> (mode, &smode)
+ && GET_MODE_BITSIZE (smode) <= HOST_BITS_PER_WIDE_INT)
+ return gen_int_mode (i0, mode);
/* If this integer fits in one word, return a CONST_INT. */
if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0))
@@ -5867,6 +5860,7 @@ init_emit_once (void)
int i;
machine_mode mode;
scalar_float_mode double_mode;
+ opt_scalar_mode smode_iter;
/* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE,
CONST_FIXED, and memory attribute hash tables. */
@@ -5981,62 +5975,66 @@ init_emit_once (void)
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}
- FOR_EACH_MODE_IN_CLASS (mode, MODE_FRACT)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_FRACT)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
}
- FOR_EACH_MODE_IN_CLASS (mode, MODE_UFRACT)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UFRACT)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
}
- FOR_EACH_MODE_IN_CLASS (mode, MODE_ACCUM)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_ACCUM)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}
- FOR_EACH_MODE_IN_CLASS (mode, MODE_UACCUM)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UACCUM)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT)
@@ -6069,10 +6067,11 @@ init_emit_once (void)
if (STORE_FLAG_VALUE == 1)
const_tiny_rtx[1][(int) BImode] = const1_rtx;
- FOR_EACH_MODE_IN_CLASS (mode, MODE_POINTER_BOUNDS)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
{
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (mode));
- const_tiny_rtx[0][mode] = immed_wide_int_const (wi_zero, mode);
+ scalar_mode smode = *smode_iter;
+ wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
+ const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
}
pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
@@ -48,14 +48,16 @@ static rtx break_out_memory_refs (rtx);
HOST_WIDE_INT
trunc_int_for_mode (HOST_WIDE_INT c, machine_mode mode)
{
- int width = GET_MODE_PRECISION (mode);
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ scalar_mode smode = as_a <scalar_mode> (mode);
+ int width = GET_MODE_PRECISION (smode);
/* You want to truncate to a _what_? */
gcc_assert (SCALAR_INT_MODE_P (mode)
|| POINTER_BOUNDS_MODE_P (mode));
/* Canonicalize BImode to 0 and STORE_FLAG_VALUE. */
- if (mode == BImode)
+ if (smode == BImode)
return c & 1 ? STORE_FLAG_VALUE : 0;
/* Sign-extend for the requested mode. */
@@ -784,6 +786,7 @@ promote_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode,
#ifdef PROMOTE_MODE
enum tree_code code;
int unsignedp;
+ scalar_mode smode;
#endif
/* For libcalls this is invoked without TYPE from the backends
@@ -803,9 +806,11 @@ promote_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode,
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE:
- PROMOTE_MODE (mode, unsignedp, type);
+ /* Values of these types always have scalar mode. */
+ smode = as_a <scalar_mode> (mode);
+ PROMOTE_MODE (smode, unsignedp, type);
*punsignedp = unsignedp;
- return mode;
+ return smode;
#ifdef POINTERS_EXTEND_UNSIGNED
case REFERENCE_TYPE:
@@ -759,16 +759,16 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* Use vec_set patterns for inserting parts of vectors whenever
available. */
- if (VECTOR_MODE_P (GET_MODE (op0))
+ machine_mode outermode = GET_MODE (op0);
+ scalar_mode innermode = GET_MODE_INNER (outermode);
+ if (VECTOR_MODE_P (outermode)
&& !MEM_P (op0)
- && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
- && fieldmode == GET_MODE_INNER (GET_MODE (op0))
- && bitsize == GET_MODE_UNIT_BITSIZE (GET_MODE (op0))
- && !(bitnum % GET_MODE_UNIT_BITSIZE (GET_MODE (op0))))
+ && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
+ && fieldmode == innermode
+ && bitsize == GET_MODE_BITSIZE (innermode)
+ && !(bitnum % GET_MODE_BITSIZE (innermode)))
{
struct expand_operand ops[3];
- machine_mode outermode = GET_MODE (op0);
- machine_mode innermode = GET_MODE_INNER (outermode);
enum insn_code icode = optab_handler (vec_set_optab, outermode);
int pos = bitnum / GET_MODE_BITSIZE (innermode);
@@ -1611,15 +1611,15 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* Use vec_extract patterns for extracting parts of vectors whenever
available. */
- if (VECTOR_MODE_P (GET_MODE (op0))
+ machine_mode outermode = GET_MODE (op0);
+ scalar_mode innermode = GET_MODE_INNER (outermode);
+ if (VECTOR_MODE_P (outermode)
&& !MEM_P (op0)
- && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
- && ((bitnum + bitsize - 1) / GET_MODE_UNIT_BITSIZE (GET_MODE (op0))
- == bitnum / GET_MODE_UNIT_BITSIZE (GET_MODE (op0))))
+ && optab_handler (vec_extract_optab, outermode) != CODE_FOR_nothing
+ && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (innermode)
+ == bitnum / GET_MODE_BITSIZE (innermode)))
{
struct expand_operand ops[3];
- machine_mode outermode = GET_MODE (op0);
- machine_mode innermode = GET_MODE_INNER (outermode);
enum insn_code icode = optab_handler (vec_extract_optab, outermode);
unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
@@ -100,6 +100,7 @@ static rtx const_vector_from_tree (tree);
static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
static tree tree_expr_size (const_tree);
static HOST_WIDE_INT int_expr_size (tree);
+static void convert_mode_scalar (rtx, rtx, int);
/* This is run to set up which modes can be used
@@ -214,17 +215,7 @@ convert_move (rtx to, rtx from, int unsignedp)
{
machine_mode to_mode = GET_MODE (to);
machine_mode from_mode = GET_MODE (from);
- int to_real = SCALAR_FLOAT_MODE_P (to_mode);
- int from_real = SCALAR_FLOAT_MODE_P (from_mode);
- enum insn_code code;
- rtx libcall;
-
- /* rtx code for making an equivalent value. */
- enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
- : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
-
- gcc_assert (to_real == from_real);
gcc_assert (to_mode != BLKmode);
gcc_assert (from_mode != BLKmode);
@@ -275,6 +266,28 @@ convert_move (rtx to, rtx from, int unsignedp)
return;
}
+ convert_mode_scalar (to, from, unsignedp);
+}
+
+/* Like convert_move, but deals only with scalar modes. */
+
+static void
+convert_mode_scalar (rtx to, rtx from, int unsignedp)
+{
+ /* Both modes should be scalar types. */
+ scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
+ scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
+ bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
+ bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
+ enum insn_code code;
+ rtx libcall;
+
+ gcc_assert (to_real == from_real);
+
+ /* rtx code for making an equivalent value. */
+ enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
+ : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
+
if (to_real)
{
rtx value;
@@ -411,7 +424,7 @@ convert_move (rtx to, rtx from, int unsignedp)
rtx fill_value;
rtx lowfrom;
int i;
- machine_mode lowpart_mode;
+ scalar_mode lowpart_mode;
int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
/* Try converting directly if the insn is supported. */
@@ -544,23 +557,28 @@ convert_move (rtx to, rtx from, int unsignedp)
}
else
{
- machine_mode intermediate;
+ scalar_mode intermediate;
rtx tmp;
int shift_amount;
/* Search for a mode to convert via. */
- FOR_EACH_MODE_FROM (intermediate, from_mode)
- if (((can_extend_p (to_mode, intermediate, unsignedp)
- != CODE_FOR_nothing)
- || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
- && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, intermediate)))
- && (can_extend_p (intermediate, from_mode, unsignedp)
- != CODE_FOR_nothing))
- {
- convert_move (to, convert_to_mode (intermediate, from,
- unsignedp), unsignedp);
- return;
- }
+ opt_scalar_mode intermediate_iter;
+ FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
+ {
+ scalar_mode intermediate = *intermediate_iter;
+ if (((can_extend_p (to_mode, intermediate, unsignedp)
+ != CODE_FOR_nothing)
+ || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
+ && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
+ intermediate)))
+ && (can_extend_p (intermediate, from_mode, unsignedp)
+ != CODE_FOR_nothing))
+ {
+ convert_move (to, convert_to_mode (intermediate, from,
+ unsignedp), unsignedp);
+ return;
+ }
+ }
/* No suitable intermediate mode.
Generate what we need with shifts. */
@@ -3107,7 +3125,7 @@ void
write_complex_part (rtx cplx, rtx val, bool imag_p)
{
machine_mode cmode;
- machine_mode imode;
+ scalar_mode imode;
unsigned ibitsize;
if (GET_CODE (cplx) == CONCAT)
@@ -3168,7 +3186,8 @@ write_complex_part (rtx cplx, rtx val, bool imag_p)
rtx
read_complex_part (rtx cplx, bool imag_p)
{
- machine_mode cmode, imode;
+ machine_mode cmode;
+ scalar_mode imode;
unsigned ibitsize;
if (GET_CODE (cplx) == CONCAT)
@@ -3364,7 +3383,7 @@ emit_move_resolve_push (machine_mode mode, rtx x)
rtx_insn *
emit_move_complex_push (machine_mode mode, rtx x, rtx y)
{
- machine_mode submode = GET_MODE_INNER (mode);
+ scalar_mode submode = GET_MODE_INNER (mode);
bool imag_first;
#ifdef PUSH_ROUNDING
@@ -7764,7 +7783,7 @@ expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
The expression is therefore always offset by the size of the
scalar type. */
offset = 0;
- bitpos = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)));
+ bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
inner = TREE_OPERAND (exp, 0);
break;
@@ -9261,7 +9280,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
GET_MODE_INNER (GET_MODE (target)), 0);
if (reg_overlap_mentioned_p (temp, op1))
{
- machine_mode imode = GET_MODE_INNER (GET_MODE (target));
+ scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
temp = adjust_address_nv (target, imode,
GET_MODE_SIZE (imode));
if (reg_overlap_mentioned_p (temp, op0))
@@ -9368,7 +9387,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
{
tree sel_type = TREE_TYPE (treeop2);
machine_mode vmode
- = mode_for_vector (TYPE_MODE (TREE_TYPE (sel_type)),
+ = mode_for_vector (SCALAR_TYPE_MODE (TREE_TYPE (sel_type)),
TYPE_VECTOR_SUBPARTS (sel_type));
gcc_assert (GET_MODE_CLASS (vmode) == MODE_VECTOR_INT);
op2 = simplify_subreg (vmode, op2, TYPE_MODE (sel_type), 0);
@@ -86,7 +86,7 @@ check_real_for_fixed_mode (REAL_VALUE_TYPE *real_value, machine_mode mode)
The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
FIXED_VALUE_TYPE
-fixed_from_double_int (double_int payload, machine_mode mode)
+fixed_from_double_int (double_int payload, scalar_mode mode)
{
FIXED_VALUE_TYPE value;
@@ -108,7 +108,7 @@ fixed_from_double_int (double_int payload, machine_mode mode)
/* Initialize from a decimal or hexadecimal string. */
void
-fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, machine_mode mode)
+fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, scalar_mode mode)
{
REAL_VALUE_TYPE real_value, fixed_value, base_value;
unsigned int fbit;
@@ -803,7 +803,7 @@ fixed_compare (int icode, const FIXED_VALUE_TYPE *op0,
Return true, if !SAT_P and overflow. */
bool
-fixed_convert (FIXED_VALUE_TYPE *f, machine_mode mode,
+fixed_convert (FIXED_VALUE_TYPE *f, scalar_mode mode,
const FIXED_VALUE_TYPE *a, bool sat_p)
{
bool overflow_p = false;
@@ -947,7 +947,7 @@ fixed_convert (FIXED_VALUE_TYPE *f, machine_mode mode,
Return true, if !SAT_P and overflow. */
bool
-fixed_convert_from_int (FIXED_VALUE_TYPE *f, machine_mode mode,
+fixed_convert_from_int (FIXED_VALUE_TYPE *f, scalar_mode mode,
double_int a, bool unsigned_p, bool sat_p)
{
bool overflow_p = false;
@@ -1031,7 +1031,7 @@ fixed_convert_from_int (FIXED_VALUE_TYPE *f, machine_mode mode,
Return true, if !SAT_P and overflow. */
bool
-fixed_convert_from_real (FIXED_VALUE_TYPE *f, machine_mode mode,
+fixed_convert_from_real (FIXED_VALUE_TYPE *f, scalar_mode mode,
const REAL_VALUE_TYPE *a, bool sat_p)
{
bool overflow_p = false;
@@ -1090,7 +1090,7 @@ fixed_convert_from_real (FIXED_VALUE_TYPE *f, machine_mode mode,
/* Convert to a new real mode from a fixed-point. */
void
-real_convert_from_fixed (REAL_VALUE_TYPE *r, machine_mode mode,
+real_convert_from_fixed (REAL_VALUE_TYPE *r, scalar_mode mode,
const FIXED_VALUE_TYPE *f)
{
REAL_VALUE_TYPE base_value, fixed_value, real_value;
@@ -47,14 +47,13 @@ extern rtx const_fixed_from_fixed_value (FIXED_VALUE_TYPE, machine_mode);
/* Construct a FIXED_VALUE from a bit payload and machine mode MODE.
The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
-extern FIXED_VALUE_TYPE fixed_from_double_int (double_int,
- machine_mode);
+extern FIXED_VALUE_TYPE fixed_from_double_int (double_int, scalar_mode);
/* Return a CONST_FIXED from a bit payload and machine mode MODE.
The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
static inline rtx
const_fixed_from_double_int (double_int payload,
- machine_mode mode)
+ scalar_mode mode)
{
return
const_fixed_from_fixed_value (fixed_from_double_int (payload, mode),
@@ -63,25 +62,25 @@ const_fixed_from_double_int (double_int payload,
/* Initialize from a decimal or hexadecimal string. */
extern void fixed_from_string (FIXED_VALUE_TYPE *, const char *,
- machine_mode);
+ scalar_mode);
/* In tree.c: wrap up a FIXED_VALUE_TYPE in a tree node. */
extern tree build_fixed (tree, FIXED_VALUE_TYPE);
/* Extend or truncate to a new mode. */
-extern bool fixed_convert (FIXED_VALUE_TYPE *, machine_mode,
+extern bool fixed_convert (FIXED_VALUE_TYPE *, scalar_mode,
const FIXED_VALUE_TYPE *, bool);
/* Convert to a fixed-point mode from an integer. */
-extern bool fixed_convert_from_int (FIXED_VALUE_TYPE *, machine_mode,
+extern bool fixed_convert_from_int (FIXED_VALUE_TYPE *, scalar_mode,
double_int, bool, bool);
/* Convert to a fixed-point mode from a real. */
-extern bool fixed_convert_from_real (FIXED_VALUE_TYPE *, machine_mode,
+extern bool fixed_convert_from_real (FIXED_VALUE_TYPE *, scalar_mode,
const REAL_VALUE_TYPE *, bool);
/* Convert to a real mode from a fixed-point. */
-extern void real_convert_from_fixed (REAL_VALUE_TYPE *, machine_mode,
+extern void real_convert_from_fixed (REAL_VALUE_TYPE *, scalar_mode,
const FIXED_VALUE_TYPE *);
/* Compare two fixed-point objects for bitwise identity. */
@@ -2025,8 +2025,8 @@ fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
tree t;
bool overflow_p;
- overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
- TYPE_SATURATING (type));
+ overflow_p = fixed_convert (&value, SCALAR_TYPE_MODE (type),
+ &TREE_FIXED_CST (arg1), TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
@@ -2054,7 +2054,7 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1)
else
di.high = TREE_INT_CST_ELT (arg1, 1);
- overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type), di,
+ overflow_p = fixed_convert_from_int (&value, SCALAR_TYPE_MODE (type), di,
TYPE_UNSIGNED (TREE_TYPE (arg1)),
TYPE_SATURATING (type));
t = build_fixed (type, value);
@@ -2075,7 +2075,7 @@ fold_convert_const_fixed_from_real (tree type, const_tree arg1)
tree t;
bool overflow_p;
- overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
+ overflow_p = fixed_convert_from_real (&value, SCALAR_TYPE_MODE (type),
&TREE_REAL_CST (arg1),
TYPE_SATURATING (type));
t = build_fixed (type, value);
@@ -6940,7 +6940,7 @@ static int
native_encode_fixed (const_tree expr, unsigned char *ptr, int len, int off)
{
tree type = TREE_TYPE (expr);
- machine_mode mode = TYPE_MODE (type);
+ scalar_mode mode = SCALAR_TYPE_MODE (type);
int total_bytes = GET_MODE_SIZE (mode);
FIXED_VALUE_TYPE value;
tree i_value, i_type;
@@ -7042,7 +7042,7 @@ native_encode_complex (const_tree expr, unsigned char *ptr, int len, int off)
return 0;
part = TREE_IMAGPART (expr);
if (off != -1)
- off = MAX (0, off - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (part))));
+ off = MAX (0, off - GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (part))));
isize = native_encode_expr (part, ptr+rsize, len-rsize, off);
if (off == -1
&& isize != rsize)
@@ -7066,7 +7066,7 @@ native_encode_vector (const_tree expr, unsigned char *ptr, int len, int off)
offset = 0;
count = VECTOR_CST_NELTS (expr);
itype = TREE_TYPE (TREE_TYPE (expr));
- size = GET_MODE_SIZE (TYPE_MODE (itype));
+ size = GET_MODE_SIZE (SCALAR_TYPE_MODE (itype));
for (i = 0; i < count; i++)
{
if (off >= size)
@@ -7194,7 +7194,8 @@ native_interpret_int (tree type, const unsigned char *ptr, int len)
static tree
native_interpret_fixed (tree type, const unsigned char *ptr, int len)
{
- int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
+ scalar_mode mode = SCALAR_TYPE_MODE (type);
+ int total_bytes = GET_MODE_SIZE (mode);
double_int result;
FIXED_VALUE_TYPE fixed_value;
@@ -7203,7 +7204,7 @@ native_interpret_fixed (tree type, const unsigned char *ptr, int len)
return NULL_TREE;
result = double_int::from_buffer (ptr, total_bytes);
- fixed_value = fixed_from_double_int (result, TYPE_MODE (type));
+ fixed_value = fixed_from_double_int (result, mode);
return build_fixed (type, fixed_value);
}
@@ -7225,7 +7226,6 @@ native_interpret_real (tree type, const unsigned char *ptr, int len)
REAL_VALUE_TYPE r;
long tmp[6];
- total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
if (total_bytes > len || total_bytes > 24)
return NULL_TREE;
int words = (32 / BITS_PER_UNIT) / UNITS_PER_WORD;
@@ -7280,7 +7280,7 @@ native_interpret_complex (tree type, const unsigned char *ptr, int len)
int size;
etype = TREE_TYPE (type);
- size = GET_MODE_SIZE (TYPE_MODE (etype));
+ size = GET_MODE_SIZE (SCALAR_TYPE_MODE (etype));
if (size * 2 > len)
return NULL_TREE;
rpart = native_interpret_expr (etype, ptr, size);
@@ -7305,7 +7305,7 @@ native_interpret_vector (tree type, const unsigned char *ptr, int len)
tree *elements;
etype = TREE_TYPE (type);
- size = GET_MODE_SIZE (TYPE_MODE (etype));
+ size = GET_MODE_SIZE (SCALAR_TYPE_MODE (etype));
count = TYPE_VECTOR_SUBPARTS (type);
if (size * count > len)
return NULL_TREE;
@@ -3371,8 +3371,7 @@ assign_parm_setup_reg (struct assign_parm_data_all *all, tree parm,
/* Mark complex types separately. */
if (GET_CODE (parmreg) == CONCAT)
{
- machine_mode submode
- = GET_MODE_INNER (GET_MODE (parmreg));
+ scalar_mode submode = GET_MODE_INNER (GET_MODE (parmreg));
int regnor = REGNO (XEXP (parmreg, 0));
int regnoi = REGNO (XEXP (parmreg, 1));
rtx stackr = adjust_address_nv (data->stack_parm, submode, 0);
@@ -3509,7 +3508,7 @@ assign_parms_unsplit_complex (struct assign_parm_data_all *all,
&& targetm.calls.split_complex_arg (TREE_TYPE (parm)))
{
rtx tmp, real, imag;
- machine_mode inner = GET_MODE_INNER (DECL_MODE (parm));
+ scalar_mode inner = GET_MODE_INNER (DECL_MODE (parm));
real = DECL_RTL (fnargs[i]);
imag = DECL_RTL (fnargs[i + 1]);
@@ -1141,6 +1141,13 @@ get_mode_class (struct mode_data *mode)
case MODE_PARTIAL_INT:
return "scalar_int_mode";
+ case MODE_FRACT:
+ case MODE_UFRACT:
+ case MODE_ACCUM:
+ case MODE_UACCUM:
+ case MODE_POINTER_BOUNDS:
+ return "scalar_mode";
+
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
return "scalar_float_mode";
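
/* Illustrative note, not part of the patch: get_mode_class picks the
   C++ wrapper class under which genmodes declares each mode constant,
   so a fractional mode would presumably be emitted along the lines of
   'extern const pod_mode<scalar_mode> QQmode;' (the exact generated
   form is an assumption).  */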
@@ -381,6 +381,58 @@ scalar_float_mode::from_int (int i)
return machine_mode_enum (i);
}
+/* Represents a machine mode that is known to be scalar.  */
+class scalar_mode
+{
+public:
+ ALWAYS_INLINE scalar_mode () {}
+ ALWAYS_INLINE scalar_mode (const scalar_int_mode &m) : m_mode (m) {}
+ ALWAYS_INLINE scalar_mode (const scalar_float_mode &m) : m_mode (m) {}
+ ALWAYS_INLINE scalar_mode (const scalar_int_mode_pod &m) : m_mode (m) {}
+ ALWAYS_INLINE operator machine_mode_enum () const { return m_mode; }
+
+ static bool includes_p (machine_mode_enum);
+ static scalar_mode from_int (int);
+
+PROTECT_ENUM_CONVERSION:
+ ALWAYS_INLINE scalar_mode (machine_mode_enum m) : m_mode (m) {}
+
+protected:
+ machine_mode_enum m_mode;
+};
+
+/* Return true if M represents some kind of scalar value. */
+
+inline bool
+scalar_mode::includes_p (machine_mode_enum m)
+{
+ switch (GET_MODE_CLASS (m))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_FRACT:
+ case MODE_UFRACT:
+ case MODE_ACCUM:
+ case MODE_UACCUM:
+ case MODE_FLOAT:
+ case MODE_DECIMAL_FLOAT:
+ case MODE_POINTER_BOUNDS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Return M as a scalar_mode. This function should only be used by
+ utility functions; general code should use as_a<T> instead. */
+
+ALWAYS_INLINE scalar_mode
+scalar_mode::from_int (int i)
+{
+ return machine_mode_enum (i);
+}
+
/* Represents a general machine mode (scalar or non-scalar). */
class machine_mode
{
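
/* Illustrative sketch, not part of the patch: the two call styles used
   throughout this series.  as_a aborts when includes_p fails (e.g. on
   a vector mode); is_a is the tentative form.  example_precision is a
   made-up name.  */

static unsigned int
example_precision (machine_mode mode)
{
  scalar_mode smode;
  if (is_a <scalar_mode> (mode, &smode))
    return GET_MODE_PRECISION (smode);
  /* Vector modes fail the test; fall back to the element mode.  */
  return GET_MODE_PRECISION (GET_MODE_INNER (mode));
}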
@@ -434,14 +486,14 @@ mode_to_precision (machine_mode_enum mode)
/* Return the base GET_MODE_INNER value for MODE.  */
-ALWAYS_INLINE machine_mode
+ALWAYS_INLINE scalar_mode
mode_to_inner (machine_mode_enum mode)
{
#if GCC_VERSION >= 4001
- return (machine_mode_enum) (__builtin_constant_p (mode) ?
- mode_inner_inline (mode) : mode_inner[mode]);
+ return scalar_mode::from_int (__builtin_constant_p (mode) ?
+ mode_inner_inline (mode) : mode_inner[mode]);
#else
- return (machine_mode_enum) mode_inner[mode];
+ return scalar_mode::from_int (mode_inner[mode]);
#endif
}
@@ -609,7 +661,7 @@ extern machine_mode bitwise_mode_for_mode (machine_mode);
/* Return a mode that is suitable for representing a vector,
or BLKmode on failure. */
-extern machine_mode mode_for_vector (machine_mode, unsigned);
+extern machine_mode mode_for_vector (scalar_mode, unsigned);
/* A class for iterating through possible bitfield modes. */
class bit_field_mode_iterator
@@ -713,6 +765,21 @@ is_int_mode (machine_mode mode, T *int_mode)
return false;
}
+/* Return true if MODE has class MODE_FLOAT, storing it as a
+ scalar_float_mode in *FLOAT_MODE if so. */
+
+template<typename T>
+inline bool
+is_float_mode (machine_mode mode, T *float_mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ *float_mode = scalar_float_mode::from_int (mode);
+ return true;
+ }
+ return false;
+}
+
namespace mode_iterator
{
/* Start mode iterator *ITER at the first mode in class MCLASS, if any. */
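
/* Illustrative sketch, not part of the patch: is_float_mode mirrors
   is_int_mode, so a caller can accept either kind of scalar with one
   out-parameter, as the omp-expand hunk below does.
   is_scalar_int_or_float is a made-up name.  */

static bool
is_scalar_int_or_float (tree type, scalar_mode *mode)
{
  return (is_int_mode (TYPE_MODE (type), mode)
	  || is_float_mode (TYPE_MODE (type), mode));
}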
@@ -777,6 +844,13 @@ namespace mode_iterator
/* Set mode iterator *ITER to the mode that is two times wider than the
current one, if such a mode exists. */
+ template<typename T>
+ inline void
+ get_2xwider (opt_mode<T> *iter)
+ {
+ *iter = GET_MODE_2XWIDER_MODE (**iter);
+ }
+
inline void
get_2xwider (machine_mode *iter)
{
@@ -4300,16 +4300,18 @@ omp_clause_aligned_alignment (tree clause)
/* Otherwise return implementation defined alignment. */
unsigned int al = 1;
- machine_mode mode, vmode;
+ opt_scalar_mode mode_iter;
int vs = targetm.vectorize.autovectorize_vector_sizes ();
if (vs)
vs = 1 << floor_log2 (vs);
static enum mode_class classes[]
= { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
for (int i = 0; i < 4; i += 2)
- FOR_EACH_MODE_IN_CLASS (mode, classes[i])
+ /* The for loop above dictates that we only walk through scalar classes. */
+ FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
{
- vmode = targetm.vectorize.preferred_simd_mode (mode);
+ scalar_mode mode = *mode_iter;
+ machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
if (GET_MODE_CLASS (vmode) != classes[i + 1])
continue;
while (vs
@@ -12781,18 +12783,20 @@ expand_omp_atomic (struct omp_region *region)
/* __sync builtins require strict data alignment. */
if (exact_log2 (align) >= index)
{
+ scalar_mode mode;
+
/* Atomic load. */
if (loaded_val == stored_val
- && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
- || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
- && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
+ && (is_int_mode (TYPE_MODE (type), &mode)
+ || is_float_mode (TYPE_MODE (type), &mode))
+ && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
&& expand_omp_atomic_load (load_bb, addr, loaded_val, index))
return;
/* Atomic store. */
- if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
- || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
- && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
+ if ((is_int_mode (TYPE_MODE (type), &mode)
+ || is_float_mode (TYPE_MODE (type), &mode))
+ && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
&& store_bb == single_succ (load_bb)
&& first_stmt (store_bb) == store
&& expand_omp_atomic_store (load_bb, addr, loaded_val,
@@ -496,7 +496,7 @@ simd_clone_adjust_return_type (struct cgraph_node *node)
veclen = node->simdclone->vecsize_int;
else
veclen = node->simdclone->vecsize_float;
- veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
+ veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
if (veclen > node->simdclone->simdlen)
veclen = node->simdclone->simdlen;
if (POINTER_TYPE_P (t))
@@ -606,7 +606,7 @@ simd_clone_adjust_argument_types (struct cgraph_node *node)
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
- veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
+ veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
adj.arg_prefix = "simd";
@@ -650,7 +650,7 @@ simd_clone_adjust_argument_types (struct cgraph_node *node)
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
- veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
+ veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
if (sc->mask_mode != VOIDmode)
@@ -792,8 +792,8 @@ simd_clone_init_simd_arrays (struct cgraph_node *node,
arg = DECL_CHAIN (arg);
j++;
}
- elemsize
- = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
+ tree elemtype = TREE_TYPE (TREE_TYPE (arg));
+ elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
build_int_cst (ptype, k * elemsize));
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
@@ -1227,7 +1227,7 @@ simd_clone_adjust (struct cgraph_node *node)
mask_array, iter1, NULL, NULL);
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
- int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
+ int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref)));
if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
{
aref = build1 (VIEW_CONVERT_EXPR,
@@ -524,7 +524,11 @@ can_vec_mask_load_store_p (machine_mode mode,
/* See if there is any chance the mask load or store might be
vectorized. If not, punt. */
- vmode = targetm.vectorize.preferred_simd_mode (mode);
+ scalar_mode smode;
+ if (!is_a <scalar_mode> (mode, &smode))
+ return false;
+
+ vmode = targetm.vectorize.preferred_simd_mode (smode);
if (!VECTOR_MODE_P (vmode))
return false;
@@ -541,9 +545,9 @@ can_vec_mask_load_store_p (machine_mode mode,
{
unsigned int cur = 1 << floor_log2 (vector_sizes);
vector_sizes &= ~cur;
- if (cur <= GET_MODE_SIZE (mode))
+ if (cur <= GET_MODE_SIZE (smode))
continue;
- vmode = mode_for_vector (mode, cur / GET_MODE_SIZE (mode));
+ vmode = mode_for_vector (smode, cur / GET_MODE_SIZE (smode));
mask_mode = targetm.vectorize.get_mask_mode (GET_MODE_NUNITS (vmode),
cur);
if (VECTOR_MODE_P (vmode)
@@ -1228,7 +1228,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
{
/* The scalar may have been extended to be too wide. Truncate
it back to the proper size to fit in the broadcast vector. */
- machine_mode inner_mode = GET_MODE_INNER (mode);
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
if (!CONST_INT_P (op1)
&& (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
> GET_MODE_BITSIZE (inner_mode)))
@@ -4612,6 +4612,7 @@ expand_float (rtx to, rtx from, int unsignedp)
{
enum insn_code icode;
rtx target = to;
+ scalar_mode from_mode, to_mode;
machine_mode fmode, imode;
bool can_do_signed = false;
@@ -4661,8 +4662,12 @@ expand_float (rtx to, rtx from, int unsignedp)
/* Unsigned integer, and no way to convert directly. Convert as signed,
then unconditionally adjust the result. */
- if (unsignedp && can_do_signed)
+ if (unsignedp
+ && can_do_signed
+ && is_a <scalar_mode> (GET_MODE (to), &to_mode)
+ && is_a <scalar_mode> (GET_MODE (from), &from_mode))
{
+ opt_scalar_mode fmode_iter;
rtx_code_label *label = gen_label_rtx ();
rtx temp;
REAL_VALUE_TYPE offset;
@@ -4671,19 +4676,21 @@ expand_float (rtx to, rtx from, int unsignedp)
least as wide as the target. Using FMODE will avoid rounding woes
with unsigned values greater than the signed maximum value. */
- FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
- if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
- && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
+ FOR_EACH_MODE_FROM (fmode_iter, to_mode)
+ if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (*fmode_iter)
+ && can_float_p (*fmode_iter, from_mode, 0) != CODE_FOR_nothing)
break;
- if (fmode == VOIDmode)
+ if (fmode_iter.exists ())
+ fmode = *fmode_iter;
+ else
{
/* There is no such mode. Pretend the target is wide enough. */
- fmode = GET_MODE (to);
+ fmode = to_mode;
/* Avoid double-rounding when TO is narrower than FROM. */
if ((significand_size (fmode) + 1)
- < GET_MODE_PRECISION (GET_MODE (from)))
+ < GET_MODE_PRECISION (from_mode))
{
rtx temp1;
rtx_code_label *neglabel = gen_label_rtx ();
@@ -4695,7 +4702,7 @@ expand_float (rtx to, rtx from, int unsignedp)
|| GET_MODE (target) != fmode)
target = gen_reg_rtx (fmode);
- imode = GET_MODE (from);
+ imode = from_mode;
do_pending_stack_adjust ();
/* Test whether the sign bit is set. */
@@ -4735,7 +4742,7 @@ expand_float (rtx to, rtx from, int unsignedp)
/* If we are about to do some arithmetic to correct for an
unsigned operand, do it in a pseudo-register. */
- if (GET_MODE (to) != fmode
+ if (to_mode != fmode
|| !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
target = gen_reg_rtx (fmode);
@@ -4746,11 +4753,11 @@ expand_float (rtx to, rtx from, int unsignedp)
correct its value by 2**bitwidth. */
do_pending_stack_adjust ();
- emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
+ emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
0, label);
- real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
+ real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
temp = expand_binop (fmode, add_optab, target,
const_double_from_real_value (offset, fmode),
target, 0, OPTAB_LIB_WIDEN);
@@ -4811,6 +4818,7 @@ expand_fix (rtx to, rtx from, int unsignedp)
enum insn_code icode;
rtx target = to;
machine_mode fmode, imode;
+ opt_scalar_mode fmode_iter;
bool must_trunc = false;
/* We first try to find a pair of modes, one real and one integer, at
@@ -4878,67 +4886,74 @@ expand_fix (rtx to, rtx from, int unsignedp)
2^63. The subtraction of 2^63 should not generate any rounding as it
simply clears out that bit. The rest is trivial. */
- if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
- FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
- if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
- && (!DECIMAL_FLOAT_MODE_P (fmode)
- || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
- {
- int bitsize;
- REAL_VALUE_TYPE offset;
- rtx limit;
- rtx_code_label *lab1, *lab2;
- rtx_insn *insn;
-
- bitsize = GET_MODE_PRECISION (GET_MODE (to));
- real_2expN (&offset, bitsize - 1, fmode);
- limit = const_double_from_real_value (offset, fmode);
- lab1 = gen_label_rtx ();
- lab2 = gen_label_rtx ();
-
- if (fmode != GET_MODE (from))
- from = convert_to_mode (fmode, from, 0);
-
- /* See if we need to do the subtraction. */
- do_pending_stack_adjust ();
- emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
- 0, lab1);
-
- /* If not, do the signed "fix" and branch around fixup code. */
- expand_fix (to, from, 0);
- emit_jump_insn (targetm.gen_jump (lab2));
- emit_barrier ();
-
- /* Otherwise, subtract 2**(N-1), convert to signed number,
- then add 2**(N-1). Do the addition using XOR since this
- will often generate better code. */
- emit_label (lab1);
- target = expand_binop (GET_MODE (from), sub_optab, from, limit,
- NULL_RTX, 0, OPTAB_LIB_WIDEN);
- expand_fix (to, target, 0);
- target = expand_binop (GET_MODE (to), xor_optab, to,
- gen_int_mode
- (HOST_WIDE_INT_1 << (bitsize - 1),
- GET_MODE (to)),
- to, 1, OPTAB_LIB_WIDEN);
-
- if (target != to)
- emit_move_insn (to, target);
-
- emit_label (lab2);
-
- if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
- {
- /* Make a place for a REG_NOTE and add it. */
- insn = emit_move_insn (to, to);
- set_dst_reg_note (insn, REG_EQUAL,
- gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
- copy_rtx (from)),
- to);
- }
+ scalar_int_mode to_mode;
+ if (unsignedp
+ && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
+ && HWI_COMPUTABLE_MODE_P (to_mode))
+ FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
+ {
+ scalar_mode fmode = *fmode_iter;
+ if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode,
+ 0, &must_trunc)
+ && (!DECIMAL_FLOAT_MODE_P (fmode)
+ || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
+ {
+ int bitsize;
+ REAL_VALUE_TYPE offset;
+ rtx limit;
+ rtx_code_label *lab1, *lab2;
+ rtx_insn *insn;
+
+ bitsize = GET_MODE_PRECISION (to_mode);
+ real_2expN (&offset, bitsize - 1, fmode);
+ limit = const_double_from_real_value (offset, fmode);
+ lab1 = gen_label_rtx ();
+ lab2 = gen_label_rtx ();
- return;
- }
+ if (fmode != GET_MODE (from))
+ from = convert_to_mode (fmode, from, 0);
+
+ /* See if we need to do the subtraction. */
+ do_pending_stack_adjust ();
+ emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
+ GET_MODE (from), 0, lab1);
+
+ /* If not, do the signed "fix" and branch around fixup code. */
+ expand_fix (to, from, 0);
+ emit_jump_insn (targetm.gen_jump (lab2));
+ emit_barrier ();
+
+ /* Otherwise, subtract 2**(N-1), convert to signed number,
+ then add 2**(N-1). Do the addition using XOR since this
+ will often generate better code. */
+ emit_label (lab1);
+ target = expand_binop (GET_MODE (from), sub_optab, from, limit,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ expand_fix (to, target, 0);
+ target = expand_binop (GET_MODE (to), xor_optab, to,
+ gen_int_mode
+ (HOST_WIDE_INT_1 << (bitsize - 1),
+ GET_MODE (to)),
+ to, 1, OPTAB_LIB_WIDEN);
+
+ if (target != to)
+ emit_move_insn (to, target);
+
+ emit_label (lab2);
+
+ if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
+ {
+ /* Make a place for a REG_NOTE and add it. */
+ insn = emit_move_insn (to, to);
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
+ copy_rtx (from)),
+ to);
+ }
+
+ return;
+ }
+ }
/* We can't do it with an insn, so use a library call. But first ensure
that the mode of TO is at least as wide as SImode, since those are the
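A standalone C sketch of the arithmetic the sequence above emits, assuming a
64-bit target; the function name is illustrative and nothing here is GCC code:

#include <stdint.h>

/* Convert a double to uint64_t using only a signed fix, mirroring the
   expand_fix fallback: values below 2**63 take the signed path; larger
   values have 2**63 subtracted first and XOR-ed back in afterwards.  */
uint64_t
emulated_fixuns (double x)
{
  const double limit = 9223372036854775808.0;  /* 2**63 */
  if (x < limit)
    return (uint64_t) (int64_t) x;
  /* Subtract 2**(N-1), do the signed fix, then add 2**(N-1) back.
     The add is an XOR of the sign bit, as in the RTL above.  */
  return (uint64_t) (int64_t) (x - limit) ^ (UINT64_C (1) << 63);
}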
@@ -2120,8 +2120,7 @@ namespace wi
inline unsigned int
wi::int_traits <rtx_mode_t>::get_precision (const rtx_mode_t &x)
{
- gcc_checking_assert (x.second != BLKmode && x.second != VOIDmode);
- return GET_MODE_PRECISION (x.second);
+ return GET_MODE_PRECISION (as_a <scalar_mode> (x.second));
}
inline wi::storage_ref
@@ -2166,7 +2165,7 @@ namespace wi
inline wi::hwi_with_prec
wi::shwi (HOST_WIDE_INT val, machine_mode_enum mode)
{
- return shwi (val, GET_MODE_PRECISION (mode));
+ return shwi (val, GET_MODE_PRECISION (as_a <scalar_mode> (mode)));
}
/* Produce the smallest number that is represented in MODE. The precision
@@ -2174,7 +2173,7 @@ wi::shwi (HOST_WIDE_INT val, machine_mode_enum mode)
inline wide_int
wi::min_value (machine_mode_enum mode, signop sgn)
{
- return min_value (GET_MODE_PRECISION (mode), sgn);
+ return min_value (GET_MODE_PRECISION (as_a <scalar_mode> (mode)), sgn);
}
/* Produce the largest number that is represented in MODE. The precision
@@ -2182,7 +2181,7 @@ wi::min_value (machine_mode_enum mode, signop sgn)
inline wide_int
wi::max_value (machine_mode_enum mode, signop sgn)
{
- return max_value (GET_MODE_PRECISION (mode), sgn);
+ return max_value (GET_MODE_PRECISION (as_a <scalar_mode> (mode)), sgn);
}
extern void init_rtlanal (void);
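These wide-int hunks lean on the is_a/as_a idiom; a minimal illustrative
fragment, assuming a machine_mode MODE in scope (not taken from the patch):

  /* as_a <T> is a checked downcast -- it asserts in checking builds --
     while is_a <T> tests the mode's class and can extract it.  */
  scalar_mode smode;
  unsigned int prec;
  if (is_a <scalar_mode> (mode, &smode))
    prec = GET_MODE_PRECISION (smode);                  /* statically scalar */
  else
    prec = GET_MODE_PRECISION (GET_MODE_INNER (mode));  /* element mode */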
@@ -3612,7 +3612,7 @@ subreg_get_info (unsigned int xregno, machine_mode xmode,
{
nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
unsigned int nunits = GET_MODE_NUNITS (xmode);
- machine_mode xmode_unit = GET_MODE_INNER (xmode);
+ scalar_mode xmode_unit = GET_MODE_INNER (xmode);
gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
gcc_assert (nregs_xmode
== (nunits
@@ -1277,11 +1277,15 @@ sdbout_parms (tree parms)
the parm with the variable's declared type, and adjust
the address if the least significant bytes (which we are
using) are not the first ones. */
+ scalar_mode from_mode, to_mode;
if (BYTES_BIG_ENDIAN
- && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
- current_sym_value +=
- (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
- - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+ && TREE_TYPE (parms) != DECL_ARG_TYPE (parms)
+ && is_a <scalar_mode> (TYPE_MODE (DECL_ARG_TYPE (parms)),
+ &from_mode)
+ && is_a <scalar_mode> (GET_MODE (DECL_RTL (parms)),
+ &to_mode))
+ current_sym_value += (GET_MODE_SIZE (from_mode)
+ - GET_MODE_SIZE (to_mode));
if (MEM_P (DECL_RTL (parms))
&& GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
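A hypothetical big-endian example of the adjustment above, purely for
illustration:

  /* A parm passed in DImode (8 bytes) but declared with an SImode type
     (4 bytes) keeps its least significant -- and therefore used -- bytes
     at the higher addresses on a big-endian target, so the symbol value
     moves by GET_MODE_SIZE (DImode) - GET_MODE_SIZE (SImode) = 4.  */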
@@ -5732,7 +5732,7 @@ simplify_immed_subreg (machine_mode outermode, rtx op,
rtx result_s;
rtvec result_v = NULL;
enum mode_class outer_class;
- machine_mode outer_submode;
+ scalar_mode outer_submode;
int max_bitsize;
/* Some ports misuse CCmode. */
@@ -5798,7 +5798,7 @@ simplify_immed_subreg (machine_mode outermode, rtx op,
case CONST_WIDE_INT:
{
- rtx_mode_t val = rtx_mode_t (el, innermode);
+ rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
unsigned char extend = wi::sign_mask (val);
int prec = wi::get_precision (val);
@@ -478,7 +478,7 @@ bitwise_type_for_mode (machine_mode mode)
is no suitable mode. */
machine_mode
-mode_for_vector (machine_mode innermode, unsigned nunits)
+mode_for_vector (scalar_mode innermode, unsigned nunits)
{
machine_mode mode;
@@ -2145,10 +2145,13 @@ layout_type (tree type)
}
case FIXED_POINT_TYPE:
- /* TYPE_MODE (type) has been set already. */
- TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
- TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
- break;
+ {
+ /* TYPE_MODE (type) has been set already. */
+ scalar_mode mode = SCALAR_TYPE_MODE (type);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
+ break;
+ }
case COMPLEX_TYPE:
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
@@ -2169,7 +2172,8 @@ layout_type (tree type)
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
SET_TYPE_MODE (type,
- mode_for_vector (TYPE_MODE (innertype), nunits));
+ mode_for_vector (SCALAR_TYPE_MODE (innertype),
+ nunits));
TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
@@ -1839,7 +1839,7 @@ mode @var{mode}. The default is\n\
equal to @code{word_mode}, because the vectorizer can do some\n\
transformations even in the absence of specialized @acronym{SIMD} hardware.",
machine_mode,
- (machine_mode mode),
+ (scalar_mode mode),
default_preferred_simd_mode)
/* Returns a mask of vector sizes to iterate over when auto-vectorizing
@@ -3295,7 +3295,7 @@ The default version of this hook returns true for any mode\n\
required to handle the basic C types (as defined by the port).\n\
Included here is the double-word arithmetic supported by the\n\
code in @file{optabs.c}.",
- bool, (machine_mode mode),
+ bool, (scalar_mode mode),
default_scalar_mode_supported_p)
/* Similarly for vector modes. "Supported" here is less strict. At
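A hypothetical port override matching the hook's new signature (the port
name and the extra mode are illustrative only):

  static bool
  example_scalar_mode_supported_p (scalar_mode mode)
  {
    /* Advertise double-word integer arithmetic on top of the default
       set of modes.  */
    if (mode == TImode)
      return true;
    return default_scalar_mode_supported_p (mode);
  }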
@@ -393,7 +393,7 @@ default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
supported by optabs.c. */
bool
-default_scalar_mode_supported_p (machine_mode mode)
+default_scalar_mode_supported_p (scalar_mode mode)
{
int precision = GET_MODE_PRECISION (mode);
@@ -1163,7 +1163,7 @@ default_builtin_support_vector_misalignment (machine_mode mode,
possibly adds/subtracts using bit-twiddling. */
machine_mode
-default_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED)
+default_preferred_simd_mode (scalar_mode)
{
return word_mode;
}
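And a sketch of a non-default override of the SIMD hook, assuming a port
with 128-bit vectors; the names are again illustrative:

  static machine_mode
  example_preferred_simd_mode (scalar_mode mode)
  {
    if (mode == SImode)
      return V4SImode;
    if (mode == SFmode)
      return V4SFmode;
    /* Fall back to the same answer as the default hook.  */
    return word_mode;
  }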
@@ -71,7 +71,7 @@ extern void default_print_operand_address (FILE *, machine_mode, rtx);
extern bool default_print_operand_punct_valid_p (unsigned char);
extern tree default_mangle_assembler_name (const char *);
-extern bool default_scalar_mode_supported_p (machine_mode);
+extern bool default_scalar_mode_supported_p (scalar_mode);
extern bool default_libgcc_floating_mode_supported_p (scalar_float_mode);
extern opt_scalar_float_mode default_floatn_mode (int, bool);
extern bool targhook_words_big_endian (void);
@@ -100,7 +100,7 @@ extern bool
default_builtin_support_vector_misalignment (machine_mode mode,
const_tree,
int, bool);
-extern machine_mode default_preferred_simd_mode (machine_mode mode);
+extern machine_mode default_preferred_simd_mode (scalar_mode mode);
extern unsigned int default_autovectorize_vector_sizes (void);
extern machine_mode default_get_mask_mode (unsigned, unsigned);
extern void *default_init_cost (struct loop *);
@@ -4147,7 +4147,7 @@ verify_gimple_assign_ternary (gassign *stmt)
if (TREE_CODE (TREE_TYPE (rhs3_type)) != INTEGER_TYPE
|| GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs3_type)))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1_type))))
+ != GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (rhs1_type))))
{
error ("invalid mask type in vector permute expression");
debug_generic_expr (lhs_type);
@@ -3846,7 +3846,7 @@ estimate_move_cost (tree type, bool ARG_UNUSED (speed_p))
if (TREE_CODE (type) == VECTOR_TYPE)
{
- machine_mode inner = TYPE_MODE (TREE_TYPE (type));
+ scalar_mode inner = SCALAR_TYPE_MODE (TREE_TYPE (type));
machine_mode simd
= targetm.vectorize.preferred_simd_mode (inner);
int simd_mode_size = GET_MODE_SIZE (simd);
@@ -3227,7 +3227,8 @@ convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
optab this_optab;
enum tree_code wmult_code;
enum insn_code handler;
- machine_mode to_mode, from_mode, actual_mode;
+ scalar_mode to_mode, from_mode;
+ machine_mode actual_mode;
location_t loc = gimple_location (stmt);
int actual_precision;
bool from_unsigned1, from_unsigned2;
@@ -3323,8 +3324,8 @@ convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
else
return false;
- to_mode = TYPE_MODE (type);
- from_mode = TYPE_MODE (type1);
+ to_mode = SCALAR_TYPE_MODE (type);
+ from_mode = SCALAR_TYPE_MODE (type1);
from_unsigned1 = TYPE_UNSIGNED (type1);
from_unsigned2 = TYPE_UNSIGNED (type2);
optype = type1;
@@ -4740,7 +4740,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
(index_vec_type);
/* Get an unsigned integer version of the type of the data vector. */
- int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
+ int scalar_precision
+ = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
tree vectype_unsigned = build_vector_type
(scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
@@ -5981,7 +5982,8 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
else
{
- int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
+ int scalar_precision
+ = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
cr_index_scalar_type = make_unsigned_type (scalar_precision);
cr_index_vector_type = build_vector_type
(cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out));
@@ -939,7 +939,7 @@ vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
tree itype = type;
if (TYPE_PRECISION (type) > TYPE_PRECISION (half_type0) * 2)
itype = build_nonstandard_integer_type
- (GET_MODE_BITSIZE (TYPE_MODE (half_type0)) * 2,
+ (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (half_type0)) * 2,
TYPE_UNSIGNED (type));
/* Pattern detected. */
@@ -3083,7 +3083,7 @@ vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_in,
TYPE_UNSIGNED (type));
if (itype == NULL_TREE
- || GET_MODE_BITSIZE (TYPE_MODE (itype)) != cmp_mode_size)
+ || GET_MODE_BITSIZE (SCALAR_TYPE_MODE (itype)) != cmp_mode_size)
return NULL;
vecitype = get_vectype_for_scalar_type (itype);
@@ -3202,7 +3202,7 @@ check_bool_pattern (tree var, vec_info *vinfo, hash_set<gimple *> &stmts)
if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE)
{
- machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
+ scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
tree itype
= build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
vecitype = get_vectype_for_scalar_type (itype);
@@ -3324,7 +3324,7 @@ adjust_bool_pattern (tree var, tree out_type,
irhs1 = *defs.get (rhs1);
tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
if (TYPE_PRECISION (TREE_TYPE (irhs1))
- == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
+ == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1))))
{
rhs_code = def_rhs_code;
rhs1 = def_rhs1;
@@ -3343,7 +3343,7 @@ adjust_bool_pattern (tree var, tree out_type,
irhs2 = *defs.get (rhs2);
tree def_rhs1 = gimple_assign_rhs1 (def_stmt);
if (TYPE_PRECISION (TREE_TYPE (irhs2))
- == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (def_rhs1))))
+ == GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (def_rhs1))))
{
rhs_code = def_rhs_code;
rhs1 = def_rhs1;
@@ -3393,7 +3393,7 @@ adjust_bool_pattern (tree var, tree out_type,
|| (TYPE_PRECISION (TREE_TYPE (rhs1))
!= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
{
- machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
+ scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
itype
= build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
}
@@ -3548,7 +3548,7 @@ search_type_for_mask_1 (tree var, vec_info *vinfo,
if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
|| !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
{
- machine_mode mode = TYPE_MODE (TREE_TYPE (rhs1));
+ scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
res = build_nonstandard_integer_type (GET_MODE_BITSIZE (mode), 1);
}
else
@@ -3704,7 +3704,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
v1 = SLP_TREE_VEC_STMTS (node).copy ();
SLP_TREE_VEC_STMTS (node).truncate (0);
tree meltype = build_nonstandard_integer_type
- (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ (GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype))), 1);
tree mvectype = get_same_sized_vectype (meltype, vectype);
unsigned k = 0, l;
for (j = 0; j < v0.length (); ++j)
@@ -4036,7 +4036,6 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
vec<tree> interm_types = vNULL;
tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
int op_type;
- machine_mode rhs_mode;
unsigned short fltsz;
/* Is STMT a vectorizable conversion? */
@@ -4182,6 +4181,11 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (ncopies >= 1);
/* Supportable by target? */
+ bool found_mode = false;
+ scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
+ scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
+ opt_scalar_mode rhs_mode_iter;
+
switch (modifier)
{
case NONE:
@@ -4209,13 +4213,13 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
}
if (code != FLOAT_EXPR
- || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
- <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
+ || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
goto unsupported;
- fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
- FOR_EACH_2XWIDER_MODE (rhs_mode, TYPE_MODE (rhs_type))
+ fltsz = GET_MODE_SIZE (lhs_mode);
+ FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
{
+ rhs_mode = *rhs_mode_iter;
if (GET_MODE_SIZE (rhs_mode) > fltsz)
break;
@@ -4242,10 +4246,13 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
vectype_in, &code1, &code2,
&multi_step_cvt, &interm_types))
- break;
+ {
+ found_mode = true;
+ break;
+ }
}
- if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
+ if (!found_mode)
goto unsupported;
if (GET_MODE_SIZE (rhs_mode) == fltsz)
@@ -4266,11 +4273,9 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
break;
if (code != FIX_TRUNC_EXPR
- || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
- >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
+ || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
goto unsupported;
- rhs_mode = TYPE_MODE (rhs_type);
cvt_type
= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
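The found_mode flag replaces the old VOIDmode sentinel because an opt_mode
iterator holds no valid mode once the loop runs off the end.  The idiom,
sketched with a hypothetical acceptable_p predicate and the fltsz bound
from the code above:

  bool found_mode = false;
  opt_scalar_mode iter;
  FOR_EACH_2XWIDER_MODE (iter, start_mode)
    {
      scalar_mode m = *iter;
      if (GET_MODE_SIZE (m) > fltsz)
        break;                  /* no candidate is small enough */
      if (acceptable_p (m))
        {
          found_mode = true;    /* record success explicitly */
          break;
        }
    }
  if (!found_mode)
    goto unsupported;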
@@ -8859,18 +8864,16 @@ free_stmt_vec_info (gimple *stmt)
static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
- machine_mode inner_mode = TYPE_MODE (scalar_type);
+ scalar_mode inner_mode;
machine_mode simd_mode;
- unsigned int nbytes = GET_MODE_SIZE (inner_mode);
int nunits;
tree vectype;
- if (nbytes == 0)
+ if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
+ && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
return NULL_TREE;
- if (GET_MODE_CLASS (inner_mode) != MODE_INT
- && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
- return NULL_TREE;
+ unsigned int nbytes = GET_MODE_SIZE (inner_mode);
/* For vector types of elements whose mode precision doesn't
match their type's precision we use an element type of mode
@@ -2130,8 +2130,9 @@ build_minus_one_cst (tree type)
case FIXED_POINT_TYPE:
/* We can only generate 1 for accum types. */
gcc_assert (ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)));
- return build_fixed (type, fixed_from_double_int (double_int_minus_one,
- TYPE_MODE (type)));
+ return build_fixed (type,
+ fixed_from_double_int (double_int_minus_one,
+ SCALAR_TYPE_MODE (type)));
case VECTOR_TYPE:
{
@@ -1829,6 +1829,8 @@ extern void protected_set_expr_location (tree, location_t);
((const machine_mode &) \
(VECTOR_TYPE_P (TYPE_CHECK (NODE)) \
? vector_type_mode (NODE) : machine_mode ((NODE)->type_common.mode)))
+#define SCALAR_TYPE_MODE(NODE) \
+ (as_a <scalar_mode> (TYPE_CHECK (NODE)->type_common.mode))
#define SCALAR_INT_TYPE_MODE(NODE) \
(as_a <scalar_int_mode> (TYPE_CHECK (NODE)->type_common.mode))
#define SCALAR_FLOAT_TYPE_MODE(NODE) \
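An illustrative use of the new macro, assuming a vector type VECTYPE in
scope:

  /* SCALAR_TYPE_MODE asserts in checking builds that the type's mode is
     scalar; vector types must still go through TYPE_MODE, which may
     defer to vector_type_mode.  */
  scalar_mode elem_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
  machine_mode vec_mode = TYPE_MODE (vectype);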
@@ -120,7 +120,8 @@ tree
ubsan_encode_value (tree t, bool in_expand_p)
{
tree type = TREE_TYPE (t);
- const unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
+ scalar_mode mode = SCALAR_TYPE_MODE (type);
+ const unsigned int bitsize = GET_MODE_BITSIZE (mode);
if (bitsize <= POINTER_SIZE)
switch (TREE_CODE (type))
{
@@ -147,10 +148,8 @@ ubsan_encode_value (tree t, bool in_expand_p)
tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
if (in_expand_p)
{
- rtx mem
- = assign_stack_temp_for_type (TYPE_MODE (type),
- GET_MODE_SIZE (TYPE_MODE (type)),
- type);
+ rtx mem = assign_stack_temp_for_type (mode, GET_MODE_SIZE (mode),
+ type);
SET_DECL_RTL (var, mem);
expand_assignment (var, t, false);
return build_fold_addr_expr (var);
@@ -3853,7 +3853,7 @@ output_constant_pool_2 (machine_mode mode, rtx x, unsigned int align)
case MODE_VECTOR_UACCUM:
{
int i, units;
- machine_mode submode = GET_MODE_INNER (mode);
+ scalar_mode submode = GET_MODE_INNER (mode);
unsigned int subalign = MIN (align, GET_MODE_BITSIZE (submode));
gcc_assert (GET_CODE (x) == CONST_VECTOR);
@@ -4831,7 +4831,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align,
break;
case VECTOR_CST:
{
- machine_mode inner = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ scalar_mode inner = SCALAR_TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
unsigned int nalign = MIN (align, GET_MODE_ALIGNMENT (inner));
int elt_size = GET_MODE_SIZE (inner);
output_constant (VECTOR_CST_ELT (exp, 0), elt_size, align,