commit 126c5b92eea2850367f005ebe89f86c5b8b4e4f9
Author: Alan Lawrence <alan.lawrence@arm.com>
Date: Wed Aug 6 14:23:00 2014 +0100
float64x1 reinterprets can now be plain casts too; garbage-collect the now-unused builtins, expand patterns, and mode iterators.
@@ -122,23 +122,6 @@ aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_unsigned, qualifier_unsigned };
#define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
#define TYPES_CREATE (aarch64_types_unop_qualifiers)
-#define TYPES_REINTERP_SS (aarch64_types_unop_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_su_qualifiers[SIMD_MAX_BUILTIN_ARGS]
- = { qualifier_none, qualifier_unsigned };
-#define TYPES_REINTERP_SU (aarch64_types_unop_su_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_sp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
- = { qualifier_none, qualifier_poly };
-#define TYPES_REINTERP_SP (aarch64_types_unop_sp_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_us_qualifiers[SIMD_MAX_BUILTIN_ARGS]
- = { qualifier_unsigned, qualifier_none };
-#define TYPES_REINTERP_US (aarch64_types_unop_us_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_ps_qualifiers[SIMD_MAX_BUILTIN_ARGS]
- = { qualifier_poly, qualifier_none };
-#define TYPES_REINTERP_PS (aarch64_types_unop_ps_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_none, qualifier_none, qualifier_maybe_immediate };
@@ -324,8 +307,6 @@ aarch64_types_storestruct_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
v4si, v2di, v2sf, v4sf, v2df, di, df)
#define BUILTIN_VB(T, N, MAP) \
VAR2 (T, N, MAP, v8qi, v16qi)
-#define BUILTIN_VD(T, N, MAP) \
- VAR4 (T, N, MAP, v8qi, v4hi, v2si, v2sf)
#define BUILTIN_VD1(T, N, MAP) \
VAR5 (T, N, MAP, v8qi, v4hi, v2si, v2sf, v1df)
#define BUILTIN_VDC(T, N, MAP) \
@@ -1281,24 +1262,6 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
BUILTIN_VALLDI (UNOP, abs, 2)
return fold_build1 (ABS_EXPR, type, args[0]);
break;
- VAR1 (REINTERP_SS, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv2si, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv2sf, 0, v1df)
- BUILTIN_VD (REINTERP_SS, reinterpretv1df, 0)
- BUILTIN_VD (REINTERP_SU, reinterpretv1df, 0)
- VAR1 (REINTERP_US, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv2si, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv2sf, 0, v1df)
- BUILTIN_VD (REINTERP_SP, reinterpretv1df, 0)
- VAR1 (REINTERP_PS, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv2sf, 0, v1df)
- return fold_build1 (VIEW_CONVERT_EXPR, type, args[0]);
VAR1 (UNOP, floatv2si, 2, v2sf)
VAR1 (UNOP, floatv4si, 2, v4sf)
VAR1 (UNOP, floatv2di, 2, v2df)
@@ -1490,7 +1453,6 @@ aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
#undef BUILTIN_V2F
#undef BUILTIN_VALL
#undef BUILTIN_VB
-#undef BUILTIN_VD
#undef BUILTIN_VD1
#undef BUILTIN_VDC
#undef BUILTIN_VDIC
@@ -269,9 +269,6 @@ rtx aarch64_simd_expand_builtin (int, tree, rtx);
void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-/* Emit code for reinterprets. */
-void aarch64_simd_reinterpret (rtx, rtx);
-
void aarch64_split_128bit_move (rtx, rtx);
bool aarch64_split_128bit_move_p (rtx, rtx);
@@ -49,29 +49,6 @@
BUILTIN_VALL (GETLANE, be_checked_get_lane, 0)
- VAR1 (REINTERP_SS, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv2si, 0, v1df)
- VAR1 (REINTERP_SS, reinterpretv2sf, 0, v1df)
- BUILTIN_VD (REINTERP_SS, reinterpretv1df, 0)
-
- BUILTIN_VD (REINTERP_SU, reinterpretv1df, 0)
-
- VAR1 (REINTERP_US, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv2si, 0, v1df)
- VAR1 (REINTERP_US, reinterpretv2sf, 0, v1df)
-
- BUILTIN_VD (REINTERP_SP, reinterpretv1df, 0)
-
- VAR1 (REINTERP_PS, reinterpretdi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv8qi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv4hi, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv2si, 0, v1df)
- VAR1 (REINTERP_PS, reinterpretv2sf, 0, v1df)
-
/* Implemented by aarch64_<sur>q<r>shl<mode>. */
BUILTIN_VSDQ_I (BINOP, sqshl, 0)
BUILTIN_VSDQ_I (BINOP_UUS, uqshl, 0)
@@ -2350,114 +2350,6 @@
DONE;
})
-(define_expand "aarch64_reinterpretv8qi<mode>"
- [(match_operand:V8QI 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4hi<mode>"
- [(match_operand:V4HI 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2si<mode>"
- [(match_operand:V2SI 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2sf<mode>"
- [(match_operand:V2SF 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretdi<mode>"
- [(match_operand:DI 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv1df<mode>"
- [(match_operand:V1DF 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv16qi<mode>"
- [(match_operand:V16QI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv8hi<mode>"
- [(match_operand:V8HI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4si<mode>"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4sf<mode>"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2di<mode>"
- [(match_operand:V2DI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2df<mode>"
- [(match_operand:V2DF 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
;; In this insn, operand 1 should be low, and operand 2 the high part of the
;; dest vector.
@@ -7941,14 +7941,6 @@ aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
error ("constant out of range");
}
-/* Emit code to reinterpret one AdvSIMD type as another,
- without altering bits. */
-void
-aarch64_simd_reinterpret (rtx dest, rtx src)
-{
- emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
-}
-
/* Emit code to place a AdvSIMD pair result in memory locations (with equal
registers). */
void
@@ -2662,7 +2662,7 @@ vgetq_lane_u64 (uint64x2_t __a, const int __b)
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vreinterpret_p8_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv8qiv1df_ps (__a);
+ return (poly8x8_t) __a;
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
@@ -2794,7 +2794,7 @@ vreinterpretq_p8_p16 (poly16x8_t __a)
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vreinterpret_p16_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv4hiv1df_ps (__a);
+ return (poly16x4_t) __a;
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
@@ -2926,7 +2926,7 @@ vreinterpretq_p16_p8 (poly8x16_t __a)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vreinterpret_f32_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv2sfv1df (__a);
+ return (float32x2_t) __a;
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
@@ -3058,37 +3058,37 @@ vreinterpretq_f32_p16 (poly16x8_t __a)
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_f32 (float32x2_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv2sf (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_p8 (poly8x8_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv8qi_sp (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_p16 (poly16x4_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv4hi_sp (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_s8 (int8x8_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv8qi (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_s16 (int16x4_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv4hi (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_s32 (int32x2_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv2si (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
@@ -3100,19 +3100,19 @@ vreinterpret_f64_s64 (int64x1_t __a)
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_u8 (uint8x8_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv8qi_su (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_u16 (uint16x4_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv4hi_su (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_u32 (uint32x2_t __a)
{
- return __builtin_aarch64_reinterpretv1dfv2si_su (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
@@ -3190,7 +3190,7 @@ vreinterpretq_f64_u64 (uint64x2_t __a)
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vreinterpret_s64_f64 (float64x1_t __a)
{
- return (int64x1_t) {__builtin_aarch64_reinterpretdiv1df (__a)};
+ return (int64x1_t) __a;
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -3322,7 +3322,7 @@ vreinterpretq_s64_p16 (poly16x8_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vreinterpret_u64_f64 (float64x1_t __a)
{
- return (uint64x1_t) {__builtin_aarch64_reinterpretdiv1df_us (__a)};
+ return (uint64x1_t) __a;
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -3454,7 +3454,7 @@ vreinterpretq_u64_p16 (poly16x8_t __a)
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vreinterpret_s8_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv8qiv1df (__a);
+ return (int8x8_t) __a;
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
@@ -3586,7 +3586,7 @@ vreinterpretq_s8_p16 (poly16x8_t __a)
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vreinterpret_s16_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv4hiv1df (__a);
+ return (int16x4_t) __a;
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
@@ -3718,7 +3718,7 @@ vreinterpretq_s16_p16 (poly16x8_t __a)
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vreinterpret_s32_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv2siv1df (__a);
+ return (int32x2_t) __a;
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
@@ -3850,7 +3850,7 @@ vreinterpretq_s32_p16 (poly16x8_t __a)
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vreinterpret_u8_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv8qiv1df_us (__a);
+ return (uint8x8_t) __a;
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -3982,7 +3982,7 @@ vreinterpretq_u8_p16 (poly16x8_t __a)
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vreinterpret_u16_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv4hiv1df_us (__a);
+ return (uint16x4_t) __a;
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
@@ -4114,7 +4114,7 @@ vreinterpretq_u16_p16 (poly16x8_t __a)
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vreinterpret_u32_f64 (float64x1_t __a)
{
- return __builtin_aarch64_reinterpretv2siv1df_us (__a);
+ return (uint32x2_t) __a;
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
@@ -144,9 +144,6 @@
;; Double vector modes for combines.
(define_mode_iterator VDIC [V8QI V4HI V2SI])
-;; Double vector modes, inc. V1DF and the DI "vector" mode, for VREINTERPRET.
-(define_mode_iterator VD_RE [V8QI V4HI V2SI DI V1DF V2SF])
-
;; Double vector modes inc V1DF
(define_mode_iterator VD1 [V8QI V4HI V2SI V2SF V1DF])