@@ -2148,6 +2148,9 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+    /* Punt on tuples for now.  */
+    if (f.vectors_per_tuple () > 1)
+      return NULL;
+
/* Punt to rtl if the effect of the reinterpret on registers does not
conform to GCC's endianness model. */
if (!targetm.can_change_mode_class (f.vector_mode (0),
@@ -2164,7 +2167,7 @@ public:
rtx
expand (function_expander &e) const override
{
- machine_mode mode = e.vector_mode (0);
+ machine_mode mode = e.tuple_mode (0);
return e.use_exact_insn (code_for_aarch64_sve_reinterpret (mode));
}
};
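
A usage sketch (not part of the patch; assumes SVE is enabled and uses the
same intrinsic names exercised by the new tests below).  Tuple reinterprets
are not folded at the gimple level, so they go through the expander path
above:

    #include <arm_sve.h>

    svuint16x2_t
    reinterpret_pair_explicit (svfloat32x2_t x)
    {
      /* Explicit form: target element type and group size spelled out.  */
      return svreinterpret_u16_f32_x2 (x);
    }

    svuint16x2_t
    reinterpret_pair_overloaded (svfloat32x2_t x)
    {
      /* Overloaded form: source type and group size are inferred.  */
      return svreinterpret_u16 (x);
    }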
@@ -248,7 +248,7 @@ DEF_SVE_FUNCTION (svrdffr, rdffr, none, z_or_none)
DEF_SVE_FUNCTION (svrecpe, unary, all_float, none)
DEF_SVE_FUNCTION (svrecps, binary, all_float, none)
DEF_SVE_FUNCTION (svrecpx, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svreinterpret, unary_convert, reinterpret, none)
+DEF_SVE_FUNCTION_GS (svreinterpret, reinterpret, reinterpret, x1234, none)
DEF_SVE_FUNCTION (svrev, unary, all_data, none)
DEF_SVE_FUNCTION (svrev, unary_pred, all_pred, none)
DEF_SVE_FUNCTION (svrevb, unary, hsd_integer, mxz)
@@ -48,8 +48,13 @@ public:
: m_vectors_per_tuple (vectors_per_tuple) {}
unsigned int
- vectors_per_tuple () const override
+ vectors_per_tuple (const function_instance &fi) const override
{
+ if (fi.group_suffix_id != GROUP_none)
+ {
+ gcc_checking_assert (m_vectors_per_tuple == 1);
+ return fi.group_suffix ().vectors_per_tuple;
+ }
return m_vectors_per_tuple;
}
@@ -2400,6 +2400,34 @@ struct reduction_wide_def : public overloaded_base<0>
};
SHAPE (reduction_wide)
+/* sv<t0>x<g>_t svfoo_t0[_t1_g](sv<t1>x<g>_t)
+
+ where the target type <t0> must be specified explicitly but the source
+ type <t1> can be inferred. */
+struct reinterpret_def : public overloaded_base<1>
+{
+  /* The group suffix is not part of the overloaded function name;
+     svreinterpret_<t0> accepts all group sizes.  */
+  bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (1)
+ || !(type = r.infer_sve_type (0)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (reinterpret)
+
/* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)
where the second argument is an integer constant expression in the
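
For one (t0, t1) pair, the "t0,t1" signature combined with the x1234 group
list used below is expected to register signatures along these lines (a
sketch; the overloaded svreinterpret_f32 form comes from
add_overloaded_functions and is resolved by resolve ()):

    svfloat32_t   svreinterpret_f32_s64 (svint64_t);       /* GROUP_none */
    svfloat32x2_t svreinterpret_f32_s64_x2 (svint64x2_t);  /* GROUP_x2 */
    svfloat32x3_t svreinterpret_f32_s64_x3 (svint64x3_t);  /* GROUP_x3 */
    svfloat32x4_t svreinterpret_f32_s64_x4 (svint64x4_t);  /* GROUP_x4 */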
@@ -133,6 +133,7 @@ namespace aarch64_sve
extern const function_shape *const rdffr;
extern const function_shape *const reduction;
extern const function_shape *const reduction_wide;
+ extern const function_shape *const reinterpret;
extern const function_shape *const set;
extern const function_shape *const setffr;
extern const function_shape *const shift_left_imm_long;
@@ -494,6 +494,10 @@ static const group_suffix_index groups_none[] = {
GROUP_none, NUM_GROUP_SUFFIXES
};
+static const group_suffix_index groups_x1234[] = {
+ GROUP_none, GROUP_x2, GROUP_x3, GROUP_x4, NUM_GROUP_SUFFIXES
+};
+
/* Used by functions that have no governing predicate. */
static const predication_index preds_none[] = { PRED_none, NUM_PREDS };
@@ -534,8 +538,8 @@ static const predication_index preds_z[] = { PRED_z, NUM_PREDS };
/* A list of all SVE ACLE functions. */
static CONSTEXPR const function_group_info function_groups[] = {
-#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
- { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, groups_none, \
+#define DEF_SVE_FUNCTION_GS(NAME, SHAPE, TYPES, GROUPS, PREDS) \
+ { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, groups_##GROUPS, \
preds_##PREDS, REQUIRED_EXTENSIONS },
#include "aarch64-sve-builtins.def"
};
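
For svreinterpret, the redefined macro therefore turns the .def entry above
into an initializer of roughly this form (field order follows the macro
body):

    { "svreinterpret", &functions::svreinterpret, &shapes::reinterpret,
      types_reinterpret, groups_x1234, preds_none, REQUIRED_EXTENSIONS },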
@@ -33,8 +33,13 @@
#define DEF_SVE_GROUP_SUFFIX(A, B, C)
#endif
+#ifndef DEF_SVE_FUNCTION_GS
+#define DEF_SVE_FUNCTION_GS(A, B, C, D, E)
+#endif
+
#ifndef DEF_SVE_FUNCTION
-#define DEF_SVE_FUNCTION(A, B, C, D)
+#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
+ DEF_SVE_FUNCTION_GS (NAME, SHAPE, TYPES, none, PREDS)
#endif
DEF_SVE_MODE (n, none, none, none)
@@ -107,6 +112,7 @@ DEF_SVE_GROUP_SUFFIX (x4, 0, 4)
#include "aarch64-sve-builtins-sve2.def"
#undef DEF_SVE_FUNCTION
+#undef DEF_SVE_FUNCTION_GS
#undef DEF_SVE_GROUP_SUFFIX
#undef DEF_SVE_TYPE_SUFFIX
#undef DEF_SVE_TYPE
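
With the fallback definition above, existing four-argument entries keep
their old meaning.  For example, the unchanged

    DEF_SVE_FUNCTION (svrev, unary, all_data, none)

now forwards to

    DEF_SVE_FUNCTION_GS (svrev, unary, all_data, none, none)

so every pre-existing function implicitly uses the groups_none list.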
@@ -364,6 +364,7 @@ public:
tree tuple_type (unsigned int) const;
unsigned int elements_per_vq (unsigned int i) const;
machine_mode vector_mode (unsigned int) const;
+ machine_mode tuple_mode (unsigned int) const;
machine_mode gp_mode (unsigned int) const;
/* The properties of the function. */
@@ -666,7 +667,7 @@ public:
/* If the function operates on tuples of vectors, return the number
of vectors in the tuples, otherwise return 1. */
- virtual unsigned int vectors_per_tuple () const { return 1; }
+ virtual unsigned int vectors_per_tuple (const function_instance &) const;
/* If the function addresses memory, return the type of a single
scalar memory element. */
@@ -841,7 +842,7 @@ function_instance::operator!= (const function_instance &other) const
inline unsigned int
function_instance::vectors_per_tuple () const
{
- return base->vectors_per_tuple ();
+ return base->vectors_per_tuple (*this);
}
/* If the function addresses memory, return the type of a single
@@ -945,6 +946,15 @@ function_instance::vector_mode (unsigned int i) const
return type_suffix (i).vector_mode;
}
+/* Return the mode of tuple_type (I). */
+inline machine_mode
+function_instance::tuple_mode (unsigned int i) const
+{
+ if (group_suffix ().vectors_per_tuple > 1)
+ return TYPE_MODE (tuple_type (i));
+ return vector_mode (i);
+}
+
/* Return the mode of the governing predicate to use when operating on
type suffix I. */
inline machine_mode
@@ -971,6 +981,12 @@ function_base::call_properties (const function_instance &instance) const
return flags;
}
+inline unsigned int
+function_base::vectors_per_tuple (const function_instance &instance) const
+{
+ return instance.group_suffix ().vectors_per_tuple;
+}
+
/* Return the mode of the result of a call. */
inline machine_mode
function_expander::result_mode () const
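
As a concrete sketch (assuming the usual mapping of ACLE tuple types to
modes, matching the SVE_FULLx2 iterator added below): for
svreinterpret_s32_f32_x2 the group suffix is _x2, so tuple_type (0) is
svint32x2_t and tuple_mode (0) returns VNx8SImode, whereas vector_mode (0)
would still be VNx4SImode.  reinterpret_impl::expand above then emits the
pattern selected by:

    code_for_aarch64_sve_reinterpret (VNx8SImode)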
@@ -787,8 +787,8 @@ (define_insn_and_split "*aarch64_sve_mov<mode>_subreg_be"
;; This is equivalent to a subreg on little-endian targets but not for
;; big-endian; see the comment at the head of the file for details.
(define_expand "@aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand")
+ (unspec:SVE_ALL_STRUCT
[(match_operand 1 "aarch64_any_register_operand")]
UNSPEC_REINTERPRET))]
"TARGET_SVE"
@@ -805,8 +805,8 @@ (define_expand "@aarch64_sve_reinterpret<mode>"
;; A pattern for handling type punning on big-endian targets. We use a
;; special predicate for operand 1 to reduce the number of patterns.
(define_insn_and_split "*aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand" "=w")
+ (unspec:SVE_ALL_STRUCT
[(match_operand 1 "aarch64_any_register_operand" "w")]
UNSPEC_REINTERPRET))]
"TARGET_SVE"
@@ -430,14 +430,6 @@ (define_mode_iterator VNx4SF_ONLY [VNx4SF])
(define_mode_iterator VNx2DI_ONLY [VNx2DI])
(define_mode_iterator VNx2DF_ONLY [VNx2DF])
-;; All SVE vector structure modes.
-(define_mode_iterator SVE_STRUCT [VNx32QI VNx16HI VNx8SI VNx4DI
- VNx16BF VNx16HF VNx8SF VNx4DF
- VNx48QI VNx24HI VNx12SI VNx6DI
- VNx24BF VNx24HF VNx12SF VNx6DF
- VNx64QI VNx32HI VNx16SI VNx8DI
- VNx32BF VNx32HF VNx16SF VNx8DF])
-
;; All fully-packed SVE vector modes.
(define_mode_iterator SVE_FULL [VNx16QI VNx8HI VNx4SI VNx2DI
VNx8BF VNx8HF VNx4SF VNx2DF])
@@ -509,6 +501,24 @@ (define_mode_iterator SVE_ALL [VNx16QI VNx8QI VNx4QI VNx2QI
VNx2DI
VNx2DF])
+;; All SVE 2-vector modes.
+(define_mode_iterator SVE_FULLx2 [VNx32QI VNx16HI VNx8SI VNx4DI
+ VNx16BF VNx16HF VNx8SF VNx4DF])
+
+;; All SVE 3-vector modes.
+(define_mode_iterator SVE_FULLx3 [VNx48QI VNx24HI VNx12SI VNx6DI
+ VNx24BF VNx24HF VNx12SF VNx6DF])
+
+;; All SVE 4-vector modes.
+(define_mode_iterator SVE_FULLx4 [VNx64QI VNx32HI VNx16SI VNx8DI
+ VNx32BF VNx32HF VNx16SF VNx8DF])
+
+;; All SVE vector structure modes.
+(define_mode_iterator SVE_STRUCT [SVE_FULLx2 SVE_FULLx3 SVE_FULLx4])
+
+;; All SVE vector and structure modes.
+(define_mode_iterator SVE_ALL_STRUCT [SVE_ALL SVE_STRUCT])
+
;; All SVE integer vector modes.
(define_mode_iterator SVE_I [VNx16QI VNx8QI VNx4QI VNx2QI
VNx8HI VNx4HI VNx2HI
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_bf16_u64_tied1, svbfloat16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_bf16_u64_untied, svbfloat16_t, svuint64_t,
z0 = svreinterpret_bf16_u64 (z4),
z0 = svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_bf16_x2_tied1, svbfloat16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_bf16_bf16_x2 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f32_x2_untied, svbfloat16x2_t, svfloat32x2_t, z0,
+ svreinterpret_bf16_f32_x2 (z4),
+ svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_s64_x3_tied1, svbfloat16x3_t, svint64x3_t,
+ z0_res = svreinterpret_bf16_s64_x3 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_u8_x3_untied, svbfloat16x3_t, svuint8x3_t, z18,
+ svreinterpret_bf16_u8_x3 (z23),
+ svreinterpret_bf16 (z23))
+
+/*
+** reinterpret_bf16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_u32_x4_tied1, svbfloat16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_bf16_u32_x4 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f64_x4_untied, svbfloat16x4_t, svfloat64x4_t, z28,
+ svreinterpret_bf16_f64_x4 (z4),
+ svreinterpret_bf16 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f16_u64_tied1, svfloat16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f16_u64_untied, svfloat16_t, svuint64_t,
z0 = svreinterpret_f16_u64 (z4),
z0 = svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_bf16_x2_tied1, svfloat16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f16_bf16_x2 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f32_x2_untied, svfloat16x2_t, svfloat32x2_t, z0,
+ svreinterpret_f16_f32_x2 (z4),
+ svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_s64_x3_tied1, svfloat16x3_t, svint64x3_t,
+ z0_res = svreinterpret_f16_s64_x3 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_u8_x3_untied, svfloat16x3_t, svuint8x3_t, z18,
+ svreinterpret_f16_u8_x3 (z23),
+ svreinterpret_f16 (z23))
+
+/*
+** reinterpret_f16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_u32_x4_tied1, svfloat16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f16_u32_x4 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f64_x4_untied, svfloat16x4_t, svfloat64x4_t, z28,
+ svreinterpret_f16_f64_x4 (z4),
+ svreinterpret_f16 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f32_u64_tied1, svfloat32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f32_u64_untied, svfloat32_t, svuint64_t,
z0 = svreinterpret_f32_u64 (z4),
z0 = svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_bf16_x2_tied1, svfloat32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f32_bf16_x2 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f32_x2_untied, svfloat32x2_t, svfloat32x2_t, z0,
+ svreinterpret_f32_f32_x2 (z4),
+ svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_s64_x3_tied1, svfloat32x3_t, svint64x3_t,
+ z0_res = svreinterpret_f32_s64_x3 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_u8_x3_untied, svfloat32x3_t, svuint8x3_t, z18,
+ svreinterpret_f32_u8_x3 (z23),
+ svreinterpret_f32 (z23))
+
+/*
+** reinterpret_f32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_u32_x4_tied1, svfloat32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f32_u32_x4 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f64_x4_untied, svfloat32x4_t, svfloat64x4_t, z28,
+ svreinterpret_f32_f64_x4 (z4),
+ svreinterpret_f32 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f64_u64_tied1, svfloat64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f64_u64_untied, svfloat64_t, svuint64_t,
z0 = svreinterpret_f64_u64 (z4),
z0 = svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_bf16_x2_tied1, svfloat64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f64_bf16_x2 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f32_x2_untied, svfloat64x2_t, svfloat32x2_t, z0,
+ svreinterpret_f64_f32_x2 (z4),
+ svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_s64_x3_tied1, svfloat64x3_t, svint64x3_t,
+ z0_res = svreinterpret_f64_s64_x3 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_u8_x3_untied, svfloat64x3_t, svuint8x3_t, z18,
+ svreinterpret_f64_u8_x3 (z23),
+ svreinterpret_f64 (z23))
+
+/*
+** reinterpret_f64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_u32_x4_tied1, svfloat64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f64_u32_x4 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f64_x4_untied, svfloat64x4_t, svfloat64x4_t, z28,
+ svreinterpret_f64_f64_x4 (z4),
+ svreinterpret_f64 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s16_u64_tied1, svint16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s16_u64_untied, svint16_t, svuint64_t,
z0 = svreinterpret_s16_u64 (z4),
z0 = svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_bf16_x2_tied1, svint16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s16_bf16_x2 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f32_x2_untied, svint16x2_t, svfloat32x2_t, z0,
+ svreinterpret_s16_f32_x2 (z4),
+ svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_s64_x3_tied1, svint16x3_t, svint64x3_t,
+ z0_res = svreinterpret_s16_s64_x3 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_u8_x3_untied, svint16x3_t, svuint8x3_t, z18,
+ svreinterpret_s16_u8_x3 (z23),
+ svreinterpret_s16 (z23))
+
+/*
+** reinterpret_s16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_u32_x4_tied1, svint16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s16_u32_x4 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f64_x4_untied, svint16x4_t, svfloat64x4_t, z28,
+ svreinterpret_s16_f64_x4 (z4),
+ svreinterpret_s16 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s32_u64_tied1, svint32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s32_u64_untied, svint32_t, svuint64_t,
z0 = svreinterpret_s32_u64 (z4),
z0 = svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_bf16_x2_tied1, svint32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s32_bf16_x2 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f32_x2_untied, svint32x2_t, svfloat32x2_t, z0,
+ svreinterpret_s32_f32_x2 (z4),
+ svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_s64_x3_tied1, svint32x3_t, svint64x3_t,
+ z0_res = svreinterpret_s32_s64_x3 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_u8_x3_untied, svint32x3_t, svuint8x3_t, z18,
+ svreinterpret_s32_u8_x3 (z23),
+ svreinterpret_s32 (z23))
+
+/*
+** reinterpret_s32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_u32_x4_tied1, svint32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s32_u32_x4 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f64_x4_untied, svint32x4_t, svfloat64x4_t, z28,
+ svreinterpret_s32_f64_x4 (z4),
+ svreinterpret_s32 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s64_u64_tied1, svint64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s64_u64_untied, svint64_t, svuint64_t,
z0 = svreinterpret_s64_u64 (z4),
z0 = svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_bf16_x2_tied1, svint64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s64_bf16_x2 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f32_x2_untied, svint64x2_t, svfloat32x2_t, z0,
+ svreinterpret_s64_f32_x2 (z4),
+ svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_s64_x3_tied1, svint64x3_t, svint64x3_t,
+ z0_res = svreinterpret_s64_s64_x3 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_u8_x3_untied, svint64x3_t, svuint8x3_t, z18,
+ svreinterpret_s64_u8_x3 (z23),
+ svreinterpret_s64 (z23))
+
+/*
+** reinterpret_s64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_u32_x4_tied1, svint64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s64_u32_x4 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f64_x4_untied, svint64x4_t, svfloat64x4_t, z28,
+ svreinterpret_s64_f64_x4 (z4),
+ svreinterpret_s64 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s8_u64_tied1, svint8_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s8_u64_untied, svint8_t, svuint64_t,
z0 = svreinterpret_s8_u64 (z4),
z0 = svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_bf16_x2_tied1, svint8x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s8_bf16_x2 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f32_x2_untied, svint8x2_t, svfloat32x2_t, z0,
+ svreinterpret_s8_f32_x2 (z4),
+ svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_s64_x3_tied1, svint8x3_t, svint64x3_t,
+ z0_res = svreinterpret_s8_s64_x3 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_u8_x3_untied, svint8x3_t, svuint8x3_t, z18,
+ svreinterpret_s8_u8_x3 (z23),
+ svreinterpret_s8 (z23))
+
+/*
+** reinterpret_s8_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_u32_x4_tied1, svint8x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s8_u32_x4 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f64_x4_untied, svint8x4_t, svfloat64x4_t, z28,
+ svreinterpret_s8_f64_x4 (z4),
+ svreinterpret_s8 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u16_u64_tied1, svuint16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u16_u64_untied, svuint16_t, svuint64_t,
z0 = svreinterpret_u16_u64 (z4),
z0 = svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_bf16_x2_tied1, svuint16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u16_bf16_x2 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f32_x2_untied, svuint16x2_t, svfloat32x2_t, z0,
+ svreinterpret_u16_f32_x2 (z4),
+ svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_s64_x3_tied1, svuint16x3_t, svint64x3_t,
+ z0_res = svreinterpret_u16_s64_x3 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_u8_x3_untied, svuint16x3_t, svuint8x3_t, z18,
+ svreinterpret_u16_u8_x3 (z23),
+ svreinterpret_u16 (z23))
+
+/*
+** reinterpret_u16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_u32_x4_tied1, svuint16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u16_u32_x4 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f64_x4_untied, svuint16x4_t, svfloat64x4_t, z28,
+ svreinterpret_u16_f64_x4 (z4),
+ svreinterpret_u16 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u32_u64_tied1, svuint32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u32_u64_untied, svuint32_t, svuint64_t,
z0 = svreinterpret_u32_u64 (z4),
z0 = svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_bf16_x2_tied1, svuint32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u32_bf16_x2 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f32_x2_untied, svuint32x2_t, svfloat32x2_t, z0,
+ svreinterpret_u32_f32_x2 (z4),
+ svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_s64_x3_tied1, svuint32x3_t, svint64x3_t,
+ z0_res = svreinterpret_u32_s64_x3 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_u8_x3_untied, svuint32x3_t, svuint8x3_t, z18,
+ svreinterpret_u32_u8_x3 (z23),
+ svreinterpret_u32 (z23))
+
+/*
+** reinterpret_u32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_u32_x4_tied1, svuint32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u32_u32_x4 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f64_x4_untied, svuint32x4_t, svfloat64x4_t, z28,
+ svreinterpret_u32_f64_x4 (z4),
+ svreinterpret_u32 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u64_u64_tied1, svuint64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u64_u64_untied, svuint64_t, svuint64_t,
z0 = svreinterpret_u64_u64 (z4),
z0 = svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_bf16_x2_tied1, svuint64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u64_bf16_x2 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f32_x2_untied, svuint64x2_t, svfloat32x2_t, z0,
+ svreinterpret_u64_f32_x2 (z4),
+ svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_s64_x3_tied1, svuint64x3_t, svint64x3_t,
+ z0_res = svreinterpret_u64_s64_x3 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_u8_x3_untied, svuint64x3_t, svuint8x3_t, z18,
+ svreinterpret_u64_u8_x3 (z23),
+ svreinterpret_u64 (z23))
+
+/*
+** reinterpret_u64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_u32_x4_tied1, svuint64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u64_u32_x4 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f64_x4_untied, svuint64x4_t, svfloat64x4_t, z28,
+ svreinterpret_u64_f64_x4 (z4),
+ svreinterpret_u64 (z4))
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u8_u64_tied1, svuint8_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u8_u64_untied, svuint8_t, svuint64_t,
z0 = svreinterpret_u8_u64 (z4),
z0 = svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_bf16_x2_tied1, svuint8x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u8_bf16_x2 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z1\.d, z5\.d
+** mov z0\.d, z4\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f32_x2_untied, svuint8x2_t, svfloat32x2_t, z0,
+ svreinterpret_u8_f32_x2 (z4),
+ svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_s64_x3_tied1, svuint8x3_t, svint64x3_t,
+ z0_res = svreinterpret_u8_s64_x3 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_u8_x3_untied, svuint8x3_t, svuint8x3_t, z18,
+ svreinterpret_u8_u8_x3 (z23),
+ svreinterpret_u8 (z23))
+
+/*
+** reinterpret_u8_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_u32_x4_tied1, svuint8x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u8_u32_x4 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f64_x4_untied, svuint8x4_t, svfloat64x4_t, z28,
+ svreinterpret_u8_f64_x4 (z4),
+ svreinterpret_u8 (z4))
@@ -421,4 +421,18 @@
return z0_res; \
}
+#define TEST_DUAL_XN(NAME, TTYPE1, TTYPE2, RES, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register TTYPE1 z0 __asm ("z0"); \
+ register TTYPE2 z4 __asm ("z4"); \
+ register TTYPE1 z18 __asm ("z18"); \
+ register TTYPE2 z23 __asm ("z23"); \
+ register TTYPE1 z28 __asm ("z28"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z18), \
+ "=w" (z23), "=w" (z28)); \
+ INVOKE (RES = CODE1, RES = CODE2); \
+ __asm volatile ("" :: "w" (RES)); \
+ }
+
#endif
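
Roughly, each new TEST_DUAL_XN invocation expands into a test function of
the following shape (a sketch: PROTO and INVOKE are the harness's existing
helpers, the emitted function name and attributes come from PROTO, and
INVOKE selects either the explicit or the overloaded call depending on how
the file is compiled):

    void
    reinterpret_u16_f32_x2_untied (void)
    {
      register svuint16x2_t z0 __asm ("z0");    /* TTYPE1, result */
      register svfloat32x2_t z4 __asm ("z4");   /* TTYPE2, source */
      register svuint16x2_t z18 __asm ("z18");
      register svfloat32x2_t z23 __asm ("z23");
      register svuint16x2_t z28 __asm ("z28");
      __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z18),
                           "=w" (z23), "=w" (z28));
      z0 = svreinterpret_u16_f32_x2 (z4);   /* or svreinterpret_u16 (z4) */
      __asm volatile ("" :: "w" (z0));
    }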