[06/21] aarch64: Add tuple forms of svreinterpret

Message ID mptedgothq9.fsf@arm.com
State New
Series aarch64: Add support for SME

Commit Message

Richard Sandiford Nov. 17, 2023, 5:25 p.m. UTC
SME2 adds a number of intrinsics that operate on tuples of 2 and 4
vectors.  The ACLE therefore extends the existing svreinterpret
intrinsics to handle tuples as well.
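
As a usage sketch (the cast_* wrapper names below are illustrative, but
the intrinsic names match those exercised by the new tests), the
extension provides both fully suffixed and overloaded tuple forms:

	#include <arm_sve.h>

	/* Fully suffixed form: reinterpret two fully-packed float
	   vectors as two unsigned 16-bit integer vectors.  */
	svuint16x2_t
	cast_x2 (svfloat32x2_t x)
	{
	  return svreinterpret_u16_f32_x2 (x);
	}

	/* Overloaded form: the source type suffix and the group suffix
	   are both inferred from the argument type.  */
	svuint16x4_t
	cast_x4 (svfloat64x4_t x)
	{
	  return svreinterpret_u16 (x);
	}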

gcc/
	* config/aarch64/aarch64-sve-builtins-base.cc
	(svreinterpret_impl::fold): Punt on tuple forms.
	(svreinterpret_impl::expand): Use tuple_mode instead of vector_mode.
	* config/aarch64/aarch64-sve-builtins-base.def (svreinterpret):
	Extend to x1234 groups.
	* config/aarch64/aarch64-sve-builtins-functions.h
	(multi_vector_function::vectors_per_tuple): If the function has
	a group suffix, get the number of vectors from there.
	* config/aarch64/aarch64-sve-builtins-shapes.h (reinterpret): Declare.
	* config/aarch64/aarch64-sve-builtins-shapes.cc (reinterpret_def)
	(reinterpret): New function shape.
	* config/aarch64/aarch64-sve-builtins.cc (function_groups): Handle
	DEF_SVE_FUNCTION_GS.
	(function_resolver::infer_vector_type_and_group_suffix): New
	function.
	* config/aarch64/aarch64-sve-builtins.def (DEF_SVE_FUNCTION_GS): New
	macro.
	(DEF_SVE_FUNCTION): Forward to DEF_SVE_FUNCTION_GS by default.
	* config/aarch64/aarch64-sve-builtins.h
	(function_instance::tuple_mode): New member function.
	(function_resolver::infer_vector_type_and_group_suffix): Likewise.
	(function_base::vectors_per_tuple): Take the function instance
	as argument and get the number from the group suffix.
	(function_instance::vectors_per_tuple): Update accordingly.
	* config/aarch64/iterators.md (SVE_FULLx2, SVE_FULLx3, SVE_FULLx4)
	(SVE_ALL_STRUCT): New mode iterators.
	(SVE_STRUCT): Redefine in terms of SVE_FULL*.
	* config/aarch64/aarch64-sve.md (@aarch64_sve_reinterpret<mode>)
	(*aarch64_sve_reinterpret<mode>): Extend to SVE structure modes.

gcc/testsuite/
	* gcc.target/aarch64/sve/acle/asm/test_sve_acle.h (TEST_DUAL_XN):
	New macro.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c: Add tests for
	tuple forms.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c: Likewise.
	* gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c: Likewise.
---
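For review purposes: for each type pair, the x1234 group list turns the
single svreinterpret entry into four variants, so the declarations for
e.g. the (bf16, f32) pair become roughly:

	svbfloat16_t   svreinterpret_bf16_f32    (svfloat32_t);
	svbfloat16x2_t svreinterpret_bf16_f32_x2 (svfloat32x2_t);
	svbfloat16x3_t svreinterpret_bf16_f32_x3 (svfloat32x3_t);
	svbfloat16x4_t svreinterpret_bf16_f32_x4 (svfloat32x4_t);

with the overloaded svreinterpret_bf16 resolving to whichever of the
four matches its argument.
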
 .../aarch64/aarch64-sve-builtins-base.cc      |  5 +-
 .../aarch64/aarch64-sve-builtins-base.def     |  2 +-
 .../aarch64/aarch64-sve-builtins-functions.h  |  7 ++-
 .../aarch64/aarch64-sve-builtins-shapes.cc    | 30 +++++++++
 .../aarch64/aarch64-sve-builtins-shapes.h     |  1 +
 gcc/config/aarch64/aarch64-sve-builtins.cc    | 52 +++++++++++++++-
 gcc/config/aarch64/aarch64-sve-builtins.def   |  8 ++-
 gcc/config/aarch64/aarch64-sve-builtins.h     | 23 ++++++-
 gcc/config/aarch64/aarch64-sve.md             |  8 +--
 gcc/config/aarch64/iterators.md               | 26 +++++---
 .../aarch64/sve/acle/asm/reinterpret_bf16.c   | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_f16.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_f32.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_f64.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_s16.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_s32.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_s64.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_s8.c     | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_u16.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_u32.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_u64.c    | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/reinterpret_u8.c     | 62 +++++++++++++++++++
 .../aarch64/sve/acle/asm/test_sve_acle.h      | 14 +++++
 23 files changed, 900 insertions(+), 20 deletions(-)

Patch

diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index b84e245eb3e..5b75b903e5f 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -2161,6 +2161,9 @@  public:
   gimple *
   fold (gimple_folder &f) const override
   {
+    if (f.vectors_per_tuple () > 1)
+      return NULL;
+
     /* Punt to rtl if the effect of the reinterpret on registers does not
        conform to GCC's endianness model.  */
     if (!targetm.can_change_mode_class (f.vector_mode (0),
@@ -2177,7 +2180,7 @@  public:
   rtx
   expand (function_expander &e) const override
   {
-    machine_mode mode = e.vector_mode (0);
+    machine_mode mode = e.tuple_mode (0);
     return e.use_exact_insn (code_for_aarch64_sve_reinterpret (mode));
   }
 };
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.def b/gcc/config/aarch64/aarch64-sve-builtins-base.def
index 0484863d3f7..4e31f67ac47 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.def
@@ -248,7 +248,7 @@  DEF_SVE_FUNCTION (svrdffr, rdffr, none, z_or_none)
 DEF_SVE_FUNCTION (svrecpe, unary, all_float, none)
 DEF_SVE_FUNCTION (svrecps, binary, all_float, none)
 DEF_SVE_FUNCTION (svrecpx, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svreinterpret, unary_convert, reinterpret, none)
+DEF_SVE_FUNCTION_GS (svreinterpret, reinterpret, reinterpret, x1234, none)
 DEF_SVE_FUNCTION (svrev, unary, all_data, none)
 DEF_SVE_FUNCTION (svrev, unary_pred, all_pred, none)
 DEF_SVE_FUNCTION (svrevb, unary, hsd_integer, mxz)
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-functions.h b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
index 2729877d914..4a10102038a 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-functions.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
@@ -48,8 +48,13 @@  public:
     : m_vectors_per_tuple (vectors_per_tuple) {}
 
   unsigned int
-  vectors_per_tuple () const override
+  vectors_per_tuple (const function_instance &fi) const override
   {
+    if (fi.group_suffix_id != GROUP_none)
+      {
+	gcc_checking_assert (m_vectors_per_tuple == 1);
+	return fi.group_suffix ().vectors_per_tuple;
+      }
     return m_vectors_per_tuple;
   }
 
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
index dc255fc59f2..aa5dbb5df9d 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
@@ -2400,6 +2400,36 @@  struct reduction_wide_def : public overloaded_base<0>
 };
 SHAPE (reduction_wide)
 
+/* sv<t0>x<g>_t svfoo_t0[_t1_g](sv<t1>x<g>_t)
+
+   where the target type <t0> must be specified explicitly but the source
+   type <t1> can be inferred.  */
+struct reinterpret_def : public overloaded_base<1>
+{
+  bool explicit_group_suffix_p () const override { return false; }
+
+  void
+  build (function_builder &b, const function_group_info &group) const override
+  {
+    b.add_overloaded_functions (group, MODE_none);
+    build_all (b, "t0,t1", group, MODE_none);
+  }
+
+  tree
+  resolve (function_resolver &r) const override
+  {
+    type_suffix_index type;
+    group_suffix_index group;
+    if (!r.check_num_arguments (1)
+	|| !r.infer_vector_type_and_group_suffix (0, &type, &group))
+      return error_mark_node;
+
+    return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0],
+			 type, group);
+  }
+};
+SHAPE (reinterpret)
+
 /* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)
 
    where the second argument is an integer constant expression in the
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
index 7483c1d04b8..38d494761ae 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
@@ -133,6 +133,7 @@  namespace aarch64_sve
     extern const function_shape *const rdffr;
     extern const function_shape *const reduction;
     extern const function_shape *const reduction_wide;
+    extern const function_shape *const reinterpret;
     extern const function_shape *const set;
     extern const function_shape *const setffr;
     extern const function_shape *const shift_left_imm_long;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index 196534df61e..ced3fcfafdf 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -494,6 +494,10 @@  static const group_suffix_index groups_none[] = {
   GROUP_none, NUM_GROUP_SUFFIXES
 };
 
+static const group_suffix_index groups_x1234[] = {
+  GROUP_none, GROUP_x2, GROUP_x3, GROUP_x4, NUM_GROUP_SUFFIXES
+};
+
 /* Used by functions that have no governing predicate.  */
 static const predication_index preds_none[] = { PRED_none, NUM_PREDS };
 
@@ -534,8 +538,8 @@  static const predication_index preds_z[] = { PRED_z, NUM_PREDS };
 
 /* A list of all SVE ACLE functions.  */
 static CONSTEXPR const function_group_info function_groups[] = {
-#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
-  { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, groups_none, \
+#define DEF_SVE_FUNCTION_GS(NAME, SHAPE, TYPES, GROUPS, PREDS) \
+  { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, groups_##GROUPS, \
     preds_##PREDS, REQUIRED_EXTENSIONS },
 #include "aarch64-sve-builtins.def"
 };
@@ -1485,6 +1489,50 @@  function_resolver::infer_tuple_type (unsigned int argno)
   return infer_vector_or_tuple_type (argno, vectors_per_tuple ());
 }
 
+/* Require argument ARGNO to be a single vector or a tuple, inferring both
+   the vector element type and the number of vectors in a tuple.  Return true
+   on success, storing the type suffix in *TYPE_OUT and the group suffix
+   in *GROUP_OUT.  Report an error and return false on failure.  */
+bool
+function_resolver::
+infer_vector_type_and_group_suffix (unsigned int argno,
+				    type_suffix_index *type_out,
+				    group_suffix_index *group_out)
+{
+  tree actual = get_argument_type (argno);
+  if (actual == error_mark_node)
+    return false;
+
+  /* A linear search should be OK here, since the code isn't hot and
+     the number of types is only small.  */
+  for (unsigned int size_i = 0; size_i < MAX_TUPLE_SIZE; ++size_i)
+    for (unsigned int suffix_i = 0; suffix_i < NUM_TYPE_SUFFIXES; ++suffix_i)
+      {
+	vector_type_index type_i = type_suffixes[suffix_i].vector_type;
+	tree type = acle_vector_types[size_i][type_i];
+	if (type && matches_type_p (type, actual))
+	  {
+	    if (size_i == 0)
+	      *group_out = GROUP_none;
+	    else if (size_i == 1)
+	      *group_out = GROUP_x2;
+	    else if (size_i == 2)
+	      *group_out = GROUP_x3;
+	    else if (size_i == 3)
+	      *group_out = GROUP_x4;
+	    else
+	      gcc_unreachable ();
+	    *type_out = type_suffix_index (suffix_i);
+	    return true;
+	  }
+      }
+
+  error_at (location, "passing %qT to argument %d of %qE, which"
+	    " expects an SVE vector or tuple type",
+	    actual, argno + 1, fndecl);
+  return false;
+}
+
 /* Require argument ARGNO to be a vector or scalar argument.  Return true
    if it is, otherwise report an appropriate error.  */
 bool
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.def b/gcc/config/aarch64/aarch64-sve-builtins.def
index 5fbd486d74e..14d12f07415 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins.def
@@ -33,8 +33,13 @@ 
 #define DEF_SVE_GROUP_SUFFIX(A, B, C)
 #endif
 
+#ifndef DEF_SVE_FUNCTION_GS
+#define DEF_SVE_FUNCTION_GS(A, B, C, D, E)
+#endif
+
 #ifndef DEF_SVE_FUNCTION
-#define DEF_SVE_FUNCTION(A, B, C, D)
+#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
+  DEF_SVE_FUNCTION_GS (NAME, SHAPE, TYPES, none, PREDS)
 #endif
 
 DEF_SVE_MODE (n, none, none, none)
@@ -107,6 +112,7 @@  DEF_SVE_GROUP_SUFFIX (x4, 0, 4)
 #include "aarch64-sve-builtins-sve2.def"
 
 #undef DEF_SVE_FUNCTION
+#undef DEF_SVE_FUNCTION_GS
 #undef DEF_SVE_GROUP_SUFFIX
 #undef DEF_SVE_TYPE_SUFFIX
 #undef DEF_SVE_TYPE
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.h b/gcc/config/aarch64/aarch64-sve-builtins.h
index a861e22ae6c..981a57d82d2 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins.h
@@ -330,6 +330,7 @@  public:
   tree tuple_type (unsigned int) const;
   unsigned int elements_per_vq (unsigned int i) const;
   machine_mode vector_mode (unsigned int) const;
+  machine_mode tuple_mode (unsigned int) const;
   machine_mode gp_mode (unsigned int) const;
 
   /* The properties of the function.  */
@@ -436,6 +437,9 @@  public:
   type_suffix_index infer_unsigned_vector_type (unsigned int);
   type_suffix_index infer_sd_vector_type (unsigned int);
   type_suffix_index infer_tuple_type (unsigned int);
+  bool infer_vector_type_and_group_suffix (unsigned int,
+					   type_suffix_index *,
+					   group_suffix_index *);
 
   bool require_vector_or_scalar_type (unsigned int);
 
@@ -627,7 +631,7 @@  public:
 
   /* If the function operates on tuples of vectors, return the number
      of vectors in the tuples, otherwise return 1.  */
-  virtual unsigned int vectors_per_tuple () const { return 1; }
+  virtual unsigned int vectors_per_tuple (const function_instance &) const;
 
   /* If the function addresses memory, return the type of a single
      scalar memory element.  */
@@ -799,7 +803,7 @@  function_instance::operator!= (const function_instance &other) const
 inline unsigned int
 function_instance::vectors_per_tuple () const
 {
-  return base->vectors_per_tuple ();
+  return base->vectors_per_tuple (*this);
 }
 
 /* If the function addresses memory, return the type of a single
@@ -903,6 +907,15 @@  function_instance::vector_mode (unsigned int i) const
   return type_suffix (i).vector_mode;
 }
 
+/* Return the mode of tuple_type (I).  */
+inline machine_mode
+function_instance::tuple_mode (unsigned int i) const
+{
+  if (group_suffix ().vectors_per_tuple > 1)
+    return TYPE_MODE (tuple_type (i));
+  return vector_mode (i);
+}
+
 /* Return the mode of the governing predicate to use when operating on
    type suffix I.  */
 inline machine_mode
@@ -929,6 +942,12 @@  function_base::call_properties (const function_instance &instance) const
   return flags;
 }
 
+inline unsigned int
+function_base::vectors_per_tuple (const function_instance &instance) const
+{
+  return instance.group_suffix ().vectors_per_tuple;
+}
+
 /* Return the mode of the result of a call.  */
 inline machine_mode
 function_expander::result_mode () const
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index cfadac4f1be..e9cebffe3e0 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -787,8 +787,8 @@  (define_insn_and_split "*aarch64_sve_mov<mode>_subreg_be"
 ;; This is equivalent to a subreg on little-endian targets but not for
 ;; big-endian; see the comment at the head of the file for details.
 (define_expand "@aarch64_sve_reinterpret<mode>"
-  [(set (match_operand:SVE_ALL 0 "register_operand")
-	(unspec:SVE_ALL
+  [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand")
+	(unspec:SVE_ALL_STRUCT
 	  [(match_operand 1 "aarch64_any_register_operand")]
 	  UNSPEC_REINTERPRET))]
   "TARGET_SVE"
@@ -805,8 +805,8 @@  (define_expand "@aarch64_sve_reinterpret<mode>"
 ;; A pattern for handling type punning on big-endian targets.  We use a
 ;; special predicate for operand 1 to reduce the number of patterns.
 (define_insn_and_split "*aarch64_sve_reinterpret<mode>"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
-	(unspec:SVE_ALL
+  [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand" "=w")
+	(unspec:SVE_ALL_STRUCT
 	  [(match_operand 1 "aarch64_any_register_operand" "w")]
 	  UNSPEC_REINTERPRET))]
   "TARGET_SVE"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a920de99ffc..e7aa7e35ae1 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -430,14 +430,6 @@  (define_mode_iterator VNx4SF_ONLY [VNx4SF])
 (define_mode_iterator VNx2DI_ONLY [VNx2DI])
 (define_mode_iterator VNx2DF_ONLY [VNx2DF])
 
-;; All SVE vector structure modes.
-(define_mode_iterator SVE_STRUCT [VNx32QI VNx16HI VNx8SI VNx4DI
-				  VNx16BF VNx16HF VNx8SF VNx4DF
-				  VNx48QI VNx24HI VNx12SI VNx6DI
-				  VNx24BF VNx24HF VNx12SF VNx6DF
-				  VNx64QI VNx32HI VNx16SI VNx8DI
-				  VNx32BF VNx32HF VNx16SF VNx8DF])
-
 ;; All fully-packed SVE vector modes.
 (define_mode_iterator SVE_FULL [VNx16QI VNx8HI VNx4SI VNx2DI
 			        VNx8BF VNx8HF VNx4SF VNx2DF])
@@ -509,6 +501,24 @@  (define_mode_iterator SVE_ALL [VNx16QI VNx8QI VNx4QI VNx2QI
 			       VNx2DI
 			       VNx2DF])
 
+;; All SVE 2-vector modes.
+(define_mode_iterator SVE_FULLx2 [VNx32QI VNx16HI VNx8SI VNx4DI
+			          VNx16BF VNx16HF VNx8SF VNx4DF])
+
+;; All SVE 3-vector modes.
+(define_mode_iterator SVE_FULLx3 [VNx48QI VNx24HI VNx12SI VNx6DI
+			          VNx24BF VNx24HF VNx12SF VNx6DF])
+
+;; All SVE 4-vector modes.
+(define_mode_iterator SVE_FULLx4 [VNx64QI VNx32HI VNx16SI VNx8DI
+			          VNx32BF VNx32HF VNx16SF VNx8DF])
+
+;; All SVE vector structure modes.
+(define_mode_iterator SVE_STRUCT [SVE_FULLx2 SVE_FULLx3 SVE_FULLx4])
+
+;; All SVE vector and structure modes.
+(define_mode_iterator SVE_ALL_STRUCT [SVE_ALL SVE_STRUCT])
+
 ;; All SVE integer vector modes.
 (define_mode_iterator SVE_I [VNx16QI VNx8QI VNx4QI VNx2QI
 			     VNx8HI VNx4HI VNx2HI
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
index 2d2c2a714b9..dd0daf2eff0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_bf16_u64_tied1, svbfloat16_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_bf16_u64_untied, svbfloat16_t, svuint64_t,
 	     z0 = svreinterpret_bf16_u64 (z4),
 	     z0 = svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_bf16_x2_tied1, svbfloat16x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_bf16_bf16_x2 (z0),
+		 z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f32_x2_untied, svbfloat16x2_t, svfloat32x2_t, z0,
+	      svreinterpret_bf16_f32_x2 (z4),
+	      svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_s64_x3_tied1, svbfloat16x3_t, svint64x3_t,
+		 z0_res = svreinterpret_bf16_s64_x3 (z0),
+		 z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_u8_x3_untied, svbfloat16x3_t, svuint8x3_t, z18,
+	      svreinterpret_bf16_u8_x3 (z23),
+	      svreinterpret_bf16 (z23))
+
+/*
+** reinterpret_bf16_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_u32_x4_tied1, svbfloat16x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_bf16_u32_x4 (z0),
+		 z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f64_x4_untied, svbfloat16x4_t, svfloat64x4_t, z28,
+	      svreinterpret_bf16_f64_x4 (z4),
+	      svreinterpret_bf16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
index 60705e62879..9b6f8227d2a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_f16_u64_tied1, svfloat16_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_f16_u64_untied, svfloat16_t, svuint64_t,
 	     z0 = svreinterpret_f16_u64 (z4),
 	     z0 = svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_bf16_x2_tied1, svfloat16x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_f16_bf16_x2 (z0),
+		 z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f32_x2_untied, svfloat16x2_t, svfloat32x2_t, z0,
+	      svreinterpret_f16_f32_x2 (z4),
+	      svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_s64_x3_tied1, svfloat16x3_t, svint64x3_t,
+		 z0_res = svreinterpret_f16_s64_x3 (z0),
+		 z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f16_u8_x3_untied, svfloat16x3_t, svuint8x3_t, z18,
+	      svreinterpret_f16_u8_x3 (z23),
+	      svreinterpret_f16 (z23))
+
+/*
+** reinterpret_f16_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_u32_x4_tied1, svfloat16x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_f16_u32_x4 (z0),
+		 z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f64_x4_untied, svfloat16x4_t, svfloat64x4_t, z28,
+	      svreinterpret_f16_f64_x4 (z4),
+	      svreinterpret_f16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
index 06fc46f25de..ce981fce9d8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_f32_u64_tied1, svfloat32_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_f32_u64_untied, svfloat32_t, svuint64_t,
 	     z0 = svreinterpret_f32_u64 (z4),
 	     z0 = svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_bf16_x2_tied1, svfloat32x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_f32_bf16_x2 (z0),
+		 z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f32_x2_untied, svfloat32x2_t, svfloat32x2_t, z0,
+	      svreinterpret_f32_f32_x2 (z4),
+	      svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_s64_x3_tied1, svfloat32x3_t, svint64x3_t,
+		 z0_res = svreinterpret_f32_s64_x3 (z0),
+		 z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f32_u8_x3_untied, svfloat32x3_t, svuint8x3_t, z18,
+	      svreinterpret_f32_u8_x3 (z23),
+	      svreinterpret_f32 (z23))
+
+/*
+** reinterpret_f32_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_u32_x4_tied1, svfloat32x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_f32_u32_x4 (z0),
+		 z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f64_x4_untied, svfloat32x4_t, svfloat64x4_t, z28,
+	      svreinterpret_f32_f64_x4 (z4),
+	      svreinterpret_f32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
index 003ee3fe220..4f51824ab7e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_f64_u64_tied1, svfloat64_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_f64_u64_untied, svfloat64_t, svuint64_t,
 	     z0 = svreinterpret_f64_u64 (z4),
 	     z0 = svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_bf16_x2_tied1, svfloat64x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_f64_bf16_x2 (z0),
+		 z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f32_x2_untied, svfloat64x2_t, svfloat32x2_t, z0,
+	      svreinterpret_f64_f32_x2 (z4),
+	      svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_s64_x3_tied1, svfloat64x3_t, svint64x3_t,
+		 z0_res = svreinterpret_f64_s64_x3 (z0),
+		 z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f64_u8_x3_untied, svfloat64x3_t, svuint8x3_t, z18,
+	      svreinterpret_f64_u8_x3 (z23),
+	      svreinterpret_f64 (z23))
+
+/*
+** reinterpret_f64_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_u32_x4_tied1, svfloat64x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_f64_u32_x4 (z0),
+		 z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f64_x4_untied, svfloat64x4_t, svfloat64x4_t, z28,
+	      svreinterpret_f64_f64_x4 (z4),
+	      svreinterpret_f64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
index d62817c2cac..7e15f3e9bd3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_s16_u64_tied1, svint16_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_s16_u64_untied, svint16_t, svuint64_t,
 	     z0 = svreinterpret_s16_u64 (z4),
 	     z0 = svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_bf16_x2_tied1, svint16x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_s16_bf16_x2 (z0),
+		 z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f32_x2_untied, svint16x2_t, svfloat32x2_t, z0,
+	      svreinterpret_s16_f32_x2 (z4),
+	      svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_s64_x3_tied1, svint16x3_t, svint64x3_t,
+		 z0_res = svreinterpret_s16_s64_x3 (z0),
+		 z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s16_u8_x3_untied, svint16x3_t, svuint8x3_t, z18,
+	      svreinterpret_s16_u8_x3 (z23),
+	      svreinterpret_s16 (z23))
+
+/*
+** reinterpret_s16_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_u32_x4_tied1, svint16x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_s16_u32_x4 (z0),
+		 z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f64_x4_untied, svint16x4_t, svfloat64x4_t, z28,
+	      svreinterpret_s16_f64_x4 (z4),
+	      svreinterpret_s16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
index e1068f244ed..60da8aef333 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_s32_u64_tied1, svint32_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_s32_u64_untied, svint32_t, svuint64_t,
 	     z0 = svreinterpret_s32_u64 (z4),
 	     z0 = svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_bf16_x2_tied1, svint32x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_s32_bf16_x2 (z0),
+		 z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f32_x2_untied, svint32x2_t, svfloat32x2_t, z0,
+	      svreinterpret_s32_f32_x2 (z4),
+	      svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_s64_x3_tied1, svint32x3_t, svint64x3_t,
+		 z0_res = svreinterpret_s32_s64_x3 (z0),
+		 z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s32_u8_x3_untied, svint32x3_t, svuint8x3_t, z18,
+	      svreinterpret_s32_u8_x3 (z23),
+	      svreinterpret_s32 (z23))
+
+/*
+** reinterpret_s32_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_u32_x4_tied1, svint32x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_s32_u32_x4 (z0),
+		 z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f64_x4_untied, svint32x4_t, svfloat64x4_t, z28,
+	      svreinterpret_s32_f64_x4 (z4),
+	      svreinterpret_s32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
index cada7533c53..d705c60dfd7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_s64_u64_tied1, svint64_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_s64_u64_untied, svint64_t, svuint64_t,
 	     z0 = svreinterpret_s64_u64 (z4),
 	     z0 = svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_bf16_x2_tied1, svint64x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_s64_bf16_x2 (z0),
+		 z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f32_x2_untied, svint64x2_t, svfloat32x2_t, z0,
+	      svreinterpret_s64_f32_x2 (z4),
+	      svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_s64_x3_tied1, svint64x3_t, svint64x3_t,
+		 z0_res = svreinterpret_s64_s64_x3 (z0),
+		 z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s64_u8_x3_untied, svint64x3_t, svuint8x3_t, z18,
+	      svreinterpret_s64_u8_x3 (z23),
+	      svreinterpret_s64 (z23))
+
+/*
+** reinterpret_s64_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_u32_x4_tied1, svint64x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_s64_u32_x4 (z0),
+		 z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f64_x4_untied, svint64x4_t, svfloat64x4_t, z28,
+	      svreinterpret_s64_f64_x4 (z4),
+	      svreinterpret_s64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
index 23a40d0bab7..ab90a54d746 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_s8_u64_tied1, svint8_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_s8_u64_untied, svint8_t, svuint64_t,
 	     z0 = svreinterpret_s8_u64 (z4),
 	     z0 = svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_bf16_x2_tied1, svint8x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_s8_bf16_x2 (z0),
+		 z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f32_x2_untied, svint8x2_t, svfloat32x2_t, z0,
+	      svreinterpret_s8_f32_x2 (z4),
+	      svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_s64_x3_tied1, svint8x3_t, svint64x3_t,
+		 z0_res = svreinterpret_s8_s64_x3 (z0),
+		 z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s8_u8_x3_untied, svint8x3_t, svuint8x3_t, z18,
+	      svreinterpret_s8_u8_x3 (z23),
+	      svreinterpret_s8 (z23))
+
+/*
+** reinterpret_s8_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_u32_x4_tied1, svint8x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_s8_u32_x4 (z0),
+		 z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f64_x4_untied, svint8x4_t, svfloat64x4_t, z28,
+	      svreinterpret_s8_f64_x4 (z4),
+	      svreinterpret_s8 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
index 48e8ecaff44..fcfc0eb9da5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_u16_u64_tied1, svuint16_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_u16_u64_untied, svuint16_t, svuint64_t,
 	     z0 = svreinterpret_u16_u64 (z4),
 	     z0 = svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_bf16_x2_tied1, svuint16x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_u16_bf16_x2 (z0),
+		 z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f32_x2_untied, svuint16x2_t, svfloat32x2_t, z0,
+	      svreinterpret_u16_f32_x2 (z4),
+	      svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_s64_x3_tied1, svuint16x3_t, svint64x3_t,
+		 z0_res = svreinterpret_u16_s64_x3 (z0),
+		 z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u16_u8_x3_untied, svuint16x3_t, svuint8x3_t, z18,
+	      svreinterpret_u16_u8_x3 (z23),
+	      svreinterpret_u16 (z23))
+
+/*
+** reinterpret_u16_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_u32_x4_tied1, svuint16x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_u16_u32_x4 (z0),
+		 z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f64_x4_untied, svuint16x4_t, svfloat64x4_t, z28,
+	      svreinterpret_u16_f64_x4 (z4),
+	      svreinterpret_u16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
index 1d4e857120e..6d7e05857fe 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_u32_u64_tied1, svuint32_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_u32_u64_untied, svuint32_t, svuint64_t,
 	     z0 = svreinterpret_u32_u64 (z4),
 	     z0 = svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_bf16_x2_tied1, svuint32x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_u32_bf16_x2 (z0),
+		 z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f32_x2_untied, svuint32x2_t, svfloat32x2_t, z0,
+	      svreinterpret_u32_f32_x2 (z4),
+	      svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_s64_x3_tied1, svuint32x3_t, svint64x3_t,
+		 z0_res = svreinterpret_u32_s64_x3 (z0),
+		 z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u32_u8_x3_untied, svuint32x3_t, svuint8x3_t, z18,
+	      svreinterpret_u32_u8_x3 (z23),
+	      svreinterpret_u32 (z23))
+
+/*
+** reinterpret_u32_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_u32_x4_tied1, svuint32x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_u32_u32_x4 (z0),
+		 z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f64_x4_untied, svuint32x4_t, svfloat64x4_t, z28,
+	      svreinterpret_u32_f64_x4 (z4),
+	      svreinterpret_u32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
index 07af69dce8d..55c0baefb6f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_u64_u64_tied1, svuint64_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_u64_u64_untied, svuint64_t, svuint64_t,
 	     z0 = svreinterpret_u64_u64 (z4),
 	     z0 = svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_bf16_x2_tied1, svuint64x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_u64_bf16_x2 (z0),
+		 z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f32_x2_untied, svuint64x2_t, svfloat32x2_t, z0,
+	      svreinterpret_u64_f32_x2 (z4),
+	      svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_s64_x3_tied1, svuint64x3_t, svint64x3_t,
+		 z0_res = svreinterpret_u64_s64_x3 (z0),
+		 z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u64_u8_x3_untied, svuint64x3_t, svuint8x3_t, z18,
+	      svreinterpret_u64_u8_x3 (z23),
+	      svreinterpret_u64 (z23))
+
+/*
+** reinterpret_u64_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_u32_x4_tied1, svuint64x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_u64_u32_x4 (z0),
+		 z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f64_x4_untied, svuint64x4_t, svfloat64x4_t, z28,
+	      svreinterpret_u64_f64_x4 (z4),
+	      svreinterpret_u64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
index a4c7f4c8d21..f7302196162 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
@@ -205,3 +205,65 @@  TEST_DUAL_Z_REV (reinterpret_u8_u64_tied1, svuint8_t, svuint64_t,
 TEST_DUAL_Z (reinterpret_u8_u64_untied, svuint8_t, svuint64_t,
 	     z0 = svreinterpret_u8_u64 (z4),
 	     z0 = svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_bf16_x2_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_bf16_x2_tied1, svuint8x2_t, svbfloat16x2_t,
+		 z0_res = svreinterpret_u8_bf16_x2 (z0),
+		 z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f32_x2_untied:
+** (
+**	mov	z0\.d, z4\.d
+**	mov	z1\.d, z5\.d
+** |
+**	mov	z1\.d, z5\.d
+**	mov	z0\.d, z4\.d
+** )
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f32_x2_untied, svuint8x2_t, svfloat32x2_t, z0,
+	      svreinterpret_u8_f32_x2 (z4),
+	      svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_s64_x3_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_s64_x3_tied1, svuint8x3_t, svint64x3_t,
+		 z0_res = svreinterpret_u8_s64_x3 (z0),
+		 z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_u8_x3_untied:
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	mov	(z18|z19|z20)\.d, (z23|z24|z25)\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u8_u8_x3_untied, svuint8x3_t, svuint8x3_t, z18,
+	      svreinterpret_u8_u8_x3 (z23),
+	      svreinterpret_u8 (z23))
+
+/*
+** reinterpret_u8_u32_x4_tied1:
+**	ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_u32_x4_tied1, svuint8x4_t, svuint32x4_t,
+		 z0_res = svreinterpret_u8_u32_x4 (z0),
+		 z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f64_x4_untied:
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	mov	(z28|z29|z30|z31)\.d, z[4-7]\.d
+**	ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f64_x4_untied, svuint8x4_t, svfloat64x4_t, z28,
+	      svreinterpret_u8_f64_x4 (z4),
+	      svreinterpret_u8 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
index fbf392b3ed4..2da61ff5c0b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
@@ -421,4 +421,18 @@ 
     return z0_res;						\
   }
 
+#define TEST_DUAL_XN(NAME, TTYPE1, TTYPE2, RES, CODE1, CODE2)	\
+  PROTO (NAME, void, ())					\
+  {								\
+    register TTYPE1 z0 __asm ("z0");				\
+    register TTYPE2 z4 __asm ("z4");				\
+    register TTYPE1 z18 __asm ("z18");				\
+    register TTYPE2 z23 __asm ("z23");				\
+    register TTYPE1 z28 __asm ("z28");				\
+    __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z18),	\
+		    "=w" (z23), "=w" (z28));			\
+    INVOKE (RES = CODE1, RES = CODE2);				\
+    __asm volatile ("" :: "w" (RES));				\
+  }
+
 #endif
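
A note on the new TEST_DUAL_XN macro: it pins its operands to named
registers so that check-function-bodies can match exact register numbers
in the moves above.  For example, the invocation from reinterpret_bf16.c:

	TEST_DUAL_XN (reinterpret_bf16_f32_x2_untied, svbfloat16x2_t,
		      svfloat32x2_t, z0,
		      svreinterpret_bf16_f32_x2 (z4),
		      svreinterpret_bf16 (z4))

expands to a function that makes z0, z4, z18, z23 and z28 live, assigns
the result of either the fully suffixed or the overloaded call to z0,
and keeps the result live via the final asm.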