[1/2] AArch64: Improve SIMD immediate generation

Message ID PAWPR08MB89824D6DCEB255744F352B0883442@PAWPR08MB8982.eurprd08.prod.outlook.com
State New

Commit Message

Wilco Dijkstra Oct. 14, 2024, 2:19 p.m. UTC
Clean up the various interfaces related to SIMD immediate generation.  Introduce new functions
that make it clear which operation (AND, OR, MOV) we are testing for, rather than guessing the
final instruction.  Reduce the use of overly long names and of unused and default parameters
for clarity.  No changes to internals or generated code.
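
For example, a typical call site (see the constraints.md and predicates.md hunks below) changes from

    aarch64_simd_valid_immediate (op, NULL, AARCH64_CHECK_ORR)

to

    aarch64_simd_valid_orr_imm (op)

so the intended operation is named at the call site instead of being passed as a check-mode parameter.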

Passes regress & bootstrap, OK for commit?

gcc/ChangeLog:

        * config/aarch64/aarch64-protos.h (enum simd_immediate_check): Move to aarch64.cc.
        (aarch64_output_simd_mov_immediate): Remove.
        (aarch64_output_simd_mov_imm): New prototype.
        (aarch64_output_simd_orr_imm): Likewise.
        (aarch64_output_simd_and_imm): Likewise.
        (aarch64_simd_valid_immediate): Remove.
        (aarch64_simd_valid_and_imm): New prototype.
        (aarch64_simd_valid_mov_imm): Likewise.
        (aarch64_simd_valid_orr_imm): Likewise.
        * config/aarch64/aarch64-simd.md: Use aarch64_output_simd_mov_imm.
        * config/aarch64/aarch64.cc (enum simd_immediate_check): Moved from aarch64-protos.h.
        Use AARCH64_CHECK_AND rather than AARCH64_CHECK_BIC.
        (aarch64_expand_sve_const_vector): Use aarch64_simd_valid_mov_imm.
        (aarch64_expand_mov_immediate): Likewise.
        (aarch64_can_const_movi_rtx_p): Likewise.
        (aarch64_secondary_reload): Likewise.
        (aarch64_legitimate_constant_p): Likewise.
        (aarch64_advsimd_valid_immediate): Simplify checks on 'which' param.
        (aarch64_sve_valid_immediate): Add extra param for move vs logical.
        (aarch64_simd_valid_immediate): Rename to aarch64_simd_valid_imm.
        (aarch64_simd_valid_mov_imm): New function.
        (aarch64_simd_valid_orr_imm): Likewise.
        (aarch64_simd_valid_and_imm): Likewise.
        (aarch64_mov_operand_p): Use aarch64_simd_valid_mov_imm.
        (aarch64_simd_scalar_immediate_valid_for_move): Likewise.
        (aarch64_simd_make_constant): Likewise.
        (aarch64_expand_vector_init_fallback): Likewise.
        (aarch64_output_simd_mov_immediate): Rename to aarch64_output_simd_imm.
        (aarch64_output_simd_orr_imm): New function.
        (aarch64_output_simd_and_imm): Likewise.
        (aarch64_output_simd_mov_imm): Likewise.
        (aarch64_output_scalar_simd_mov_immediate): Use aarch64_output_simd_mov_imm.
        (aarch64_output_sve_mov_immediate): Use aarch64_simd_valid_imm.
        (aarch64_output_sve_ptrues): Likewise.
        * config/aarch64/constraints.md (Do): Use aarch64_simd_valid_orr_imm.
        (Db): Use aarch64_simd_valid_and_imm.
        * config/aarch64/predicates.md (aarch64_reg_or_bic_imm): Use aarch64_simd_valid_orr_imm.
        (aarch64_reg_or_and_imm): Use aarch64_simd_valid_and_imm.

---

Comments

Wilco Dijkstra Oct. 14, 2024, 2:22 p.m. UTC | #1
Allow the use of SVE immediates when generating AdvSIMD code and SVE is available.
First check for a valid AdvSIMD immediate, and if SVE is available, try using
an SVE move or bitmask immediate.
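
For illustration (a hypothetical example, not part of the patch): vdupq_n_u64 (3) has no
AdvSIMD MOVI/MVNI encoding, but it is a valid SVE DUP immediate, so with SVE enabled code like

    #include <arm_neon.h>

    uint64x2_t f (void)
    {
      /* No AdvSIMD move-immediate encoding exists for {3, 3};
         with SVE this should become "mov z0.d, #3".  */
      return vdupq_n_u64 (3);
    }

should now materialise the constant with a single SVE move immediate rather than going through a
general-purpose register or a literal load (compare the insr_s64.c/insr_u64.c test updates below,
where "mov x<N>, #?1" becomes "mov z<N>.d, #?1").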

Passes bootstrap & regress, OK for commit?

gcc/ChangeLog:

        * config/aarch64/aarch64-simd.md (ior<mode>3<vczle><vczbe>):
        Use aarch64_reg_or_orr_imm predicate.  Combine SVE/AdvSIMD immediates
        and use aarch64_output_simd_orr_imm.
        * config/aarch64/aarch64.cc (struct simd_immediate_info): Add SVE_MOV enum.
        (aarch64_sve_valid_immediate): Use SVE_MOV for SVE move immediates.
        (aarch64_simd_valid_imm): Enable SVE SIMD immediates when possible.
        (aarch64_output_simd_imm): Support emitting SVE SIMD immediates. 
        * config/aarch64/predicates.md (aarch64_orr_imm_sve_advsimd): Remove.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/sve/acle/asm/insr_s64.c: Allow SVE MOV imm.
        * gcc.target/aarch64/sve/acle/asm/insr_u64.c: Likewise.
        * gcc.target/aarch64/sve/fneg-abs_1.c: Update to check for ORRI.
        * gcc.target/aarch64/sve/fneg-abs_2.c: Likewise.

---

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 6eeb5aa4871eceabb8e46e52bd63f0aa634b9f3d..2e9f30b9bf50eec7a575f4e5037d3350f7ebc95a 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1135,13 +1135,11 @@ (define_insn "and<mode>3<vczle><vczbe>"
 (define_insn "ior<mode>3<vczle><vczbe>"
   [(set (match_operand:VDQ_I 0 "register_operand")
 	(ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
-		   (match_operand:VDQ_I 2 "aarch64_orr_imm_sve_advsimd")))]
+		   (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
   "TARGET_SIMD"
-  {@ [ cons: =0 , 1 , 2; attrs: arch ]
-     [ w        , w , w  ; simd      ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
-     [ w        , 0 , vsl; sve       ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
-     [ w        , 0 , Do ; simd      ] \
-       << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
+  {@ [ cons: =0 , 1 , 2  ]
+     [ w        , w , w  ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+     [ w        , 0 , Do ] << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
   }
   [(set_attr "type" "neon_logic<q>")]
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index d38345770ebab15cf872c24b3ec8ab8cc5cce3e7..7c656476c4974529ae71a6d73328a0cd68dd5ef8 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -140,7 +140,7 @@ enum simd_immediate_check {
 /* Information about a legitimate vector immediate operand.  */
 struct simd_immediate_info
 {
-  enum insn_type { MOV, MVN, INDEX, PTRUE };
+  enum insn_type { MOV, MVN, INDEX, PTRUE, SVE_MOV };
   enum modifier_type { LSL, MSL };
 
   simd_immediate_info () {}
@@ -22987,14 +22987,16 @@ aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
 	{
 	  /* DUP with no shift.  */
 	  if (info)
-	    *info = simd_immediate_info (mode, val);
+	    *info = simd_immediate_info (mode, val,
+					 simd_immediate_info::SVE_MOV);
 	  return true;
 	}
       if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
 	{
 	  /* DUP with LSL #8.  */
 	  if (info)
-	    *info = simd_immediate_info (mode, val);
+	    *info = simd_immediate_info (mode, val,
+					 simd_immediate_info::SVE_MOV);
 	  return true;
 	}
     }
@@ -23002,7 +23004,7 @@ aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
     {
       /* DUPM.  */
       if (info)
-	*info = simd_immediate_info (mode, val);
+	*info = simd_immediate_info (mode, val, simd_immediate_info::SVE_MOV);
       return true;
     }
   return false;
@@ -23209,8 +23211,13 @@ aarch64_simd_valid_imm (rtx op, simd_immediate_info *info,
 
   if (vec_flags & VEC_SVE_DATA)
     return aarch64_sve_valid_immediate (val64, info, which);
-  else
-    return aarch64_advsimd_valid_immediate (val64, info, which);
+
+  if (aarch64_advsimd_valid_immediate (val64, info, which))
+    return true;
+
+  if (TARGET_SVE)
+    return aarch64_sve_valid_immediate (val64, info, which);
+  return false;
 }
 
 /* Return true if OP is a valid SIMD move immediate for SVE or AdvSIMD.  */
@@ -25391,6 +25398,14 @@ aarch64_output_simd_imm (rtx const_vector, unsigned width,
 	  return templ;
 	}
 
+      if (info.insn == simd_immediate_info::SVE_MOV)
+	{
+	  gcc_assert (TARGET_SVE);
+	  snprintf (templ, sizeof (templ), "mov\t%%Z0.%c, #" HOST_WIDE_INT_PRINT_DEC,
+		    element_char, INTVAL (info.u.mov.value));
+	  return templ;
+	}
+
       mnemonic = info.insn == simd_immediate_info::MVN ? "mvni" : "movi";
       shift_op = (info.u.mov.modifier == simd_immediate_info::MSL
 		  ? "msl" : "lsl");
@@ -25410,8 +25425,18 @@ aarch64_output_simd_imm (rtx const_vector, unsigned width,
   else
     {
       /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND.  */
-      mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "orr";
-      if (info.u.mov.shift)
+      mnemonic = "orr";
+      if (which == AARCH64_CHECK_AND)
+	mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "and";
+
+      if (info.insn == simd_immediate_info::SVE_MOV)
+	{
+	  gcc_assert (TARGET_SVE);
+	  snprintf (templ, sizeof (templ), "%s\t%%Z0.%c, %%Z0.%c, "
+		    HOST_WIDE_INT_PRINT_DEC, mnemonic, element_char,
+		    element_char, INTVAL (info.u.mov.value));
+	}
+      else if (info.u.mov.shift)
 	snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, #"
 		  HOST_WIDE_INT_PRINT_DEC ", %s #%d", mnemonic, lane_count,
 		  element_char, UINTVAL (info.u.mov.value), "lsl",
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 0a171387b1a73b85db0ae2ccbc788a3d7f28a082..2c18af94b8eca7a7985a238a4de8c5d0b3766acb 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -943,11 +943,6 @@ (define_predicate "aarch64_sve_logical_operand"
   (ior (match_operand 0 "register_operand")
        (match_operand 0 "aarch64_sve_logical_immediate")))
 
-(define_predicate "aarch64_orr_imm_sve_advsimd"
-  (ior (match_operand 0 "aarch64_reg_or_orr_imm")
-       (and (match_test "TARGET_SVE")
-	    (match_operand 0 "aarch64_sve_logical_operand"))))
-
 (define_predicate "aarch64_sve_gather_offset_b"
   (ior (match_operand 0 "register_operand")
        (match_operand 0 "aarch64_sve_gather_immediate_b")))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
index 32cdc8263d194729e4a89023c7602c7e3b80d022..6f36f32415ac92c2638c317844d8e62ecda7e484 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
@@ -43,8 +43,8 @@ TEST_UNIFORM_Z (insr_0_s64_untied, svint64_t,
 /*
 ** insr_1_s64:
 ** (
-**	mov	(x[0-9]+), #?1
-**	insr	z0\.d, \1
+**	mov	z([0-9]+)\.d, #?1
+**	insr	z0\.d, d\1
 ** |
 **	movi	v([0-9]+)\.2d, 0x1
 **	insr	z0\.d, d\2
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
index ab23f677d4fc93487affc2c9095e38df36371a4b..f92059a97f576f9d4e8a03cbfcac0985c1baa489 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
@@ -43,8 +43,8 @@ TEST_UNIFORM_Z (insr_0_u64_untied, svuint64_t,
 /*
 ** insr_1_u64:
 ** (
-**	mov	(x[0-9]+), #?1
-**	insr	z0\.d, \1
+**	mov	z([0-9]+)\.d, #?1
+**	insr	z0\.d, d\1
 ** |
 **	movi	v([0-9]+)\.2d, 0x1
 **	insr	z0\.d, d\2
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
index a8b27199ff83d0eebadfc7dcf03f94e1229d76b8..03560008fda16b1d7c62fe2daaed8cad98127827 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
@@ -6,7 +6,7 @@
 
 /*
 ** t1:
-**	orr	z[0-9]+.s, z[0-9]+.s, #-2147483648
+**	orr	v0.2s, #?128, lsl #?24
 **	ret
 */
 float32x2_t t1 (float32x2_t a)
@@ -16,7 +16,7 @@ float32x2_t t1 (float32x2_t a)
 
 /*
 ** t2:
-**	orr	z[0-9]+.s, z[0-9]+.s, #-2147483648
+**	orr	v0.4s, #?128, lsl #?24
 **	ret
 */
 float32x4_t t2 (float32x4_t a)
@@ -26,7 +26,7 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**	orr	z[0-9]+.d, z[0-9]+.d, #-9223372036854775808
+**	orr	z[0-9]+.d, z[0-9]+.d, #?-9223372036854775808
 **	ret
 */
 float64x2_t t3 (float64x2_t a)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
index 19a7695e605bc8aced486a9c450d1cdc6be4691a..fe08fe31fe87aab4a7ce8497d05488a42fe9ae21 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
@@ -7,7 +7,7 @@
 
 /*
 ** f1:
-**	orr	z0.s, z0.s, #-2147483648
+**	orr	v0.2s, #?128, lsl #?24
 **	ret
 */
 float32_t f1 (float32_t a)
@@ -17,7 +17,7 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**	orr	z0.d, z0.d, #-9223372036854775808
+**	orr	z0.d, z0.d, #?-9223372036854775808
 **	ret
 */
 float64_t f2 (float64_t a)

Wilco Dijkstra Oct. 15, 2024, 11:33 a.m. UTC | #2
Add support for the SVE XOR immediate when generating AdvSIMD code and SVE is available.
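
As a hypothetical example in the spirit of the new simd_imm.c test below, with SVE enabled

    #include <arm_neon.h>

    uint32x4_t flip_low_bits (uint32x4_t a)
    {
      /* AdvSIMD has no EOR (vector, immediate) form; with SVE this
         should compile to "eor z0.s, z0.s, #3".  */
      return veorq_u32 (a, vdupq_n_u32 (3));
    }

should now use the SVE EOR immediate instead of first materialising the constant in a register.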

Passes bootstrap & regress, OK for commit?

gcc/ChangeLog:

        * config/aarch64/aarch64.cc (enum simd_immediate_check): Add AARCH64_CHECK_XOR.
        (aarch64_simd_valid_xor_imm): New function.
        (aarch64_output_simd_imm): Add AARCH64_CHECK_XOR support.
        (aarch64_output_simd_xor_imm): New function.
        * config/aarch64/aarch64-protos.h (aarch64_output_simd_xor_imm): New prototype.
        (aarch64_simd_valid_xor_imm): New prototype.
        * config/aarch64/aarch64-simd.md (xor<mode>3<vczle><vczbe>):
        Use aarch64_reg_or_xor_imm predicate and add an immediate alternative.
        * config/aarch64/predicates.md (aarch64_reg_or_xor_imm): Add new predicate.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/sve/simd_imm.c: New test.

---

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 3f2d40603426a590a0a14ba4792fe9b325d1e585..16ab79c02da62c1a8aa03309708dfe401d1ffb7e 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -827,6 +827,7 @@ char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
 char *aarch64_output_simd_mov_imm (rtx, unsigned);
 char *aarch64_output_simd_orr_imm (rtx, unsigned);
 char *aarch64_output_simd_and_imm (rtx, unsigned);
+char *aarch64_output_simd_xor_imm (rtx, unsigned);
 
 char *aarch64_output_sve_mov_immediate (rtx);
 char *aarch64_output_sve_ptrues (rtx);
@@ -844,6 +845,7 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
 bool aarch64_simd_valid_and_imm (rtx);
 bool aarch64_simd_valid_mov_imm (rtx);
 bool aarch64_simd_valid_orr_imm (rtx);
+bool aarch64_simd_valid_xor_imm (rtx);
 bool aarch64_valid_sysreg_name_p (const char *);
 const char *aarch64_retrieve_sysreg (const char *, bool, bool);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 5c1de57ce6c3f2064d8be25f903a6a8d949685ef..18795a08b61da874a9e811822ed82e7eb9350bb4 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1144,12 +1144,16 @@ (define_insn "ior<mode>3<vczle><vczbe>"
   [(set_attr "type" "neon_logic<q>")]
 )
 
+;; For EOR (vector, register) and SVE EOR (vector, immediate)
 (define_insn "xor<mode>3<vczle><vczbe>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w")
-        (xor:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w")
-		 (match_operand:VDQ_I 2 "register_operand" "w")))]
+  [(set (match_operand:VDQ_I 0 "register_operand")
+        (xor:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+                   (match_operand:VDQ_I 2 "aarch64_reg_or_xor_imm")))]
   "TARGET_SIMD"
-  "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+  {@ [ cons: =0 , 1 , 2  ]
+     [ w        , w , w  ] eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+     [ w        , 0 , Do ] << aarch64_output_simd_xor_imm (operands[2], <bitsize>);
+  }
   [(set_attr "type" "neon_logic<q>")]
 )
 
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 1a228147e6f945772edbd5540c44167e3a876a74..c019f21e39d9773746792d5885fa0f6805f9bb44 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -134,7 +134,8 @@ constexpr auto AARCH64_STATE_OUT = 1U << 2;
 enum simd_immediate_check {
   AARCH64_CHECK_MOV,
   AARCH64_CHECK_ORR,
-  AARCH64_CHECK_AND
+  AARCH64_CHECK_AND,
+  AARCH64_CHECK_XOR
 };
 
 /* Information about a legitimate vector immediate operand.  */
@@ -23320,6 +23321,13 @@ aarch64_simd_valid_and_imm (rtx op)
   return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_AND);
 }
 
+/* Return true if OP is a valid SIMD xor immediate for SVE.  */
+bool
+aarch64_simd_valid_xor_imm (rtx op)
+{
+  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_XOR);
+}
+
 /* Check whether X is a VEC_SERIES-like constant that starts at 0 and
    has a step in the range of INDEX.  Return the index expression if so,
    otherwise return null.  */
@@ -25503,10 +25511,12 @@ aarch64_output_simd_imm (rtx const_vector, unsigned width,
     }
   else
     {
-      /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND.  */
+      /* AARCH64_CHECK_ORR, AARCH64_CHECK_AND or AARCH64_CHECK_XOR.  */
       mnemonic = "orr";
       if (which == AARCH64_CHECK_AND)
 	mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "and";
+      else if (which == AARCH64_CHECK_XOR)
+	mnemonic = "eor";
 
       if (info.insn == simd_immediate_info::SVE_MOV)
 	{
@@ -25544,6 +25554,14 @@ aarch64_output_simd_and_imm (rtx const_vector, unsigned width)
   return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_AND);
 }
 
+/* Returns the string with the EOR instruction for the SIMD immediate
+   CONST_VECTOR of WIDTH bits.  */
+char*
+aarch64_output_simd_xor_imm (rtx const_vector, unsigned width)
+{
+  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_XOR);
+}
+
 /* Returns the string with the MOV instruction for the SIMD immediate
    CONST_VECTOR of WIDTH bits.  */
 char*
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 2c18af94b8eca7a7985a238a4de8c5d0b3766acb..6ad9a4bd8b92fcbe10260093abf1331c909a474a 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -125,6 +125,11 @@ (define_predicate "aarch64_reg_or_and_imm"
 	(and (match_code "const_vector")
 	     (match_test "aarch64_simd_valid_and_imm (op)"))))
 
+(define_predicate "aarch64_reg_or_xor_imm"
+   (ior (match_operand 0 "register_operand")
+        (and (match_code "const_vector")
+             (match_test "aarch64_simd_valid_xor_imm (op)"))))
+
 (define_predicate "aarch64_fp_compare_operand"
   (ior (match_operand 0 "register_operand")
        (and (match_code "const_double")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/simd_imm.c b/gcc/testsuite/gcc.target/aarch64/sve/simd_imm.c
new file mode 100644
index 0000000000000000000000000000000000000000..69ece074eed03c1b1779eea979318343b8bd7b61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/simd_imm.c
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** t1:
+**	and	z[0-9]+.s, z[0-9]+.s, #?3
+**	ret
+*/
+uint32x2_t t1 (uint32x2_t a)
+{
+  return vand_u32 (a, vdup_n_u32 (3));
+}
+
+/*
+** t2:
+**      orr     z[0-9]+.s, z[0-9]+.s, #?-3
+**      ret
+*/
+uint32x2_t t2 (uint32x2_t a)
+{
+  return vorr_u32 (a, vdup_n_u32 (~2));
+}
+
+/*
+** t3:
+**      eor     z[0-9]+.s, z[0-9]+.s, #?3
+**      ret
+*/
+uint32x2_t t3 (uint32x2_t a)
+{
+  return veor_u32 (a, vdup_n_u32 (3));
+}

Patch

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index d03c1fe798b2ccc2258b8581473a6eb7dc4af850..e789ca9358341363b976988f01d7c7c7aa88cfe4 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -665,16 +665,6 @@  enum aarch64_extra_tuning_flags
   AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
 };
 
-/* Enum to distinguish which type of check is to be done in
-   aarch64_simd_valid_immediate.  This is used as a bitmask where
-   AARCH64_CHECK_MOV has both bits set.  Thus AARCH64_CHECK_MOV will
-   perform all checks.  Adding new types would require changes accordingly.  */
-enum simd_immediate_check {
-  AARCH64_CHECK_ORR  = 1 << 0,
-  AARCH64_CHECK_BIC  = 1 << 1,
-  AARCH64_CHECK_MOV  = AARCH64_CHECK_ORR | AARCH64_CHECK_BIC
-};
-
 extern struct tune_params aarch64_tune_params;
 
 /* The available SVE predicate patterns, known in the ACLE as "svpattern".  */
@@ -834,8 +824,10 @@  char *aarch64_output_sve_rdvl (rtx);
 char *aarch64_output_sve_addvl_addpl (rtx);
 char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
 char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
-char *aarch64_output_simd_mov_immediate (rtx, unsigned,
-			enum simd_immediate_check w = AARCH64_CHECK_MOV);
+char *aarch64_output_simd_mov_imm (rtx, unsigned);
+char *aarch64_output_simd_orr_imm (rtx, unsigned);
+char *aarch64_output_simd_and_imm (rtx, unsigned);
+
 char *aarch64_output_sve_mov_immediate (rtx);
 char *aarch64_output_sve_ptrues (rtx);
 bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
@@ -849,8 +841,9 @@  bool aarch64_pars_overlap_p (rtx, rtx);
 bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
 bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
 bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
-bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
-			enum simd_immediate_check w = AARCH64_CHECK_MOV);
+bool aarch64_simd_valid_and_imm (rtx);
+bool aarch64_simd_valid_mov_imm (rtx);
+bool aarch64_simd_valid_orr_imm (rtx);
 bool aarch64_valid_sysreg_name_p (const char *);
 const char *aarch64_retrieve_sysreg (const char *, bool, bool);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 11d405ed640f7937f985c4bae43ecd634a096604..6eeb5aa4871eceabb8e46e52bd63f0aa634b9f3d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -160,7 +160,7 @@  (define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
      [?r, w ; neon_to_gp<q>      , *        , *] fmov\t%x0, %d1
      [?w, r ; f_mcr              , *        , *] fmov\t%d0, %1
      [?r, r ; mov_reg            , *        , *] mov\t%0, %1
-     [w , Dn; neon_move<q>       , simd     , *] << aarch64_output_simd_mov_immediate (operands[1], 64);
+     [w , Dn; neon_move<q>       , simd     , *] << aarch64_output_simd_mov_imm (operands[1], 64);
      [w , Dz; f_mcr              , *        , *] fmov\t%d0, xzr
      [w , Dx; neon_move          , simd     , 8] #
   }
@@ -189,7 +189,7 @@  (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
      [?r , w ; multiple           , *   , 8] #
      [?w , r ; multiple           , *   , 8] #
      [?r , r ; multiple           , *   , 8] #
-     [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
+     [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_imm (operands[1], 128);
      [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
      [w  , Dx; neon_move          , simd, 8] #
   }
@@ -1122,11 +1122,11 @@  (define_insn "fabd<mode>3<vczle><vczbe>"
 (define_insn "and<mode>3<vczle><vczbe>"
   [(set (match_operand:VDQ_I 0 "register_operand")
 	(and:VDQ_I (match_operand:VDQ_I 1 "register_operand")
-		   (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm")))]
+		   (match_operand:VDQ_I 2 "aarch64_reg_or_and_imm")))]
   "TARGET_SIMD"
   {@ [ cons: =0 , 1 , 2   ]
      [ w        , w , w   ] and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
-     [ w        , 0 , Db  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_BIC);
+     [ w        , 0 , Db  ] << aarch64_output_simd_and_imm (operands[2], <bitsize>);
   }
   [(set_attr "type" "neon_logic<q>")]
 )
@@ -1141,8 +1141,7 @@  (define_insn "ior<mode>3<vczle><vczbe>"
      [ w        , w , w  ; simd      ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
      [ w        , 0 , vsl; sve       ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
      [ w        , 0 , Do ; simd      ] \
-       << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, \
-					     AARCH64_CHECK_ORR);
+       << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
   }
   [(set_attr "type" "neon_logic<q>")]
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 102680a0efca1ce928e6945033c01cfb68a65152..d38345770ebab15cf872c24b3ec8ab8cc5cce3e7 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -129,6 +129,14 @@  constexpr auto AARCH64_STATE_SHARED = 1U << 0;
 constexpr auto AARCH64_STATE_IN = 1U << 1;
 constexpr auto AARCH64_STATE_OUT = 1U << 2;
 
+/* Enum to distinguish which type of check is to be done in
+   aarch64_simd_valid_imm.  */
+enum simd_immediate_check {
+  AARCH64_CHECK_MOV,
+  AARCH64_CHECK_ORR,
+  AARCH64_CHECK_AND
+};
+
 /* Information about a legitimate vector immediate operand.  */
 struct simd_immediate_info
 {
@@ -5657,7 +5665,7 @@  aarch64_expand_sve_const_vector (rtx target, rtx src)
 	  builder.quick_push (CONST_VECTOR_ENCODED_ELT (src, srci));
 	}
       rtx vq_src = builder.build ();
-      if (aarch64_simd_valid_immediate (vq_src, NULL))
+      if (aarch64_simd_valid_mov_imm (vq_src))
 	{
 	  vq_src = force_reg (vq_mode, vq_src);
 	  return aarch64_expand_sve_dupq (target, mode, vq_src);
@@ -6169,8 +6177,7 @@  aarch64_expand_mov_immediate (rtx dest, rtx imm)
 	    }
 	}
 
-      if (GET_CODE (imm) == HIGH
-	  || aarch64_simd_valid_immediate (imm, NULL))
+      if (GET_CODE (imm) == HIGH || aarch64_simd_valid_mov_imm (imm))
 	{
 	  emit_insn (gen_rtx_SET (dest, imm));
 	  return;
@@ -11112,7 +11119,7 @@  aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
   vmode = aarch64_simd_container_mode (imode, width);
   rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, ival);
 
-  return aarch64_simd_valid_immediate (v_op, NULL);
+  return aarch64_simd_valid_mov_imm (v_op);
 }
 
 
@@ -12893,7 +12900,7 @@  aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
   unsigned int vec_flags = aarch64_classify_vector_mode (mode);
   if (reg_class_subset_p (rclass, FP_REGS)
       && !((REG_P (x) && HARD_REGISTER_P (x))
-	   || aarch64_simd_valid_immediate (x, NULL))
+	   || aarch64_simd_valid_mov_imm (x))
       && mode != VNx16QImode
       && (vec_flags & VEC_SVE_DATA)
       && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
@@ -15479,7 +15486,7 @@  cost_plus:
     case CONST_VECTOR:
 	{
 	  /* Load using MOVI/MVNI.  */
-	  if (aarch64_simd_valid_immediate (x, NULL))
+	  if (aarch64_simd_valid_mov_imm (x))
 	    *cost = extra_cost->vect.movi;
 	  else /* Load using constant pool.  */
 	    *cost = extra_cost->ldst.load;
@@ -21155,7 +21162,7 @@  aarch64_legitimate_constant_p (machine_mode mode, rtx x)
      ??? It would be possible (but complex) to handle rematerialization
      of other constants via secondary reloads.  */
   if (!GET_MODE_SIZE (mode).is_constant ())
-    return aarch64_simd_valid_immediate (x, NULL);
+    return aarch64_simd_valid_mov_imm (x);
 
   /* Otherwise, accept any CONST_VECTOR that, if all else fails, can at
      least be forced to memory and loaded from there.  */
@@ -22909,12 +22916,12 @@  aarch64_advsimd_valid_immediate (unsigned HOST_WIDE_INT val64,
 
   if (val32 == (val64 >> 32))
     {
-      if ((which & AARCH64_CHECK_ORR) != 0
+      if ((which == AARCH64_CHECK_MOV || which == AARCH64_CHECK_ORR)
 	  && aarch64_advsimd_valid_immediate_hs (val32, info, which,
 						 simd_immediate_info::MOV))
 	return true;
 
-      if ((which & AARCH64_CHECK_BIC) != 0
+      if ((which == AARCH64_CHECK_MOV || which == AARCH64_CHECK_AND)
 	  && aarch64_advsimd_valid_immediate_hs (~val32, info, which,
 						 simd_immediate_info::MVN))
 	return true;
@@ -22955,7 +22962,8 @@  aarch64_advsimd_valid_immediate (unsigned HOST_WIDE_INT val64,
 
 static bool
 aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
-			     simd_immediate_info *info)
+			     simd_immediate_info *info,
+			     enum simd_immediate_check which)
 {
   scalar_int_mode mode = DImode;
   unsigned int val32 = val64 & 0xffffffff;
@@ -22972,19 +22980,23 @@  aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
 	}
     }
   HOST_WIDE_INT val = trunc_int_for_mode (val64, mode);
-  if (IN_RANGE (val, -0x80, 0x7f))
-    {
-      /* DUP with no shift.  */
-      if (info)
-	*info = simd_immediate_info (mode, val);
-      return true;
-    }
-  if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
+
+  if (which == AARCH64_CHECK_MOV)
     {
-      /* DUP with LSL #8.  */
-      if (info)
-	*info = simd_immediate_info (mode, val);
-      return true;
+      if (IN_RANGE (val, -0x80, 0x7f))
+	{
+	  /* DUP with no shift.  */
+	  if (info)
+	    *info = simd_immediate_info (mode, val);
+	  return true;
+	}
+      if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
+	{
+	  /* DUP with LSL #8.  */
+	  if (info)
+	    *info = simd_immediate_info (mode, val);
+	  return true;
+	}
     }
   if (aarch64_bitmask_imm (val64, mode))
     {
@@ -23070,9 +23082,9 @@  aarch64_sve_pred_valid_immediate (rtx x, simd_immediate_info *info)
 /* Return true if OP is a valid SIMD immediate for the operation
    described by WHICH.  If INFO is nonnull, use it to describe valid
    immediates.  */
-bool
-aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
-			      enum simd_immediate_check which)
+static bool
+aarch64_simd_valid_imm (rtx op, simd_immediate_info *info,
+			enum simd_immediate_check which)
 {
   machine_mode mode = GET_MODE (op);
   unsigned int vec_flags = aarch64_classify_vector_mode (mode);
@@ -23196,11 +23208,32 @@  aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
 	      << (i * BITS_PER_UNIT));
 
   if (vec_flags & VEC_SVE_DATA)
-    return aarch64_sve_valid_immediate (val64, info);
+    return aarch64_sve_valid_immediate (val64, info, which);
   else
     return aarch64_advsimd_valid_immediate (val64, info, which);
 }
 
+/* Return true if OP is a valid SIMD move immediate for SVE or AdvSIMD.  */
+bool
+aarch64_simd_valid_mov_imm (rtx op)
+{
+  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_MOV);
+}
+
+/* Return true if OP is a valid SIMD orr immediate for SVE or AdvSIMD.  */
+bool
+aarch64_simd_valid_orr_imm (rtx op)
+{
+  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_ORR);
+}
+
+/* Return true if OP is a valid SIMD and immediate for SVE or AdvSIMD.  */
+bool
+aarch64_simd_valid_and_imm (rtx op)
+{
+  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_AND);
+}
+
 /* Check whether X is a VEC_SERIES-like constant that starts at 0 and
    has a step in the range of INDEX.  Return the index expression if so,
    otherwise return null.  */
@@ -23264,7 +23297,7 @@  aarch64_mov_operand_p (rtx x, machine_mode mode)
 	  && GET_MODE (x) != VNx16BImode)
 	return false;
 
-      return aarch64_simd_valid_immediate (x, NULL);
+      return aarch64_simd_valid_mov_imm (x);
     }
 
   /* Remove UNSPEC_SALT_ADDR before checking symbol reference.  */
@@ -23365,7 +23398,7 @@  aarch64_simd_scalar_immediate_valid_for_move (rtx op, scalar_int_mode mode)
 
   vmode = aarch64_simd_container_mode (mode, 64);
   rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
-  return aarch64_simd_valid_immediate (op_v, NULL);
+  return aarch64_simd_valid_mov_imm (op_v);
 }
 
 /* Construct and return a PARALLEL RTX vector with elements numbering the
@@ -23845,7 +23878,7 @@  aarch64_simd_make_constant (rtx vals)
     gcc_unreachable ();
 
   if (const_vec != NULL_RTX
-      && aarch64_simd_valid_immediate (const_vec, NULL))
+      && aarch64_simd_valid_mov_imm (const_vec))
     /* Load using MOVI/MVNI.  */
     return const_vec;
   else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
@@ -24050,7 +24083,7 @@  aarch64_expand_vector_init_fallback (rtx target, rtx vals)
       /* Load constant part of vector.  We really don't care what goes into the
 	 parts we will overwrite, but we're more likely to be able to load the
 	 constant efficiently if it has fewer, larger, repeating parts
-	 (see aarch64_simd_valid_immediate).  */
+	 (see aarch64_simd_valid_imm).  */
       for (int i = 0; i < n_elts; i++)
 	{
 	  rtx x = XVECEXP (vals, 0, i);
@@ -25298,12 +25331,11 @@  aarch64_float_const_representable_p (rtx x)
   return (exponent >= 0 && exponent <= 7);
 }
 
-/* Returns the string with the instruction for AdvSIMD MOVI, MVNI, ORR or BIC
-   immediate with a CONST_VECTOR of MODE and WIDTH.  WHICH selects whether to
-   output MOVI/MVNI, ORR or BIC immediate.  */
+/* Returns the string with the instruction for the SIMD immediate
+   CONST_VECTOR of WIDTH bits.  WHICH selects a move, and(bic) or orr.  */
 char*
-aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
-				   enum simd_immediate_check which)
+aarch64_output_simd_imm (rtx const_vector, unsigned width,
+			 enum simd_immediate_check which)
 {
   bool is_valid;
   static char templ[40];
@@ -25314,11 +25346,7 @@  aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
 
   struct simd_immediate_info info;
 
-  /* This will return true to show const_vector is legal for use as either
-     a AdvSIMD MOVI instruction (or, implicitly, MVNI), ORR or BIC immediate.
-     It will also update INFO to show how the immediate should be generated.
-     WHICH selects whether to check for MOVI/MVNI, ORR or BIC.  */
-  is_valid = aarch64_simd_valid_immediate (const_vector, &info, which);
+  is_valid = aarch64_simd_valid_imm (const_vector, &info, which);
   gcc_assert (is_valid);
 
   element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
@@ -25381,7 +25409,7 @@  aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
     }
   else
     {
-      /* For AARCH64_CHECK_BIC and AARCH64_CHECK_ORR.  */
+      /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND.  */
       mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "orr";
       if (info.u.mov.shift)
 	snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, #"
@@ -25396,6 +25424,30 @@  aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
   return templ;
 }
 
+/* Returns the string with the ORR instruction for the SIMD immediate
+   CONST_VECTOR of WIDTH bits.  */
+char*
+aarch64_output_simd_orr_imm (rtx const_vector, unsigned width)
+{
+  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_ORR);
+}
+
+/* Returns the string with the AND/BIC instruction for the SIMD immediate
+   CONST_VECTOR of WIDTH bits.  */
+char*
+aarch64_output_simd_and_imm (rtx const_vector, unsigned width)
+{
+  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_AND);
+}
+
+/* Returns the string with the MOV instruction for the SIMD immediate
+   CONST_VECTOR of WIDTH bits.  */
+char*
+aarch64_output_simd_mov_imm (rtx const_vector, unsigned width)
+{
+  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_MOV);
+}
+
 char*
 aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
 {
@@ -25417,7 +25469,7 @@  aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
 
   vmode = aarch64_simd_container_mode (mode, width);
   rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
-  return aarch64_output_simd_mov_immediate (v_op, width);
+  return aarch64_output_simd_mov_imm (v_op, width);
 }
 
 /* Return the output string to use for moving immediate CONST_VECTOR
@@ -25429,8 +25481,9 @@  aarch64_output_sve_mov_immediate (rtx const_vector)
   static char templ[40];
   struct simd_immediate_info info;
   char element_char;
+  bool is_valid;
 
-  bool is_valid = aarch64_simd_valid_immediate (const_vector, &info);
+  is_valid = aarch64_simd_valid_imm (const_vector, &info, AARCH64_CHECK_MOV);
   gcc_assert (is_valid);
 
   element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
@@ -25499,9 +25552,10 @@  char *
 aarch64_output_sve_ptrues (rtx const_unspec)
 {
   static char templ[40];
-
   struct simd_immediate_info info;
-  bool is_valid = aarch64_simd_valid_immediate (const_unspec, &info);
+  bool is_valid;
+
+  is_valid = aarch64_simd_valid_imm (const_unspec, &info, AARCH64_CHECK_MOV);
   gcc_assert (is_valid && info.insn == simd_immediate_info::PTRUE);
 
   char element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index f491e4bd6a069bc9f8a3c71bdec14496c862a94d..3f9fd92a1911e0b18a163dc6c4c2c97c871458e0 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -464,21 +464,19 @@  (define_constraint "Do"
   "@internal
    A constraint that matches vector of immediates for orr."
  (and (match_code "const_vector")
-      (match_test "aarch64_simd_valid_immediate (op, NULL,
-						 AARCH64_CHECK_ORR)")))
+      (match_test "aarch64_simd_valid_orr_imm (op)")))
 
 (define_constraint "Db"
   "@internal
-   A constraint that matches vector of immediates for bic."
+   A constraint that matches vector of immediates for and/bic."
  (and (match_code "const_vector")
-      (match_test "aarch64_simd_valid_immediate (op, NULL,
-						 AARCH64_CHECK_BIC)")))
+      (match_test "aarch64_simd_valid_and_imm (op)")))
 
 (define_constraint "Dn"
   "@internal
  A constraint that matches vector of immediates."
  (and (match_code "const,const_vector")
-      (match_test "aarch64_simd_valid_immediate (op, NULL)")))
+      (match_test "aarch64_simd_valid_mov_imm (op)")))
 
 (define_constraint "Dh"
   "@internal
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 8f3aab2272c62d5dcc06dfd14fd00067e4db6b8e..0a171387b1a73b85db0ae2ccbc788a3d7f28a082 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -118,14 +118,12 @@  (define_predicate "aarch64_reg_zero_or_m1_or_1"
 (define_predicate "aarch64_reg_or_orr_imm"
    (ior (match_operand 0 "register_operand")
 	(and (match_code "const_vector")
-	     (match_test "aarch64_simd_valid_immediate (op, NULL,
-							AARCH64_CHECK_ORR)"))))
+	     (match_test "aarch64_simd_valid_orr_imm (op)"))))
 
-(define_predicate "aarch64_reg_or_bic_imm"
+(define_predicate "aarch64_reg_or_and_imm"
    (ior (match_operand 0 "register_operand")
 	(and (match_code "const_vector")
-	     (match_test "aarch64_simd_valid_immediate (op, NULL,
-							AARCH64_CHECK_BIC)"))))
+	     (match_test "aarch64_simd_valid_and_imm (op)"))))
 
 (define_predicate "aarch64_fp_compare_operand"
   (ior (match_operand 0 "register_operand")