@@ -408,6 +408,45 @@ (define_insn "vec_extract<mode><V_elem_l>"
[(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
)
+;; Patterns that compare two vectors and conditionally branch on the result.
+;; Advanced SIMD lacks a vector != comparison, but this is quite a common
+;; operation.  To avoid paying the penalty of inverting == we map an "any"
+;; comparison onto an "all" comparison, i.e. any(~x) => all(x).
+;;
+;; However, unlike the AArch64 version, we can't optimize this any further:
+;; the chain is too long for combine because these are unspecs, so it doesn't
+;; fold the operation into something simpler.
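+;;
+;; For illustration, the expansion below reduces a 128-bit comparison mask
+;; roughly as follows (register numbers are only indicative):
+;;
+;;	vpmax.u32	d0, d0, d1	@ fold the Q-reg mask into one D reg
+;;	vpmax.u32	d0, d0, d0	@ fold the remaining two lanes
+;;	vmov		r0, s0		@ move lane 0 to a core register
+;;	cmp		r0, #0
+;;	bne		.L<label>	@ taken if any mask lane was set
+;;
+;; For a 64-bit mask only the second VPMAX is needed.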
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* For 128-bit vectors we need an additional reduction step.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce through a V4SI view of the mask: split it into two
+	 D registers and fold them with an unsigned pairwise max.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
+      emit_insn (gen_neon_vget_lowv4si (low, op1));
+      emit_insn (gen_neon_vget_highv4si (high, op1));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+  else
+    mask = lowpart_subreg (V2SImode, mask, <MODE>mode);
+
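+  /* Each lane of the comparison mask is either all-ones or all-zeros, so one
+     more unsigned pairwise max folds the remaining two lanes into lane 0,
+     which is non-zero iff any element of the original mask was set.  */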
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
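+  /* Move the low 32 bits of the reduced mask into a core register and let
+     cbranch_cc emit the scalar comparison against zero and the branch.  */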
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
;; by define_expand in vec-common.md file.
new file mode 100644
@@ -0,0 +1,136 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
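+/* Each loop below vectorizes with an early break.  The exit test is expected
+   to use a NEON comparison, two unsigned pairwise-max reductions and a scalar
+   compare-and-branch, as checked by the function-body patterns.  */
+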
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/*
+** f1:
+** ...
+** vcgt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+        break;
+    }
+}
+
+/*
+** f2:
+** ...
+** vcge.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+        break;
+    }
+}
+
+/*
+** f3:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+        break;
+    }
+}
+
+/*
+** f4:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vmvn q[0-9]+, q[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+        break;
+    }
+}
+
+/*
+** f5:
+** ...
+** vclt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+        break;
+    }
+}
+
+/*
+** f6:
+** ...
+** vcle.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+        break;
+    }
+}
+
@@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
return [check_cached_effective_target_indexed vect_early_break {
expr {
[istarget aarch64*-*-*]
+ || [check_effective_target_arm_neon_ok]
}}]
}
# Return 1 if the target supports hardware vectorization of complex additions of