@@ -3795,6 +3795,41 @@ (define_expand "vcond_mask_<mode><v_int_equiv>"
DONE;
})
+;; Patterns comparing two vectors and conditionally branching on the result.
+
+(define_expand "cbranch<mode>4"
+  [(set (pc)
+        (if_then_else
+          (match_operator 0 "aarch64_equality_operator"
+            [(match_operand:VDQ_BHSI 1 "register_operand")
+             (match_operand:VDQ_BHSI 2 "aarch64_simd_reg_or_zero")])
+          (label_ref (match_operand 3 ""))
+          (pc)))]
+  "TARGET_SIMD"
+{
+  rtx_code code = GET_CODE (operands[0]);
+  rtx tmp = operands[1];
+
+  /* If comparing against a non-zero vector, do the element-wise comparison
+     first so that the branch below only has to test the resulting mask
+     against zero.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      tmp = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_vec_cmp<mode><mode> (tmp, operands[0], operands[1],
+                                          operands[2]));
+      code = NE;
+    }
+
+  /* For 64-bit vectors we need no reduction; the low 64 bits already hold
+     the whole result.  For 128-bit vectors reduce to 64 bits first.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI pairwise unsigned maximum.  */
+      rtx reduc = simplify_gen_subreg (V4SImode, tmp, <MODE>mode, 0);
+      rtx res = gen_reg_rtx (V4SImode);
+      emit_insn (gen_aarch64_umaxpv4si (res, reduc, reduc));
+      tmp = gen_reg_rtx (<MODE>mode);
+      emit_move_insn (tmp, simplify_gen_subreg (<MODE>mode, res, V4SImode, 0));
+    }
+
+  /* Transfer the low 64 bits to a general register and branch on the scalar
+     comparison against zero.  */
+  rtx val = gen_reg_rtx (DImode);
+  emit_move_insn (val, simplify_gen_subreg (DImode, tmp, <MODE>mode, 0));
+
+  rtx cc_reg = aarch64_gen_compare_reg (code, val, const0_rtx);
+  rtx cmp_rtx = gen_rtx_fmt_ee (code, DImode, cc_reg, const0_rtx);
+  emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[3]));
+  DONE;
+})
+
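To illustrate what the new expander enables: an early-exit search loop such as the one below (illustrative only, not part of the patch; the function and register choices are made up) can now have its per-iteration exit test expanded as a mask reduction plus a scalar branch on Advanced SIMD:

   int
   first_match (int *a, int n, int key)
   {
     for (int i = 0; i < n; i++)
       if (a[i] == key)   /* early exit taken as soon as any lane matches  */
         return i;
     return -1;
   }

   /* For a 128-bit vector the exit test is expected to become roughly:
        cmeq  v0.4s, v1.4s, v2.4s    // per-lane compare producing the mask
        umaxp v0.4s, v0.4s, v0.4s    // pairwise max folds 128 bits into 64
        fmov  x0, d0                 // move the low 64 bits to a GP register
        cbnz  x0, .Lexit             // branch if any lane matched
      Exact registers and scheduling will of course differ.  */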
;; Patterns comparing two vectors to produce a mask.
(define_expand "vec_cmp<mode><mode>"
@@ -3653,8 +3653,7 @@ proc check_effective_target_vect_int { } {
proc check_effective_target_vect_early_break { } {
return [check_cached_effective_target_indexed vect_early_break {
expr {
- ([istarget aarch64*-*-*]
- && [check_effective_target_aarch64_sve])
+ [istarget aarch64*-*-*]
}}]
}
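With the SVE requirement dropped, a test can gate on this capability on any AArch64 configuration in the usual way. A minimal sketch of such a test (illustrative only, not a test from this patch; the function name and constant are made up):

   /* { dg-do compile } */
   /* { dg-require-effective-target vect_early_break } */
   /* { dg-additional-options "-O3 -fdump-tree-vect-details" } */

   unsigned
   first_124 (unsigned *x, unsigned n)
   {
     for (unsigned i = 0; i < n; i++)
       if (x[i] == 124)   /* the early break the new expander handles  */
         return i;
     return n;
   }

   /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */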
# Return 1 if the target supports hardware vectorization of complex additions of