@@ -3071,6 +3071,10 @@ FUNCTION (svadrb, svadr_bhwd_impl, (0))
FUNCTION (svadrd, svadr_bhwd_impl, (3))
FUNCTION (svadrh, svadr_bhwd_impl, (1))
FUNCTION (svadrw, svadr_bhwd_impl, (2))
+FUNCTION (svamax, cond_or_uncond_unspec_function,
+ (UNSPEC_COND_FAMAX, UNSPEC_FAMAX))
+FUNCTION (svamin, cond_or_uncond_unspec_function,
+ (UNSPEC_COND_FAMIN, UNSPEC_FAMIN))
FUNCTION (svand, rtx_code_function, (AND, AND))
FUNCTION (svandv, reduction, (UNSPEC_ANDV))
FUNCTION (svasr, rtx_code_function, (ASHIFTRT, ASHIFTRT))
@@ -379,3 +379,8 @@ DEF_SVE_FUNCTION (svzip2q, binary, all_data, none)
DEF_SVE_FUNCTION (svld1ro, load_replicate, all_data, implicit)
DEF_SVE_FUNCTION (svmmla, mmla, d_float, none)
#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_FAMINMAX
+DEF_SVE_FUNCTION (svamax, binary_opt_single_n, all_float, mxz)
+DEF_SVE_FUNCTION (svamin, binary_opt_single_n, all_float, mxz)
+#undef REQUIRED_EXTENSIONS
@@ -37,6 +37,8 @@ namespace aarch64_sve
extern const function_base *const svadrd;
extern const function_base *const svadrh;
extern const function_base *const svadrw;
+ extern const function_base *const svamax;
+ extern const function_base *const svamin;
extern const function_base *const svand;
extern const function_base *const svandv;
extern const function_base *const svasr;
@@ -470,6 +470,7 @@ constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
/* Floating Point Absolute Maximum/Minimum extension instructions are
enabled through +faminmax. */
#define TARGET_FAMINMAX AARCH64_HAVE_ISA (FAMINMAX)
+#define TARGET_SVE_FAMINMAX (TARGET_SVE && TARGET_FAMINMAX)
/* Prefer different predicate registers for the output of a predicated
operation over re-using an existing input predicate. */
@@ -841,6 +841,8 @@
UNSPEC_COND_CMPNE_WIDE ; Used in aarch64-sve.md.
UNSPEC_COND_FABS ; Used in aarch64-sve.md.
UNSPEC_COND_FADD ; Used in aarch64-sve.md.
+ UNSPEC_COND_FAMAX ; Used in aarch64-sve.md.
+ UNSPEC_COND_FAMIN ; Used in aarch64-sve.md.
UNSPEC_COND_FCADD90 ; Used in aarch64-sve.md.
UNSPEC_COND_FCADD270 ; Used in aarch64-sve.md.
UNSPEC_COND_FCMEQ ; Used in aarch64-sve.md.
@@ -3085,6 +3087,8 @@
(define_int_iterator SVE_COND_FP_BINARY
[UNSPEC_COND_FADD
+ (UNSPEC_COND_FAMAX "TARGET_SVE_FAMINMAX")
+ (UNSPEC_COND_FAMIN "TARGET_SVE_FAMINMAX")
UNSPEC_COND_FDIV
UNSPEC_COND_FMAX
UNSPEC_COND_FMAXNM
@@ -3124,7 +3128,9 @@
UNSPEC_COND_SMIN])
(define_int_iterator SVE_COND_FP_BINARY_REG
- [UNSPEC_COND_FDIV
+ [(UNSPEC_COND_FAMAX "TARGET_SVE_FAMINMAX")
+ (UNSPEC_COND_FAMIN "TARGET_SVE_FAMINMAX")
+ UNSPEC_COND_FDIV
UNSPEC_COND_FMULX
UNSPEC_COND_SMAX
UNSPEC_COND_SMIN])
@@ -3701,6 +3707,8 @@
(UNSPEC_ZIP2Q "zip2q")
(UNSPEC_COND_FABS "abs")
(UNSPEC_COND_FADD "add")
+ (UNSPEC_COND_FAMAX "famax")
+ (UNSPEC_COND_FAMIN "famin")
(UNSPEC_COND_FCADD90 "cadd90")
(UNSPEC_COND_FCADD270 "cadd270")
(UNSPEC_COND_FCMLA "fcmla")
@@ -4237,6 +4245,8 @@
(UNSPEC_FTSSEL "ftssel")
(UNSPEC_COND_FABS "fabs")
(UNSPEC_COND_FADD "fadd")
+ (UNSPEC_COND_FAMAX "famax")
+ (UNSPEC_COND_FAMIN "famin")
(UNSPEC_COND_FCVTLT "fcvtlt")
(UNSPEC_COND_FCVTX "fcvtx")
(UNSPEC_COND_FDIV "fdiv")
@@ -4263,6 +4273,8 @@
(UNSPEC_COND_SMIN "fminnm")])
(define_int_attr sve_fp_op_rev [(UNSPEC_COND_FADD "fadd")
+ (UNSPEC_COND_FAMAX "famax")
+ (UNSPEC_COND_FAMIN "famin")
(UNSPEC_COND_FDIV "fdivr")
(UNSPEC_COND_FMAX "fmax")
(UNSPEC_COND_FMAXNM "fmaxnm")
@@ -4401,6 +4413,8 @@
;; <optab><mode>3 pattern.
(define_int_attr sve_pred_fp_rhs1_operand
[(UNSPEC_COND_FADD "register_operand")
+ (UNSPEC_COND_FAMAX "register_operand")
+ (UNSPEC_COND_FAMIN "register_operand")
(UNSPEC_COND_FDIV "register_operand")
(UNSPEC_COND_FMAX "register_operand")
(UNSPEC_COND_FMAXNM "register_operand")
@@ -4416,6 +4430,8 @@
;; <optab><mode>3 pattern.
(define_int_attr sve_pred_fp_rhs2_operand
[(UNSPEC_COND_FADD "aarch64_sve_float_arith_with_sub_operand")
+ (UNSPEC_COND_FAMAX "aarch64_sve_float_maxmin_operand")
+ (UNSPEC_COND_FAMIN "aarch64_sve_float_maxmin_operand")
(UNSPEC_COND_FDIV "register_operand")
(UNSPEC_COND_FMAX "aarch64_sve_float_maxmin_operand")
(UNSPEC_COND_FMAXNM "aarch64_sve_float_maxmin_operand")
new file mode 100644
@@ -0,0 +1,312 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f16_m_tied1:
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_tied1, svfloat16_t,
+ z0 = svamax_f16_m (p0, z0, z1),
+ z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** famax z0\.h, p0/m, z0\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_tied2, svfloat16_t,
+ z0 = svamax_f16_m (p0, z1, z0),
+ z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f16_m_untied:
+** movprfx z0, z1
+** famax z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_untied, svfloat16_t,
+ z0 = svamax_f16_m (p0, z1, z2),
+ z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_h4_f16_m_tied1:
+** mov (z[0-9]+\.h), h4
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_m_tied1, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_m (p0, z0, d4),
+ z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_h4_f16_m_untied:
+** mov (z[0-9]+\.h), h4
+** movprfx z0, z1
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_m_untied, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_m (p0, z1, d4),
+ z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_2_f16_m:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_m, svfloat16_t,
+ z0 = svamax_n_f16_m (p0, z0, 2),
+ z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied1, svfloat16_t,
+ z0 = svamax_f16_z (p0, z0, z1),
+ z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f16_z_tied2:
+** movprfx z0\.h, p0/z, z0\.h
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied2, svfloat16_t,
+ z0 = svamax_f16_z (p0, z1, z0),
+ z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f16_z_untied:
+** (
+** movprfx z0\.h, p0/z, z1\.h
+** famax z0\.h, p0/m, z0\.h, z2\.h
+** |
+** movprfx z0\.h, p0/z, z2\.h
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_untied, svfloat16_t,
+ z0 = svamax_f16_z (p0, z1, z2),
+ z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_h4_f16_z_tied1:
+** mov (z[0-9]+\.h), h4
+** movprfx z0\.h, p0/z, z0\.h
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_tied1, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_z (p0, z0, d4),
+ z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_h4_f16_z_untied:
+** mov (z[0-9]+\.h), h4
+** (
+** movprfx z0\.h, p0/z, z1\.h
+** famax z0\.h, p0/m, z0\.h, \1
+** |
+** movprfx z0\.h, p0/z, \1
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_untied, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_z (p0, z1, d4),
+ z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_2_f16_z:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** movprfx z0\.h, p0/z, z0\.h
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_z, svfloat16_t,
+ z0 = svamax_n_f16_z (p0, z0, 2),
+ z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f16_x_tied1:
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied1, svfloat16_t,
+ z0 = svamax_f16_x (p0, z0, z1),
+ z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f16_x_tied2:
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied2, svfloat16_t,
+ z0 = svamax_f16_x (p0, z1, z0),
+ z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f16_x_untied:
+** (
+** movprfx z0, z1
+** famax z0\.h, p0/m, z0\.h, z2\.h
+** |
+** movprfx z0, z2
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_untied, svfloat16_t,
+ z0 = svamax_f16_x (p0, z1, z2),
+ z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_h4_f16_x_tied1:
+** mov (z[0-9]+\.h), h4
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_tied1, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_x (p0, z0, d4),
+ z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_h4_f16_x_untied:
+** mov z0\.h, h4
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_untied, svfloat16_t, __fp16,
+ z0 = svamax_n_f16_x (p0, z1, d4),
+ z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_2_f16_x_tied1:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** famax z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_tied1, svfloat16_t,
+ z0 = svamax_n_f16_x (p0, z0, 2),
+ z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f16_x_untied:
+** fmov z0\.h, #2\.0(?:e\+0)?
+** famax z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_untied, svfloat16_t,
+ z0 = svamax_n_f16_x (p0, z1, 2),
+ z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied1, svfloat16_t,
+ z0 = svamax_f16_x (svptrue_b16 (), z0, z1),
+ z0 = svamax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amax_f16_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied2, svfloat16_t,
+ z0 = svamax_f16_x (svptrue_b16 (), z1, z0),
+ z0 = svamax_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amax_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_untied, svfloat16_t,
+ z0 = svamax_f16_x (svptrue_b16 (), z1, z2),
+ z0 = svamax_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amax_0_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_tied1, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z0, 0),
+ z0 = svamax_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amax_0_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_untied, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z1, 0),
+ z0 = svamax_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amax_1_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_tied1, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z0, 1),
+ z0 = svamax_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amax_1_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_untied, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z1, 1),
+ z0 = svamax_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amax_2_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_tied1, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z0, 2),
+ z0 = svamax_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amax_2_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_untied, svfloat16_t,
+ z0 = svamax_n_f16_x (svptrue_b16 (), z1, 2),
+ z0 = svamax_x (svptrue_b16 (), z1, 2))
new file mode 100644
@@ -0,0 +1,312 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f32_m_tied1:
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied1, svfloat32_t,
+ z0 = svamax_f32_m (p0, z0, z1),
+ z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** famax z0\.s, p0/m, z0\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied2, svfloat32_t,
+ z0 = svamax_f32_m (p0, z1, z0),
+ z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f32_m_untied:
+** movprfx z0, z1
+** famax z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_untied, svfloat32_t,
+ z0 = svamax_f32_m (p0, z1, z2),
+ z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_s4_f32_m_tied1:
+** mov (z[0-9]+\.s), s4
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_tied1, svfloat32_t, float,
+ z0 = svamax_n_f32_m (p0, z0, d4),
+ z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_s4_f32_m_untied:
+** mov (z[0-9]+\.s), s4
+** movprfx z0, z1
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_untied, svfloat32_t, float,
+ z0 = svamax_n_f32_m (p0, z1, d4),
+ z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_2_f32_m:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_m, svfloat32_t,
+ z0 = svamax_n_f32_m (p0, z0, 2),
+ z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied1, svfloat32_t,
+ z0 = svamax_f32_z (p0, z0, z1),
+ z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f32_z_tied2:
+** movprfx z0\.s, p0/z, z0\.s
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied2, svfloat32_t,
+ z0 = svamax_f32_z (p0, z1, z0),
+ z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f32_z_untied:
+** (
+** movprfx z0\.s, p0/z, z1\.s
+** famax z0\.s, p0/m, z0\.s, z2\.s
+** |
+** movprfx z0\.s, p0/z, z2\.s
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_untied, svfloat32_t,
+ z0 = svamax_f32_z (p0, z1, z2),
+ z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_s4_f32_z_tied1:
+** mov (z[0-9]+\.s), s4
+** movprfx z0\.s, p0/z, z0\.s
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_tied1, svfloat32_t, float,
+ z0 = svamax_n_f32_z (p0, z0, d4),
+ z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_s4_f32_z_untied:
+** mov (z[0-9]+\.s), s4
+** (
+** movprfx z0\.s, p0/z, z1\.s
+** famax z0\.s, p0/m, z0\.s, \1
+** |
+** movprfx z0\.s, p0/z, \1
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_untied, svfloat32_t, float,
+ z0 = svamax_n_f32_z (p0, z1, d4),
+ z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_2_f32_z:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** movprfx z0\.s, p0/z, z0\.s
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_z, svfloat32_t,
+ z0 = svamax_n_f32_z (p0, z0, 2),
+ z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f32_x_tied1:
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied1, svfloat32_t,
+ z0 = svamax_f32_x (p0, z0, z1),
+ z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f32_x_tied2:
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied2, svfloat32_t,
+ z0 = svamax_f32_x (p0, z1, z0),
+ z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f32_x_untied:
+** (
+** movprfx z0, z1
+** famax z0\.s, p0/m, z0\.s, z2\.s
+** |
+** movprfx z0, z2
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_untied, svfloat32_t,
+ z0 = svamax_f32_x (p0, z1, z2),
+ z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_s4_f32_x_tied1:
+** mov (z[0-9]+\.s), s4
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_tied1, svfloat32_t, float,
+ z0 = svamax_n_f32_x (p0, z0, d4),
+ z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_s4_f32_x_untied:
+** mov z0\.s, s4
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_untied, svfloat32_t, float,
+ z0 = svamax_n_f32_x (p0, z1, d4),
+ z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_2_f32_x_tied1:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** famax z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_tied1, svfloat32_t,
+ z0 = svamax_n_f32_x (p0, z0, 2),
+ z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f32_x_untied:
+** fmov z0\.s, #2\.0(?:e\+0)?
+** famax z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_untied, svfloat32_t,
+ z0 = svamax_n_f32_x (p0, z1, 2),
+ z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied1, svfloat32_t,
+ z0 = svamax_f32_x (svptrue_b32 (), z0, z1),
+ z0 = svamax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amax_f32_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied2, svfloat32_t,
+ z0 = svamax_f32_x (svptrue_b32 (), z1, z0),
+ z0 = svamax_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amax_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_untied, svfloat32_t,
+ z0 = svamax_f32_x (svptrue_b32 (), z1, z2),
+ z0 = svamax_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amax_0_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_tied1, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z0, 0),
+ z0 = svamax_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amax_0_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_untied, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z1, 0),
+ z0 = svamax_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amax_1_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_tied1, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z0, 1),
+ z0 = svamax_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amax_1_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_untied, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z1, 1),
+ z0 = svamax_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amax_2_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_tied1, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z0, 2),
+ z0 = svamax_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amax_2_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_untied, svfloat32_t,
+ z0 = svamax_n_f32_x (svptrue_b32 (), z1, 2),
+ z0 = svamax_x (svptrue_b32 (), z1, 2))
new file mode 100644
@@ -0,0 +1,312 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f64_m_tied1:
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied1, svfloat64_t,
+ z0 = svamax_f64_m (p0, z0, z1),
+ z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f64_m_tied2:
+** mov (z[0-9]+\.d), z0\.d
+** movprfx z0, z1
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied2, svfloat64_t,
+ z0 = svamax_f64_m (p0, z1, z0),
+ z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f64_m_untied:
+** movprfx z0, z1
+** famax z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_untied, svfloat64_t,
+ z0 = svamax_f64_m (p0, z1, z2),
+ z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_d4_f64_m_tied1:
+** mov (z[0-9]+\.d), d4
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_tied1, svfloat64_t, double,
+ z0 = svamax_n_f64_m (p0, z0, d4),
+ z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_d4_f64_m_untied:
+** mov (z[0-9]+\.d), d4
+** movprfx z0, z1
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_untied, svfloat64_t, double,
+ z0 = svamax_n_f64_m (p0, z1, d4),
+ z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_2_f64_m:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_m, svfloat64_t,
+ z0 = svamax_n_f64_m (p0, z0, 2),
+ z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied1, svfloat64_t,
+ z0 = svamax_f64_z (p0, z0, z1),
+ z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f64_z_tied2:
+** movprfx z0\.d, p0/z, z0\.d
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied2, svfloat64_t,
+ z0 = svamax_f64_z (p0, z1, z0),
+ z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f64_z_untied:
+** (
+** movprfx z0\.d, p0/z, z1\.d
+** famax z0\.d, p0/m, z0\.d, z2\.d
+** |
+** movprfx z0\.d, p0/z, z2\.d
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_untied, svfloat64_t,
+ z0 = svamax_f64_z (p0, z1, z2),
+ z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_d4_f64_z_tied1:
+** mov (z[0-9]+\.d), d4
+** movprfx z0\.d, p0/z, z0\.d
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_tied1, svfloat64_t, double,
+ z0 = svamax_n_f64_z (p0, z0, d4),
+ z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_d4_f64_z_untied:
+** mov (z[0-9]+\.d), d4
+** (
+** movprfx z0\.d, p0/z, z1\.d
+** famax z0\.d, p0/m, z0\.d, \1
+** |
+** movprfx z0\.d, p0/z, \1
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_untied, svfloat64_t, double,
+ z0 = svamax_n_f64_z (p0, z1, d4),
+ z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_2_f64_z:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** movprfx z0\.d, p0/z, z0\.d
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_z, svfloat64_t,
+ z0 = svamax_n_f64_z (p0, z0, 2),
+ z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f64_x_tied1:
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied1, svfloat64_t,
+ z0 = svamax_f64_x (p0, z0, z1),
+ z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f64_x_tied2:
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied2, svfloat64_t,
+ z0 = svamax_f64_x (p0, z1, z0),
+ z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f64_x_untied:
+** (
+** movprfx z0, z1
+** famax z0\.d, p0/m, z0\.d, z2\.d
+** |
+** movprfx z0, z2
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_untied, svfloat64_t,
+ z0 = svamax_f64_x (p0, z1, z2),
+ z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_d4_f64_x_tied1:
+** mov (z[0-9]+\.d), d4
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_tied1, svfloat64_t, double,
+ z0 = svamax_n_f64_x (p0, z0, d4),
+ z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_d4_f64_x_untied:
+** mov z0\.d, d4
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_untied, svfloat64_t, double,
+ z0 = svamax_n_f64_x (p0, z1, d4),
+ z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_2_f64_x_tied1:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** famax z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_tied1, svfloat64_t,
+ z0 = svamax_n_f64_x (p0, z0, 2),
+ z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f64_x_untied:
+** fmov z0\.d, #2\.0(?:e\+0)?
+** famax z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_untied, svfloat64_t,
+ z0 = svamax_n_f64_x (p0, z1, 2),
+ z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied1, svfloat64_t,
+ z0 = svamax_f64_x (svptrue_b64 (), z0, z1),
+ z0 = svamax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amax_f64_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied2, svfloat64_t,
+ z0 = svamax_f64_x (svptrue_b64 (), z1, z0),
+ z0 = svamax_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amax_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_untied, svfloat64_t,
+ z0 = svamax_f64_x (svptrue_b64 (), z1, z2),
+ z0 = svamax_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amax_0_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_tied1, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z0, 0),
+ z0 = svamax_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amax_0_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_untied, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z1, 0),
+ z0 = svamax_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amax_1_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_tied1, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z0, 1),
+ z0 = svamax_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amax_1_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_untied, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z1, 1),
+ z0 = svamax_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amax_2_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_tied1, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z0, 2),
+ z0 = svamax_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amax_2_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_untied, svfloat64_t,
+ z0 = svamax_n_f64_x (svptrue_b64 (), z1, 2),
+ z0 = svamax_x (svptrue_b64 (), z1, 2))
new file mode 100644
@@ -0,0 +1,311 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f16_m_tied1:
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied1, svfloat16_t,
+ z0 = svamin_f16_m (p0, z0, z1),
+ z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** famin z0\.h, p0/m, z0\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied2, svfloat16_t,
+ z0 = svamin_f16_m (p0, z1, z0),
+ z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f16_m_untied:
+** movprfx z0, z1
+** famin z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_untied, svfloat16_t,
+ z0 = svamin_f16_m (p0, z1, z2),
+ z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_h4_f16_m_tied1:
+** mov (z[0-9]+\.h), h4
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_tied1, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_m (p0, z0, d4),
+ z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_h4_f16_m_untied:
+** mov (z[0-9]+\.h), h4
+** movprfx z0, z1
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_untied, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_m (p0, z1, d4),
+ z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_2_f16_m:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_m, svfloat16_t,
+ z0 = svamin_n_f16_m (p0, z0, 2),
+ z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied1, svfloat16_t,
+ z0 = svamin_f16_z (p0, z0, z1),
+ z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f16_z_tied2:
+** movprfx z0\.h, p0/z, z0\.h
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied2, svfloat16_t,
+ z0 = svamin_f16_z (p0, z1, z0),
+ z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f16_z_untied:
+** (
+** movprfx z0\.h, p0/z, z1\.h
+** famin z0\.h, p0/m, z0\.h, z2\.h
+** |
+** movprfx z0\.h, p0/z, z2\.h
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_untied, svfloat16_t,
+ z0 = svamin_f16_z (p0, z1, z2),
+ z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_h4_f16_z_tied1:
+** mov (z[0-9]+\.h), h4
+** movprfx z0\.h, p0/z, z0\.h
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_tied1, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_z (p0, z0, d4),
+ z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_h4_f16_z_untied:
+** mov (z[0-9]+\.h), h4
+** (
+** movprfx z0\.h, p0/z, z1\.h
+** famin z0\.h, p0/m, z0\.h, \1
+** |
+** movprfx z0\.h, p0/z, \1
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_untied, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_z (p0, z1, d4),
+ z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_2_f16_z:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** movprfx z0\.h, p0/z, z0\.h
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_z, svfloat16_t,
+ z0 = svamin_n_f16_z (p0, z0, 2),
+ z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f16_x_tied1:
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied1, svfloat16_t,
+ z0 = svamin_f16_x (p0, z0, z1),
+ z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f16_x_tied2:
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied2, svfloat16_t,
+ z0 = svamin_f16_x (p0, z1, z0),
+ z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f16_x_untied:
+** (
+** movprfx z0, z1
+** famin z0\.h, p0/m, z0\.h, z2\.h
+** |
+** movprfx z0, z2
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_untied, svfloat16_t,
+ z0 = svamin_f16_x (p0, z1, z2),
+ z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_h4_f16_x_tied1:
+** mov (z[0-9]+\.h), h4
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_tied1, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_x (p0, z0, d4),
+ z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_h4_f16_x_untied:
+** mov z0\.h, h4
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_untied, svfloat16_t, __fp16,
+ z0 = svamin_n_f16_x (p0, z1, d4),
+ z0 = svamin_x (p0, z1, d4))
+/*
+** amin_2_f16_x_tied1:
+** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
+** famin z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_tied1, svfloat16_t,
+ z0 = svamin_n_f16_x (p0, z0, 2),
+ z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f16_x_untied:
+** fmov z0\.h, #2\.0(?:e\+0)?
+** famin z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_untied, svfloat16_t,
+ z0 = svamin_n_f16_x (p0, z1, 2),
+ z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied1, svfloat16_t,
+ z0 = svamin_f16_x (svptrue_b16 (), z0, z1),
+ z0 = svamin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amin_f16_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied2, svfloat16_t,
+ z0 = svamin_f16_x (svptrue_b16 (), z1, z0),
+ z0 = svamin_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amin_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_untied, svfloat16_t,
+ z0 = svamin_f16_x (svptrue_b16 (), z1, z2),
+ z0 = svamin_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amin_0_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_tied1, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z0, 0),
+ z0 = svamin_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amin_0_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_untied, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z1, 0),
+ z0 = svamin_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amin_1_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_tied1, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z0, 1),
+ z0 = svamin_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amin_1_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_untied, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z1, 1),
+ z0 = svamin_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amin_2_f16_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_tied1, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z0, 2),
+ z0 = svamin_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amin_2_f16_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_untied, svfloat16_t,
+ z0 = svamin_n_f16_x (svptrue_b16 (), z1, 2),
+ z0 = svamin_x (svptrue_b16 (), z1, 2))
new file mode 100644
@@ -0,0 +1,312 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax" /* Enable the SVE FAMIN/FAMAX instructions under test.  */
+
+/*
+** amin_f32_m_tied1:
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied1, svfloat32_t,
+ z0 = svamin_f32_m (p0, z0, z1),
+ z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** famin z0\.s, p0/m, z0\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied2, svfloat32_t,
+ z0 = svamin_f32_m (p0, z1, z0),
+ z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f32_m_untied:
+** movprfx z0, z1
+** famin z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_untied, svfloat32_t,
+ z0 = svamin_f32_m (p0, z1, z2),
+ z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_s4_f32_m_tied1:
+** mov (z[0-9]+\.s), s4
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_tied1, svfloat32_t, float,
+ z0 = svamin_n_f32_m (p0, z0, d4),
+ z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_s4_f32_m_untied:
+** mov (z[0-9]+\.s), s4
+** movprfx z0, z1
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_untied, svfloat32_t, float,
+ z0 = svamin_n_f32_m (p0, z1, d4),
+ z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_2_f32_m:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_m, svfloat32_t,
+ z0 = svamin_n_f32_m (p0, z0, 2),
+ z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied1, svfloat32_t,
+ z0 = svamin_f32_z (p0, z0, z1),
+ z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f32_z_tied2:
+** movprfx z0\.s, p0/z, z0\.s
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied2, svfloat32_t,
+ z0 = svamin_f32_z (p0, z1, z0),
+ z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f32_z_untied:
+** (
+** movprfx z0\.s, p0/z, z1\.s
+** famin z0\.s, p0/m, z0\.s, z2\.s
+** |
+** movprfx z0\.s, p0/z, z2\.s
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_untied, svfloat32_t,
+ z0 = svamin_f32_z (p0, z1, z2),
+ z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_s4_f32_z_tied1:
+** mov (z[0-9]+\.s), s4
+** movprfx z0\.s, p0/z, z0\.s
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_tied1, svfloat32_t, float,
+ z0 = svamin_n_f32_z (p0, z0, d4),
+ z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_s4_f32_z_untied:
+** mov (z[0-9]+\.s), s4
+** (
+** movprfx z0\.s, p0/z, z1\.s
+** famin z0\.s, p0/m, z0\.s, \1
+** |
+** movprfx z0\.s, p0/z, \1
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_untied, svfloat32_t, float,
+ z0 = svamin_n_f32_z (p0, z1, d4),
+ z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_2_f32_z:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** movprfx z0\.s, p0/z, z0\.s
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_z, svfloat32_t,
+ z0 = svamin_n_f32_z (p0, z0, 2),
+ z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f32_x_tied1:
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied1, svfloat32_t,
+ z0 = svamin_f32_x (p0, z0, z1),
+ z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f32_x_tied2:
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied2, svfloat32_t,
+ z0 = svamin_f32_x (p0, z1, z0),
+ z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f32_x_untied:
+** (
+** movprfx z0, z1
+** famin z0\.s, p0/m, z0\.s, z2\.s
+** |
+** movprfx z0, z2
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_untied, svfloat32_t,
+ z0 = svamin_f32_x (p0, z1, z2),
+ z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_s4_f32_x_tied1:
+** mov (z[0-9]+\.s), s4
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_tied1, svfloat32_t, float,
+ z0 = svamin_n_f32_x (p0, z0, d4),
+ z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_s4_f32_x_untied:
+** mov z0\.s, s4
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_untied, svfloat32_t, float,
+ z0 = svamin_n_f32_x (p0, z1, d4),
+ z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_2_f32_x_tied1:
+** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
+** famin z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_tied1, svfloat32_t,
+ z0 = svamin_n_f32_x (p0, z0, 2),
+ z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f32_x_untied:
+** fmov z0\.s, #2\.0(?:e\+0)?
+** famin z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_untied, svfloat32_t,
+ z0 = svamin_n_f32_x (p0, z1, 2),
+ z0 = svamin_x (p0, z1, 2))
+/* NOTE(review): the ptrue_* patterns below are deliberately loose - they only require a PTRUE instruction somewhere in the body.  */
+/*
+** ptrue_amin_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied1, svfloat32_t,
+ z0 = svamin_f32_x (svptrue_b32 (), z0, z1),
+ z0 = svamin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amin_f32_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied2, svfloat32_t,
+ z0 = svamin_f32_x (svptrue_b32 (), z1, z0),
+ z0 = svamin_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amin_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_untied, svfloat32_t,
+ z0 = svamin_f32_x (svptrue_b32 (), z1, z2),
+ z0 = svamin_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amin_0_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_tied1, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z0, 0),
+ z0 = svamin_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amin_0_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_untied, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z1, 0),
+ z0 = svamin_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amin_1_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_tied1, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z0, 1),
+ z0 = svamin_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amin_1_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_untied, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z1, 1),
+ z0 = svamin_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amin_2_f32_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_tied1, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z0, 2),
+ z0 = svamin_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amin_2_f32_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_untied, svfloat32_t,
+ z0 = svamin_n_f32_x (svptrue_b32 (), z1, 2),
+ z0 = svamin_x (svptrue_b32 (), z1, 2))
new file mode 100644
@@ -0,0 +1,312 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax" /* Enable the SVE FAMIN/FAMAX instructions under test.  */
+
+/*
+** amin_f64_m_tied1:
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied1, svfloat64_t,
+ z0 = svamin_f64_m (p0, z0, z1),
+ z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f64_m_tied2:
+** mov (z[0-9]+\.d), z0\.d
+** movprfx z0, z1
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied2, svfloat64_t,
+ z0 = svamin_f64_m (p0, z1, z0),
+ z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f64_m_untied:
+** movprfx z0, z1
+** famin z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_untied, svfloat64_t,
+ z0 = svamin_f64_m (p0, z1, z2),
+ z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_d4_f64_m_tied1:
+** mov (z[0-9]+\.d), d4
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_tied1, svfloat64_t, double,
+ z0 = svamin_n_f64_m (p0, z0, d4),
+ z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_d4_f64_m_untied:
+** mov (z[0-9]+\.d), d4
+** movprfx z0, z1
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_untied, svfloat64_t, double,
+ z0 = svamin_n_f64_m (p0, z1, d4),
+ z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_2_f64_m:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_m, svfloat64_t,
+ z0 = svamin_n_f64_m (p0, z0, 2),
+ z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied1, svfloat64_t,
+ z0 = svamin_f64_z (p0, z0, z1),
+ z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f64_z_tied2:
+** movprfx z0\.d, p0/z, z0\.d
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied2, svfloat64_t,
+ z0 = svamin_f64_z (p0, z1, z0),
+ z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f64_z_untied:
+** (
+** movprfx z0\.d, p0/z, z1\.d
+** famin z0\.d, p0/m, z0\.d, z2\.d
+** |
+** movprfx z0\.d, p0/z, z2\.d
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_untied, svfloat64_t,
+ z0 = svamin_f64_z (p0, z1, z2),
+ z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_d4_f64_z_tied1:
+** mov (z[0-9]+\.d), d4
+** movprfx z0\.d, p0/z, z0\.d
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_tied1, svfloat64_t, double,
+ z0 = svamin_n_f64_z (p0, z0, d4),
+ z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_d4_f64_z_untied:
+** mov (z[0-9]+\.d), d4
+** (
+** movprfx z0\.d, p0/z, z1\.d
+** famin z0\.d, p0/m, z0\.d, \1
+** |
+** movprfx z0\.d, p0/z, \1
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_untied, svfloat64_t, double,
+ z0 = svamin_n_f64_z (p0, z1, d4),
+ z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_2_f64_z:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** movprfx z0\.d, p0/z, z0\.d
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_z, svfloat64_t,
+ z0 = svamin_n_f64_z (p0, z0, 2),
+ z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f64_x_tied1:
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied1, svfloat64_t,
+ z0 = svamin_f64_x (p0, z0, z1),
+ z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f64_x_tied2:
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied2, svfloat64_t,
+ z0 = svamin_f64_x (p0, z1, z0),
+ z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f64_x_untied:
+** (
+** movprfx z0, z1
+** famin z0\.d, p0/m, z0\.d, z2\.d
+** |
+** movprfx z0, z2
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_untied, svfloat64_t,
+ z0 = svamin_f64_x (p0, z1, z2),
+ z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_d4_f64_x_tied1:
+** mov (z[0-9]+\.d), d4
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_tied1, svfloat64_t, double,
+ z0 = svamin_n_f64_x (p0, z0, d4),
+ z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_d4_f64_x_untied:
+** mov z0\.d, d4
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_untied, svfloat64_t, double,
+ z0 = svamin_n_f64_x (p0, z1, d4),
+ z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_2_f64_x_tied1:
+** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
+** famin z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_tied1, svfloat64_t,
+ z0 = svamin_n_f64_x (p0, z0, 2),
+ z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f64_x_untied:
+** fmov z0\.d, #2\.0(?:e\+0)?
+** famin z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_untied, svfloat64_t,
+ z0 = svamin_n_f64_x (p0, z1, 2),
+ z0 = svamin_x (p0, z1, 2))
+/* NOTE(review): the ptrue_* patterns below are deliberately loose - they only require a PTRUE instruction somewhere in the body.  */
+/*
+** ptrue_amin_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied1, svfloat64_t,
+ z0 = svamin_f64_x (svptrue_b64 (), z0, z1),
+ z0 = svamin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amin_f64_x_tied2:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied2, svfloat64_t,
+ z0 = svamin_f64_x (svptrue_b64 (), z1, z0),
+ z0 = svamin_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amin_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_untied, svfloat64_t,
+ z0 = svamin_f64_x (svptrue_b64 (), z1, z2),
+ z0 = svamin_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amin_0_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_tied1, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z0, 0),
+ z0 = svamin_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amin_0_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_untied, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z1, 0),
+ z0 = svamin_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amin_1_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_tied1, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z0, 1),
+ z0 = svamin_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amin_1_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_untied, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z1, 1),
+ z0 = svamin_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amin_2_f64_x_tied1:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_tied1, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z0, 2),
+ z0 = svamin_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amin_2_f64_x_untied:
+** ...
+** ptrue p[0-9]+\.b[^\n]*
+** ...
+** ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_untied, svfloat64_t,
+ z0 = svamin_n_f64_x (svptrue_b64 (), z1, 2),
+ z0 = svamin_x (svptrue_b64 (), z1, 2))