@@ -722,6 +722,269 @@ _mm_maskz_div_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
(A), (D)))
#endif /* __OPTIMIZE__ */
+/* Intrinsics vmaxph, vminph.  */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_ph (__m512h __A, __m512h __B)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_ph (__m512h __A, __m512h __B)
+{
+ return __builtin_ia32_vminph_v32hf_mask (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D)
+{
+ return __builtin_ia32_vminph_v32hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_ph (__mmask32 __A, __m512h __B, __m512h __C)
+{
+ return __builtin_ia32_vminph_v32hf_mask (__B, __C,
+ _mm512_setzero_ph (), __A);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_vmaxph_v32hf_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_round_ph (__m512h __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_vminph_v32hf_mask_round (__A, __B,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __C);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ __m512h __D, const int __E)
+{
+ return __builtin_ia32_vminph_v32hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_round_ph (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_vminph_v32hf_mask_round (__B, __C,
+ _mm512_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm512_max_round_ph(A, B, C) \
+ (__builtin_ia32_vmaxph_v32hf_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_max_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_vmaxph_v32hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_max_round_ph(A, B, C, D) \
+ (__builtin_ia32_vmaxph_v32hf_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+
+#define _mm512_min_round_ph(A, B, C) \
+ (__builtin_ia32_vminph_v32hf_mask_round ((A), (B), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (C)))
+
+#define _mm512_mask_min_round_ph(A, B, C, D, E) \
+ (__builtin_ia32_vminph_v32hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm512_maskz_min_round_ph(A, B, C, D) \
+ (__builtin_ia32_vminph_v32hf_mask_round ((B), (C), \
+ _mm512_setzero_ph (), \
+ (A), (D)))
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vmaxsh, vminsh.  */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_sh (__m128h __A, __m128h __B)
+{
+ __A[0] = __A[0] > __B[0] ? __A[0] : __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vmaxsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vmaxsh_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_sh (__m128h __A, __m128h __B)
+{
+ __A[0] = __A[0] < __B[0] ? __A[0] : __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vminsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vminsh_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vmaxsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vmaxsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vmaxsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vminsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vminsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vminsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm_max_round_sh(A, B, C) \
+ (__builtin_ia32_vmaxsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_max_round_sh(A, B, C, D, E) \
+ (__builtin_ia32_vmaxsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_max_round_sh(A, B, C, D) \
+ (__builtin_ia32_vmaxsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#define _mm_min_round_sh(A, B, C) \
+ (__builtin_ia32_vminsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_min_round_sh(A, B, C, D, E) \
+ (__builtin_ia32_vminsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_min_round_sh(A, B, C, D) \
+ (__builtin_ia32_vminsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#endif /* __OPTIMIZE__ */
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
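
For illustration only (not part of the patch): a minimal usage sketch of the new 512-bit and scalar FP16 max/min intrinsics added above, including an explicit-rounding (SAE) form.  It assumes an AVX512FP16-capable compiler and target, that _mm512_set1_ph and _mm_set_sh are provided by the same header, and uses the usual _MM_FROUND_NO_EXC constant; build with -mavx512fp16.  Since these operations are SAE-only (see the round_saeonly patterns later in the patch), the rounding argument may only be _MM_FROUND_NO_EXC or _MM_FROUND_CUR_DIRECTION.

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m512h a = _mm512_set1_ph ((_Float16) 1.5f);
  __m512h b = _mm512_set1_ph ((_Float16) 2.5f);

  /* Full-width max/min: every element of mx is 2.5, of mn 1.5.  */
  __m512h mx = _mm512_max_ph (a, b);
  __m512h mn = _mm512_min_ph (a, b);

  /* Zero-masked variant: only element 0 is computed, the rest are 0.  */
  __m512h mz = _mm512_maskz_max_ph ((__mmask32) 0x1, a, b);

  /* SAE form: suppress exceptions, no rounding-mode override.  */
  __m512h mr = _mm512_max_round_ph (a, b, _MM_FROUND_NO_EXC);

  /* Scalar forms operate on element 0 and keep the upper elements of
     the first source operand.  */
  __m128h sa = _mm_set_sh ((_Float16) 3.0f);
  __m128h sb = _mm_set_sh ((_Float16) 4.0f);
  __m128h sm = _mm_max_sh (sa, sb);

  printf ("%f %f %f %f %f\n", (double) mx[0], (double) mn[0],
	  (double) mz[0], (double) mr[0], (double) sm[0]);
  return 0;
}
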
@@ -211,6 +211,103 @@ _mm256_maskz_div_ph (__mmask16 __A, __m256h __B, __m256h __C)
_mm256_setzero_ph (), __A);
}
+/* Intrinsics vmaxph, vminph (128/256-bit).  */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_ph (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vmaxph_v8hf_mask (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_ph (__m256h __A, __m256h __B)
+{
+ return __builtin_ia32_vmaxph_v16hf_mask (__A, __B,
+ _mm256_setzero_ph (),
+ (__mmask16) -1);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vmaxph_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D)
+{
+ return __builtin_ia32_vmaxph_v16hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_ph (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vmaxph_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_ph (__mmask16 __A, __m256h __B, __m256h __C)
+{
+ return __builtin_ia32_vmaxph_v16hf_mask (__B, __C,
+ _mm256_setzero_ph (), __A);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_ph (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vminph_v8hf_mask (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_ph (__m256h __A, __m256h __B)
+{
+ return __builtin_ia32_vminph_v16hf_mask (__A, __B,
+ _mm256_setzero_ph (),
+ (__mmask16) -1);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vminph_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D)
+{
+ return __builtin_ia32_vminph_v16hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_ph (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vminph_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_ph (__mmask16 __A, __m256h __B, __m256h __C)
+{
+ return __builtin_ia32_vminph_v16hf_mask (__B, __C,
+ _mm256_setzero_ph (), __A);
+}
+
#ifdef __DISABLE_AVX512FP16VL__
#undef __DISABLE_AVX512FP16VL__
#pragma GCC pop_options
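
Again for illustration only: the 128/256-bit forms above additionally require AVX512VL (see the OPTION_MASK_ISA_AVX512VL BDESC entries later in the patch).  A hypothetical snippet, assuming _mm_set1_ph and _mm256_set1_ph are available from the same headers; build with -mavx512fp16 -mavx512vl.

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m256h a = _mm256_set1_ph ((_Float16) -1.0f);
  __m256h b = _mm256_set1_ph ((_Float16) 0.5f);

  /* 256-bit max with merge-masking: elements whose mask bit is clear
     come from the destination operand d, the others get max(a, b).  */
  __m256h d = _mm256_set1_ph ((_Float16) 9.0f);
  __m256h m = _mm256_mask_max_ph (d, (__mmask16) 0xaaaa, a, b);

  /* 128-bit zero-masked min: elements 0..3 get min(x, y), 4..7 are 0.  */
  __m128h x = _mm_set1_ph ((_Float16) 2.0f);
  __m128h y = _mm_set1_ph ((_Float16) 7.0f);
  __m128h n = _mm_maskz_min_ph ((__mmask8) 0x0f, x, y);

  printf ("%f %f %f\n", (double) m[0], (double) m[1], (double) n[0]);
  return 0;
}
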
@@ -1304,9 +1304,11 @@ DEF_FUNCTION_TYPE (UINT8, PV2DI, PCV2DI, PCVOID)
# FP16 builtins
DEF_FUNCTION_TYPE (V8HF, V8HI)
+DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI)
@@ -2791,6 +2791,14 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask, "__b
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask, "__builtin_ia32_vsubsh_v8hf_mask", IX86_BUILTIN_VSUBSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask, "__builtin_ia32_vmulsh_v8hf_mask", IX86_BUILTIN_VMULSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask, "__builtin_ia32_vdivsh_v8hf_mask", IX86_BUILTIN_VDIVSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv8hf3_mask, "__builtin_ia32_vmaxph_v8hf_mask", IX86_BUILTIN_VMAXPH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv16hf3_mask, "__builtin_ia32_vmaxph_v16hf_mask", IX86_BUILTIN_VMAXPH_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv32hf3_mask, "__builtin_ia32_vmaxph_v32hf_mask", IX86_BUILTIN_VMAXPH_V32HF_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv8hf3_mask, "__builtin_ia32_vminph_v8hf_mask", IX86_BUILTIN_VMINPH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv16hf3_mask, "__builtin_ia32_vminph_v16hf_mask", IX86_BUILTIN_VMINPH_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask, "__builtin_ia32_vminph_v32hf_mask", IX86_BUILTIN_VMINPH_V32HF_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask, "__builtin_ia32_vmaxsh_v8hf_mask", IX86_BUILTIN_VMAXSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask, "__builtin_ia32_vminsh_v8hf_mask", IX86_BUILTIN_VMINSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
/* Builtins with rounding support. */
BDESC_END (ARGS, ROUND_ARGS)
@@ -3000,6 +3008,10 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask_round
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask_round, "__builtin_ia32_vsubsh_v8hf_mask_round", IX86_BUILTIN_VSUBSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask_round, "__builtin_ia32_vmulsh_v8hf_mask_round", IX86_BUILTIN_VMULSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask_round, "__builtin_ia32_vdivsh_v8hf_mask_round", IX86_BUILTIN_VDIVSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv32hf3_mask_round, "__builtin_ia32_vmaxph_v32hf_mask_round", IX86_BUILTIN_VMAXPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask_round, "__builtin_ia32_vminph_v32hf_mask_round", IX86_BUILTIN_VMINPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask_round, "__builtin_ia32_vmaxsh_v8hf_mask_round", IX86_BUILTIN_VMAXSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask_round, "__builtin_ia32_vminsh_v8hf_mask_round", IX86_BUILTIN_VMINSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -9349,12 +9349,14 @@ ix86_expand_args_builtin (const struct builtin_description *d,
case FLOAT128_FTYPE_FLOAT128_FLOAT128:
case V16QI_FTYPE_V16QI_V16QI:
case V16QI_FTYPE_V8HI_V8HI:
+ case V16HF_FTYPE_V16HF_V16HF:
case V16SF_FTYPE_V16SF_V16SF:
case V8QI_FTYPE_V8QI_V8QI:
case V8QI_FTYPE_V4HI_V4HI:
case V8HI_FTYPE_V8HI_V8HI:
case V8HI_FTYPE_V16QI_V16QI:
case V8HI_FTYPE_V4SI_V4SI:
+ case V8HF_FTYPE_V8HF_V8HF:
case V8SF_FTYPE_V8SF_V8SF:
case V8SF_FTYPE_V8SF_V8SI:
case V8DF_FTYPE_V8DF_V8DF:
@@ -2384,11 +2384,12 @@ (define_insn "*sse_vmrsqrtv4sf2"
(set_attr "mode" "SF")])
(define_expand "<code><mode>3<mask_name><round_saeonly_name>"
- [(set (match_operand:VF 0 "register_operand")
- (smaxmin:VF
- (match_operand:VF 1 "<round_saeonly_nimm_predicate>")
- (match_operand:VF 2 "<round_saeonly_nimm_predicate>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
+ [(set (match_operand:VFH 0 "register_operand")
+ (smaxmin:VFH
+ (match_operand:VFH 1 "<round_saeonly_nimm_predicate>")
+ (match_operand:VFH 2 "<round_saeonly_nimm_predicate>")))]
+ "TARGET_SSE && <mask_mode512bit_condition>
+ && <round_saeonly_mode512bit_condition>"
{
if (!flag_finite_math_only || flag_signed_zeros)
{
@@ -2409,13 +2410,14 @@ (define_expand "<code><mode>3<mask_name><round_saeonly_name>"
;; are undefined in this condition, we're certain this is correct.
(define_insn "*<code><mode>3<mask_name><round_saeonly_name>"
- [(set (match_operand:VF 0 "register_operand" "=x,v")
- (smaxmin:VF
- (match_operand:VF 1 "<round_saeonly_nimm_predicate>" "%0,v")
- (match_operand:VF 2 "<round_saeonly_nimm_predicate>" "xBm,<round_saeonly_constraint>")))]
+ [(set (match_operand:VFH 0 "register_operand" "=x,v")
+ (smaxmin:VFH
+ (match_operand:VFH 1 "<round_saeonly_nimm_predicate>" "%0,v")
+ (match_operand:VFH 2 "<round_saeonly_nimm_predicate>" "xBm,<round_saeonly_constraint>")))]
"TARGET_SSE
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))
- && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
+ && <mask_mode512bit_condition>
+ && <round_saeonly_mode512bit_condition>"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -2432,13 +2434,14 @@ (define_insn "*<code><mode>3<mask_name><round_saeonly_name>"
;; presence of -0.0 and NaN.
(define_insn "ieee_<ieee_maxmin><mode>3<mask_name><round_saeonly_name>"
- [(set (match_operand:VF 0 "register_operand" "=x,v")
- (unspec:VF
- [(match_operand:VF 1 "register_operand" "0,v")
- (match_operand:VF 2 "<round_saeonly_nimm_predicate>" "xBm,<round_saeonly_constraint>")]
+ [(set (match_operand:VFH 0 "register_operand" "=x,v")
+ (unspec:VFH
+ [(match_operand:VFH 1 "register_operand" "0,v")
+ (match_operand:VFH 2 "<round_saeonly_nimm_predicate>" "xBm,<round_saeonly_constraint>")]
IEEE_MAXMIN))]
"TARGET_SSE
- && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
+ && <mask_mode512bit_condition>
+ && <round_saeonly_mode512bit_condition>"
"@
<ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
v<ieee_maxmin><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -2473,11 +2476,11 @@ (define_insn "*ieee_<ieee_maxmin><mode>3"
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_vm<code><mode>3<mask_scalar_name><round_saeonly_scalar_name>"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (smaxmin:VF_128
- (match_operand:VF_128 1 "register_operand" "0,v")
- (match_operand:VF_128 2 "nonimmediate_operand" "xm,<round_saeonly_scalar_constraint>"))
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (smaxmin:VFH_128
+ (match_operand:VFH_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 2 "nonimmediate_operand" "xm,<round_saeonly_scalar_constraint>"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
@@ -187,7 +187,9 @@ (define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimm
(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode
|| <MODE>mode == V8DFmode
|| <MODE>mode == V8DImode
- || <MODE>mode == V16SImode)")
+ || <MODE>mode == V16SImode
+ || <MODE>mode == V32HFmode)")
+
(define_subst_attr "round_saeonly_modev8sf_condition" "round_saeonly" "1" "(<MODE>mode == V8SFmode)")
(define_subst "round_saeonly"
@@ -694,6 +694,10 @@
#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
@@ -711,6 +711,10 @@
#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
@@ -676,6 +676,10 @@ test_2 (_mm_add_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_sub_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_mul_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_div_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_max_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm512_min_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_max_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -684,6 +688,10 @@ test_3 (_mm_maskz_add_round_sh, __m128h, __mmask32, __m128h, __m128h, 8)
test_3 (_mm_maskz_sub_round_sh, __m128h, __mmask32, __m128h, __m128h, 8)
test_3 (_mm_maskz_mul_round_sh, __m128h, __mmask32, __m128h, __m128h, 8)
test_3 (_mm_maskz_div_round_sh, __m128h, __mmask32, __m128h, __m128h, 8)
+test_3 (_mm512_maskz_max_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm512_maskz_min_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -692,6 +700,10 @@ test_4 (_mm_mask_add_round_sh, __m128h, __m128h, __mmask32, __m128h, __m128h, 8)
test_4 (_mm_mask_sub_round_sh, __m128h, __m128h, __mmask32, __m128h, __m128h, 8)
test_4 (_mm_mask_mul_round_sh, __m128h, __m128h, __mmask32, __m128h, __m128h, 8)
test_4 (_mm_mask_div_round_sh, __m128h, __m128h, __mmask32, __m128h, __m128h, 8)
+test_4 (_mm512_mask_max_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
@@ -781,6 +781,10 @@ test_2 (_mm_add_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_sub_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_mul_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_div_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_max_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm512_min_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_max_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -789,6 +793,10 @@ test_3 (_mm_maskz_add_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_sub_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_mul_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_div_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm512_maskz_max_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm512_maskz_min_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -797,6 +805,10 @@ test_4 (_mm_mask_add_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_sub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_mul_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_div_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm512_mask_max_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
@@ -712,6 +712,10 @@
#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)