@@ -985,6 +985,256 @@ _mm_maskz_min_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
#endif /* __OPTIMIZE__ */
+/* Intrinsics vcmpph. */
+#ifdef __OPTIMIZE__
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_ph_mask (__m512h __A, __m512h __B, const int __C)
+{
+ return (__mmask32) __builtin_ia32_vcmpph_v32hf_mask (__A, __B, __C,
+ (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D)
+{
+ return (__mmask32) __builtin_ia32_vcmpph_v32hf_mask (__B, __C, __D,
+ __A);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_round_ph_mask (__m512h __A, __m512h __B, const int __C,
+ const int __D)
+{
+ return (__mmask32) __builtin_ia32_vcmpph_v32hf_mask_round (__A, __B,
+ __C, (__mmask32) -1,
+ __D);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_round_ph_mask (__mmask32 __A, __m512h __B, __m512h __C,
+ const int __D, const int __E)
+{
+ return (__mmask32) __builtin_ia32_vcmpph_v32hf_mask_round (__B, __C,
+ __D, __A,
+ __E);
+}
+
+#else
+#define _mm512_cmp_ph_mask(A, B, C) \
+ (__builtin_ia32_vcmpph_v32hf_mask ((A), (B), (C), (-1)))
+
+#define _mm512_mask_cmp_ph_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpph_v32hf_mask ((B), (C), (D), (A)))
+
+#define _mm512_cmp_round_ph_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpph_v32hf_mask_round ((A), (B), (C), (-1), (D)))
+
+#define _mm512_mask_cmp_round_ph_mask(A, B, C, D, E) \
+ (__builtin_ia32_vcmpph_v32hf_mask_round ((B), (C), (D), (A), (E)))
+
+#endif /* __OPTIMIZE__ */
+
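[annotation] For orientation, a minimal usage sketch of the 512-bit packed compare above (not part of the patch; count_equal_ph is a hypothetical helper and needs a compiler with -mavx512fp16):

#include <immintrin.h>

/* Count how many of the 32 _Float16 lanes of A equal the
   corresponding lane of B.  _CMP_EQ_OQ (0) is the ordered,
   quiet equality predicate.  */
static int
count_equal_ph (__m512h a, __m512h b)
{
  __mmask32 k = _mm512_cmp_ph_mask (a, b, _CMP_EQ_OQ);
  return __builtin_popcount ((unsigned int) k);
}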
+/* Intrinsics vcmpsh. */
+#ifdef __OPTIMIZE__
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_sh_mask (__m128h __A, __m128h __B, const int __C)
+{
+ return (__mmask8)
+ __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B,
+ __C, (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_sh_mask (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return (__mmask8)
+ __builtin_ia32_vcmpsh_v8hf_mask_round (__B, __C,
+ __D, __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_round_sh_mask (__m128h __A, __m128h __B, const int __C,
+ const int __D)
+{
+ return (__mmask8) __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B,
+ __C, (__mmask8) -1,
+ __D);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_round_sh_mask (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D, const int __E)
+{
+ return (__mmask8) __builtin_ia32_vcmpsh_v8hf_mask_round (__B, __C,
+ __D, __A,
+ __E);
+}
+
+#else
+#define _mm_cmp_sh_mask(A, B, C) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((A), (B), (C), (-1), \
+ (_MM_FROUND_CUR_DIRECTION)))
+
+#define _mm_mask_cmp_sh_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((B), (C), (D), (A), \
+ (_MM_FROUND_CUR_DIRECTION)))
+
+#define _mm_cmp_round_sh_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((A), (B), (C), (-1), (D)))
+
+#define _mm_mask_cmp_round_sh_mask(A, B, C, D, E) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((B), (C), (D), (A), (E)))
+
+#endif /* __OPTIMIZE__ */
+
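[annotation] The round variants of a compare carry no rounding mode proper, only exception suppression (SAE); a hedged sketch with a hypothetical helper:

#include <immintrin.h>

/* _MM_FROUND_NO_EXC suppresses floating-point exceptions for the
   compare, which matters with signaling predicates such as
   _CMP_LT_OS; _MM_FROUND_CUR_DIRECTION is the usual other valid
   value here.  */
static __mmask8
lt_quietly (__m128h a, __m128h b)
{
  return _mm_cmp_round_sh_mask (a, b, _CMP_LT_OS, _MM_FROUND_NO_EXC);
}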
+/* Intrinsics vcomish. */
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comieq_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_EQ_OS,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comilt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_LT_OS,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comile_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_LE_OS,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comigt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_GT_OS,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comige_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_GE_OS,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comineq_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_NEQ_US,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomieq_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_EQ_OQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomilt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_LT_OQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomile_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_LE_OQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomigt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_GT_OQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomige_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_GE_OQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomineq_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, _CMP_NEQ_UQ,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
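[annotation] Note the predicate choice above: the comi* forms use ordered, signaling predicates (_CMP_*_OS, _CMP_NEQ_US), so a QNaN operand raises the invalid exception, while the ucomi* forms use quiet predicates (_CMP_*_OQ, _CMP_NEQ_UQ). A hedged usage sketch (hypothetical helper):

#include <immintrin.h>

/* Returns 1 if lane 0 of A is less than lane 0 of B, else 0;
   raises the invalid exception if either lane 0 is NaN.  */
static int
first_lane_less (__m128h a, __m128h b)
{
  return _mm_comilt_sh (a, b);
}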
+#ifdef __OPTIMIZE__
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comi_sh (__m128h __A, __m128h __B, const int __P)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, __P,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comi_round_sh (__m128h __A, __m128h __B, const int __P, const int __R)
+{
+ return __builtin_ia32_vcmpsh_v8hf_mask_round (__A, __B, __P,
+ (__mmask8) -1, __R);
+}
+
+#else
+#define _mm_comi_round_sh(A, B, P, R) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((A), (B), (P), (__mmask8) (-1), (R)))
+#define _mm_comi_sh(A, B, P) \
+ (__builtin_ia32_vcmpsh_v8hf_mask_round ((A), (B), (P), (__mmask8) (-1), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#endif /* __OPTIMIZE__ */
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
@@ -308,6 +308,56 @@ _mm256_maskz_min_ph (__mmask16 __A, __m256h __B, __m256h __C)
_mm256_setzero_ph (), __A);
}
+/* Intrinsics vcmpph. */
+#ifdef __OPTIMIZE__
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_ph_mask (__m128h __A, __m128h __B, const int __C)
+{
+ return (__mmask8) __builtin_ia32_vcmpph_v8hf_mask (__A, __B, __C,
+ (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_ph_mask (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return (__mmask8) __builtin_ia32_vcmpph_v8hf_mask (__B, __C, __D, __A);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_ph_mask (__m256h __A, __m256h __B, const int __C)
+{
+ return (__mmask16) __builtin_ia32_vcmpph_v16hf_mask (__A, __B, __C,
+ (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_ph_mask (__mmask16 __A, __m256h __B, __m256h __C,
+ const int __D)
+{
+ return (__mmask16) __builtin_ia32_vcmpph_v16hf_mask (__B, __C, __D,
+ __A);
+}
+
+#else
+#define _mm_cmp_ph_mask(A, B, C) \
+ (__builtin_ia32_vcmpph_v8hf_mask ((A), (B), (C), (-1)))
+
+#define _mm_mask_cmp_ph_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpph_v8hf_mask ((B), (C), (D), (A)))
+
+#define _mm256_cmp_ph_mask(A, B, C) \
+ (__builtin_ia32_vcmpph_v16hf_mask ((A), (B), (C), (-1)))
+
+#define _mm256_mask_cmp_ph_mask(A, B, C, D) \
+ (__builtin_ia32_vcmpph_v16hf_mask ((B), (C), (D), (A)))
+
+#endif /* __OPTIMIZE__ */
+
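[annotation] A hedged sketch of the new VL compares (hypothetical helper; requires -mavx512fp16 and -mavx512vl):

#include <immintrin.h>

/* Compare only the even lanes; result bits whose write-mask bit is
   clear come out as 0.  */
static __mmask16
gt_even_lanes (__m256h a, __m256h b)
{
  return _mm256_mask_cmp_ph_mask (0x5555, a, b, _CMP_GT_OS);
}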
#ifdef __DISABLE_AVX512FP16VL__
#undef __DISABLE_AVX512FP16VL__
#pragma GCC pop_options
@@ -1306,10 +1306,15 @@ DEF_FUNCTION_TYPE (UINT8, PV2DI, PCV2DI, PCVOID)
DEF_FUNCTION_TYPE (V8HF, V8HI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT)
+DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI)
+DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI, INT)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI, INT)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF)
+DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT)
+DEF_FUNCTION_TYPE (USI, V32HF, V32HF, INT, USI)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI)
+DEF_FUNCTION_TYPE (USI, V32HF, V32HF, INT, USI, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI, INT)
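[annotation] Reading the type codes: the first name is the return type, the rest are argument types, so UQI_FTYPE_V8HF_V8HF_INT_UQI is the shape sketched below (typedef names are illustrative, not GCC internals; assumes a compiler with _Float16 support):

/* Two half-precision vectors, a compare-predicate immediate and a
   write mask, yielding an 8-bit mask.  */
typedef _Float16 v8hf_t __attribute__ ((__vector_size__ (16)));
typedef unsigned char (*cmp_v8hf_fn) (v8hf_t, v8hf_t, int, unsigned char);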
@@ -2799,6 +2799,9 @@ BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv16hf
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask, "__builtin_ia32_vminph_v32hf_mask", IX86_BUILTIN_VMINPH_V32HF_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask, "__builtin_ia32_vmaxsh_v8hf_mask", IX86_BUILTIN_VMAXSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask, "__builtin_ia32_vminsh_v8hf_mask", IX86_BUILTIN_VMINSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_cmpv8hf3_mask, "__builtin_ia32_vcmpph_v8hf_mask", IX86_BUILTIN_VCMPPH_V8HF_MASK, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_cmpv16hf3_mask, "__builtin_ia32_vcmpph_v16hf_mask", IX86_BUILTIN_VCMPPH_V16HF_MASK, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask, "__builtin_ia32_vcmpph_v32hf_mask", IX86_BUILTIN_VCMPPH_V32HF_MASK, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI)
/* Builtins with rounding support. */
BDESC_END (ARGS, ROUND_ARGS)
@@ -3012,6 +3015,8 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_smaxv32hf3_mask_round, "__builti
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_sminv32hf3_mask_round, "__builtin_ia32_vminph_v32hf_mask_round", IX86_BUILTIN_VMINPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsmaxv8hf3_mask_round, "__builtin_ia32_vmaxsh_v8hf_mask_round", IX86_BUILTIN_VMAXSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask_round, "__builtin_ia32_vminsh_v8hf_mask_round", IX86_BUILTIN_VMINSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask_round, "__builtin_ia32_vcmpph_v32hf_mask_round", IX86_BUILTIN_VCMPPH_V32HF_MASK_ROUND, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmcmpv8hf3_mask_round, "__builtin_ia32_vcmpsh_v8hf_mask_round", IX86_BUILTIN_VCMPSH_V8HF_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -9821,14 +9821,17 @@ ix86_expand_args_builtin (const struct builtin_description *d,
case UQI_FTYPE_V8SI_V8SI_INT_UQI:
case QI_FTYPE_V4DF_V4DF_INT_UQI:
case QI_FTYPE_V8SF_V8SF_INT_UQI:
+ case UHI_FTYPE_V16HF_V16HF_INT_UHI:
case UQI_FTYPE_V2DI_V2DI_INT_UQI:
case UQI_FTYPE_V4SI_V4SI_INT_UQI:
case UQI_FTYPE_V2DF_V2DF_INT_UQI:
case UQI_FTYPE_V4SF_V4SF_INT_UQI:
+ case UQI_FTYPE_V8HF_V8HF_INT_UQI:
case UDI_FTYPE_V64QI_V64QI_INT_UDI:
case USI_FTYPE_V32QI_V32QI_INT_USI:
case UHI_FTYPE_V16QI_V16QI_INT_UHI:
case USI_FTYPE_V32HI_V32HI_INT_USI:
+ case USI_FTYPE_V32HF_V32HF_INT_USI:
case UHI_FTYPE_V16HI_V16HI_INT_UHI:
case UQI_FTYPE_V8HI_V8HI_INT_UQI:
nargs = 4;
@@ -10112,6 +10115,9 @@ ix86_expand_args_builtin (const struct builtin_description *d,
case CODE_FOR_avx512f_cmpv16sf3_mask:
case CODE_FOR_avx512f_vmcmpv2df3_mask:
case CODE_FOR_avx512f_vmcmpv4sf3_mask:
+ case CODE_FOR_avx512bw_cmpv32hf3_mask:
+ case CODE_FOR_avx512vl_cmpv16hf3_mask:
+ case CODE_FOR_avx512fp16_cmpv8hf3_mask:
error ("the last argument must be a 5-bit immediate");
return const0_rtx;
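[annotation] Seen from the caller, the new cases enforce that the predicate is a compile-time constant in 0..31 (all _CMP_* macros qualify); a hedged sketch with a hypothetical function:

#include <immintrin.h>

__mmask32
in_range (__m512h a, __m512h b)
{
  return _mm512_cmp_ph_mask (a, b, 31);   /* accepted: 0 <= imm <= 31 */
}
/* _mm512_cmp_ph_mask (a, b, 32) would instead be rejected with
   "the last argument must be a 5-bit immediate".  */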
@@ -10532,6 +10538,8 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
+ case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
+ case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
nargs_constant = 3;
nargs = 5;
break;
@@ -10587,6 +10595,8 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case CODE_FOR_avx512f_cmpv16sf3_mask_round:
case CODE_FOR_avx512f_vmcmpv2df3_mask_round:
case CODE_FOR_avx512f_vmcmpv4sf3_mask_round:
+ case CODE_FOR_avx512f_vmcmpv8hf3_mask_round:
+ case CODE_FOR_avx512bw_cmpv32hf3_mask_round:
error ("the immediate argument must be a 5-bit immediate");
return const0_rtx;
default:
@@ -1252,7 +1252,7 @@ (define_mode_attr ssevecmodesuffix [(SF "ps") (DF "pd")])
;; SSE vector mode corresponding to a scalar mode
(define_mode_attr ssevecmode
- [(QI "V16QI") (HI "V8HI") (SI "V4SI") (DI "V2DI") (SF "V4SF") (DF "V2DF")])
+ [(QI "V16QI") (HI "V8HI") (SI "V4SI") (DI "V2DI") (HF "V8HF") (SF "V4SF") (DF "V2DF")])
(define_mode_attr ssevecmodelower
[(QI "v16qi") (HI "v8hi") (SI "v4si") (DI "v2di") (SF "v4sf") (DF "v2df")])
@@ -230,13 +230,23 @@ (define_mode_iterator VMOVE
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
-;; All AVX-512{F,VL} vector modes. Supposed TARGET_AVX512F baseline.
+;; All AVX-512{F,VL} vector modes without HF. Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48_AVX512VL
[V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+;; All AVX-512{F,VL} vector modes. Supposed TARGET_AVX512F baseline.
+(define_mode_iterator V48H_AVX512VL
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ (V32HF "TARGET_AVX512FP16")
+ (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
+ (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
+ V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+
;; 1,2 byte AVX-512{BW,VL} vector modes. Supposed TARGET_AVX512BW baseline.
(define_mode_iterator VI12_AVX512VL
[V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
@@ -974,10 +984,10 @@ (define_mode_attr iptr
[(V64QI "b") (V32HI "w") (V16SI "k") (V8DI "q")
(V32QI "b") (V16HI "w") (V8SI "k") (V4DI "q")
(V16QI "b") (V8HI "w") (V4SI "k") (V2DI "q")
- (V16SF "k") (V8DF "q")
- (V8SF "k") (V4DF "q")
- (V4SF "k") (V2DF "q")
- (SF "k") (DF "q")])
+ (V32HF "w") (V16SF "k") (V8DF "q")
+ (V16HF "w") (V8SF "k") (V4DF "q")
+ (V8HF "w") (V4SF "k") (V2DF "q")
+ (HF "w") (SF "k") (DF "q")])
;; Mapping of vector modes to VPTERNLOG suffix
(define_mode_attr ternlogsuffix
@@ -1024,6 +1034,18 @@ (define_mode_attr sseintprefix
(V32QI "p") (V16HI "p") (V16HF "p")
(V64QI "p") (V32HI "p") (V32HF "p")])
+;; SSE prefix for integer and HF vector comparison.
+(define_mode_attr ssecmpintprefix
+ [(V2DI "p") (V2DF "")
+ (V4DI "p") (V4DF "")
+ (V8DI "p") (V8DF "")
+ (V4SI "p") (V4SF "")
+ (V8SI "p") (V8SF "")
+ (V16SI "p") (V16SF "")
+ (V16QI "p") (V8HI "p") (V8HF "")
+ (V32QI "p") (V16HI "p") (V16HF "")
+ (V64QI "p") (V32HI "p") (V32HF "")])
+
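[annotation] The point of the new attribute, sketched below: integer modes keep the "p" prefix while FP16 modes drop it, so the FP mnemonic is emitted. Expected mnemonics are shown in comments as a hedged expectation of codegen, not a guarantee:

#include <immintrin.h>

__mmask16 ci (__m512i a, __m512i b)
{ return _mm512_cmp_epi32_mask (a, b, _MM_CMPINT_LT); }  /* vpcmpd */

__mmask32 ch (__m512h a, __m512h b)
{ return _mm512_cmp_ph_mask (a, b, _CMP_LT_OS); }        /* vcmpph */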
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
[(HF "sh") (SF "ss") (DF "sd")
@@ -3263,11 +3285,11 @@ (define_insn "<sse>_vmmaskcmp<mode>3"
(set_attr "mode" "<ssescalarmode>")])
(define_mode_attr cmp_imm_predicate
- [(V16SF "const_0_to_31_operand") (V8DF "const_0_to_31_operand")
+ [(V32HF "const_0_to_31_operand") (V16SF "const_0_to_31_operand") (V8DF "const_0_to_31_operand")
(V16SI "const_0_to_7_operand") (V8DI "const_0_to_7_operand")
- (V8SF "const_0_to_31_operand") (V4DF "const_0_to_31_operand")
+ (V16HF "const_0_to_31_operand") (V8SF "const_0_to_31_operand") (V4DF "const_0_to_31_operand")
(V8SI "const_0_to_7_operand") (V4DI "const_0_to_7_operand")
- (V4SF "const_0_to_31_operand") (V2DF "const_0_to_31_operand")
+ (V8HF "const_0_to_31_operand") (V4SF "const_0_to_31_operand") (V2DF "const_0_to_31_operand")
(V4SI "const_0_to_7_operand") (V2DI "const_0_to_7_operand")
(V32HI "const_0_to_7_operand") (V64QI "const_0_to_7_operand")
(V16HI "const_0_to_7_operand") (V32QI "const_0_to_7_operand")
@@ -3276,12 +3298,12 @@ (define_mode_attr cmp_imm_predicate
(define_insn "<avx512>_cmp<mode>3<mask_scalar_merge_name><round_saeonly_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(unspec:<avx512fmaskmode>
- [(match_operand:V48_AVX512VL 1 "register_operand" "v")
- (match_operand:V48_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
+ [(match_operand:V48H_AVX512VL 1 "register_operand" "v")
+ (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 3 "<cmp_imm_predicate>" "n")]
UNSPEC_PCMP))]
"TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
- "v<sseintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
+ "v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
@@ -3428,8 +3450,8 @@ (define_insn "avx512f_vmcmp<mode>3<round_saeonly_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(and:<avx512fmaskmode>
(unspec:<avx512fmaskmode>
- [(match_operand:VF_128 1 "register_operand" "v")
- (match_operand:VF_128 2 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
+ [(match_operand:VFH_128 1 "register_operand" "v")
+ (match_operand:VFH_128 2 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP)
(const_int 1)))]
@@ -3444,8 +3466,8 @@ (define_insn "avx512f_vmcmp<mode>3_mask<round_saeonly_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(and:<avx512fmaskmode>
(unspec:<avx512fmaskmode>
- [(match_operand:VF_128 1 "register_operand" "v")
- (match_operand:VF_128 2 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
+ [(match_operand:VFH_128 1 "register_operand" "v")
+ (match_operand:VFH_128 2 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP)
(and:<avx512fmaskmode>
@@ -3461,10 +3483,10 @@ (define_insn "avx512f_vmcmp<mode>3_mask<round_saeonly_name>"
(define_insn "<sse>_<unord>comi<round_saeonly_name>"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
- (vec_select:MODEF
+ (vec_select:MODEFH
(match_operand:<ssevecmode> 0 "register_operand" "v")
(parallel [(const_int 0)]))
- (vec_select:MODEF
+ (vec_select:MODEFH
(match_operand:<ssevecmode> 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (<MODE>mode)"
@@ -698,6 +698,13 @@
#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+
+/* avx512fp16vlintrin.h */
+#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v16hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v16hf_mask(A, B, 1, D)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
@@ -715,6 +715,13 @@
#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+
+/* avx512fp16vlintrin.h */
+#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v16hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v16hf_mask(A, B, 1, D)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
@@ -286,6 +286,7 @@ test_2 (_mm_add_round_sd, __m128d, __m128d, __m128d, 9)
test_2 (_mm_add_round_ss, __m128, __m128, __m128, 9)
test_2 (_mm_cmp_sd_mask, __mmask8, __m128d, __m128d, 1)
test_2 (_mm_cmp_ss_mask, __mmask8, __m128, __m128, 1)
+test_2 (_mm_cmp_sh_mask, __mmask8, __m128h, __m128h, 1)
#ifdef __x86_64__
test_2 (_mm_cvt_roundi64_sd, __m128d, __m128d, long long, 9)
test_2 (_mm_cvt_roundi64_ss, __m128, __m128, long long, 9)
@@ -470,6 +471,7 @@ test_3 (_mm256_maskz_shldi_epi64, __m256i, __mmask8, __m256i, __m256i, 1)
test_3 (_mm_maskz_shldi_epi16, __m128i, __mmask8, __m128i, __m128i, 1)
test_3 (_mm_maskz_shldi_epi32, __m128i, __mmask8, __m128i, __m128i, 1)
test_3 (_mm_maskz_shldi_epi64, __m128i, __mmask8, __m128i, __m128i, 1)
+test_3 (_mm_mask_cmp_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1)
test_3v (_mm512_i32scatter_epi32, void *, __m512i, __m512i, 1)
test_3v (_mm512_i32scatter_epi64, void *, __m256i, __m512i, 1)
test_3v (_mm512_i32scatter_pd, void *, __m256i, __m512d, 1)
@@ -680,6 +682,11 @@ test_2 (_mm512_max_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_min_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm_max_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_cmp_ph_mask, __mmask32, __m512h, __m512h, 1)
+test_2 (_mm_comi_sh, int, __m128h, __m128h, 1)
+test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
+test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
+test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -692,6 +699,9 @@ test_3 (_mm512_maskz_max_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_min_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm512_mask_cmp_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1)
+test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
+test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -705,6 +715,12 @@ test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h,
test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+/* avx512fp16vlintrin.h */
+test_2 (_mm_cmp_ph_mask, __mmask8, __m128h, __m128h, 1)
+test_2 (_mm256_cmp_ph_mask, __mmask16, __m256h, __m256h, 1)
+test_3 (_mm_mask_cmp_ph_mask, __mmask8, __mmask8, __m128h, __m128h, 1)
+test_3 (_mm256_mask_cmp_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1)
+
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
@@ -457,6 +457,7 @@ test_2 (_mm256_shldi_epi64, __m256i, __m256i, __m256i, 1)
test_2 (_mm_shldi_epi16, __m128i, __m128i, __m128i, 1)
test_2 (_mm_shldi_epi32, __m128i, __m128i, __m128i, 1)
test_2 (_mm_shldi_epi64, __m128i, __m128i, __m128i, 1)
+test_2 (_mm_cmp_sh_mask, __mmask8, __m128h, __m128h, 1)
#ifdef __x86_64__
test_2 (_mm_cvt_roundi64_sd, __m128d, __m128d, long long, 9)
test_2 (_mm_cvt_roundi64_ss, __m128, __m128, long long, 9)
@@ -581,6 +582,7 @@ test_3 (_mm256_maskz_shldi_epi64, __m256i, __mmask8, __m256i, __m256i, 1)
test_3 (_mm_maskz_shldi_epi16, __m128i, __mmask8, __m128i, __m128i, 1)
test_3 (_mm_maskz_shldi_epi32, __m128i, __mmask8, __m128i, __m128i, 1)
test_3 (_mm_maskz_shldi_epi64, __m128i, __mmask8, __m128i, __m128i, 1)
+test_3 (_mm_mask_cmp_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1)
test_3v (_mm512_i32scatter_epi32, void *, __m512i, __m512i, 1)
test_3v (_mm512_i32scatter_epi64, void *, __m256i, __m512i, 1)
test_3v (_mm512_i32scatter_pd, void *, __m256i, __m512d, 1)
@@ -785,6 +787,11 @@ test_2 (_mm512_max_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_min_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm_max_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_cmp_ph_mask, __mmask32, __m512h, __m512h, 1)
+test_2 (_mm_comi_sh, int, __m128h, __m128h, 1)
+test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
+test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
+test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -797,6 +804,9 @@ test_3 (_mm512_maskz_max_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_min_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm512_mask_cmp_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1)
+test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
+test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
@@ -810,6 +820,12 @@ test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h,
test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+/* avx512fp16vlintrin.h */
+test_2 (_mm_cmp_ph_mask, __mmask8, __m128h, __m128h, 1)
+test_2 (_mm256_cmp_ph_mask, __mmask16, __m256h, __m256h, 1)
+test_3 (_mm_mask_cmp_ph_mask, __mmask8, __mmask8, __m128h, __m128h, 1)
+test_3 (_mm256_mask_cmp_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1)
+
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
@@ -716,6 +716,13 @@
#define __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vminph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmaxsh_v8hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vminsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+
+/* avx512fp16vlintrin.h */
+#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
+#define __builtin_ia32_vcmpph_v16hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v16hf_mask(A, B, 1, D)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)