@@ -1241,6 +1241,216 @@ _mm256_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
(__mmask8) __U,
__R);
}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epi32 (__m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epi32 (__m256i __W, __mmask8 __U, __m128h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epi32 (__mmask8 __U, __m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epi64 (__m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epi64 (__m256i __W, __mmask8 __U, __m128h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epi64 (__mmask8 __U, __m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epu32 (__m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epu32 (__m256i __W, __mmask8 __U, __m128h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epu32 (__mmask8 __U, __m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epu64 (__m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epu64 (__m256i __W, __mmask8 __U, __m128h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epu16 (__m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundph_epi16 (__m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
+ (__v16hi) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -1946,6 +2156,7 @@ _mm256_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
(_mm_setzero_si128 ()), \
(__mmask8) (U), \
(R)))
+
#define _mm256_cvtt_roundpd_epu64(A, R) \
((__m256i) \
__builtin_ia32_cvttpd2uqq256_mask_round ((__v4df) (A), \
@@ -1975,6 +2186,138 @@ _mm256_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
					    (_mm256_setzero_si256 ()), \
					    (__mmask8) (U), \
					    (R)))
+
+#define _mm256_cvtt_roundph_epi32(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
+					    (__v8si) \
+					    (_mm256_setzero_si256 ()), \
+					    (__mmask8) (-1), \
+					    (R)))
+
+#define _mm256_mask_cvtt_roundph_epi32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundph_epi32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2dq256_mask_round ((__v8hf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundph_epi64(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundph_epi64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundph_epi64(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2qq256_mask_round ((__v8hf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundph_epu32(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundph_epu32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundph_epu32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2udq256_mask_round ((__v8hf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundph_epu64(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundph_epu64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundph_epu64(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2uqq256_mask_round ((__v8hf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundph_epu16(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundph_epu16(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundph_epu16(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvttph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundph_epi16(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
+					   (__v16hi) \
+					   (_mm256_setzero_si256 ()), \
+					   (__mmask16) (-1), \
+					   (R)))
+
+#define _mm256_mask_cvtt_roundph_epi16(W, U, A, R) \
+  ((__m256i) __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
+						     (__v16hi) (W), \
+						     (__mmask16) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvtt_roundph_epi16(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvttph2w256_mask_round ((__v16hf) (A), \
+					   (__v16hi) \
+					   (_mm256_setzero_si256 ()), \
+					   (__mmask16) (U), \
+					   (R)))
#endif
#ifdef __DISABLE_AVX10_2_256__
@@ -3355,6 +3355,12 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4dfv4si2_mask_
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4dfv4di2_mask_round, "__builtin_ia32_cvttpd2qq256_mask_round", IX86_BUILTIN_VCVTTPD2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4dfv4si2_mask_round, "__builtin_ia32_cvttpd2udq256_mask_round", IX86_BUILTIN_VCVTTPD2UDQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4dfv4di2_mask_round, "__builtin_ia32_cvttpd2uqq256_mask_round", IX86_BUILTIN_VCVTTPD2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv8si2_mask_round, "__builtin_ia32_vcvttph2dq256_mask_round", IX86_BUILTIN_VCVTTPH2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv4di2_mask_round, "__builtin_ia32_vcvttph2qq256_mask_round", IX86_BUILTIN_VCVTTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv8si2_mask_round, "__builtin_ia32_vcvttph2udq256_mask_round", IX86_BUILTIN_VCVTTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv4di2_mask_round, "__builtin_ia32_vcvttph2uqq256_mask_round", IX86_BUILTIN_VCVTTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2uw256_mask_round", IX86_BUILTIN_VCVTTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2w256_mask_round", IX86_BUILTIN_VCVTTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -7606,7 +7606,7 @@
(unspec:VI2H_AVX512VL
[(match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512FP16"
+ "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
"vcvttph2<vcvtt_suffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7616,7 +7616,7 @@
[(set (match_operand:VI2H_AVX512VL 0 "register_operand" "=v")
(any_fix:VI2H_AVX512VL
(match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512FP16"
+ "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
"vcvttph2<fixsuffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -7639,13 +7639,13 @@
}
})
-(define_insn "unspec_avx512fp16_fix<vcvtt_uns_suffix>_trunc<mode>2<mask_name>"
+(define_insn "unspec_avx512fp16_fix<vcvtt_uns_suffix>_trunc<mode>2<mask_name><round_saeonly_name>"
[(set (match_operand:VI4_128_8_256 0 "register_operand" "=v")
(unspec:VI4_128_8_256
[(match_operand:V8HF 1 "register_operand" "v")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512FP16 && TARGET_AVX512VL"
- "vcvttph2<vcvtt_suffix><sseintconvert>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_saeonly_mode_condition>"
+ "vcvttph2<vcvtt_suffix><sseintconvert>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -275,6 +275,7 @@
|| <MODE>mode == V4DFmode
|| <MODE>mode == V4DImode
|| <MODE>mode == V8SImode
+ || <MODE>mode == V16HImode
|| <MODE>mode == V16HFmode)))")
(define_subst_attr "round_saeonly_applied" "round_saeonly" "false" "true")
@@ -879,6 +879,12 @@
#define __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
#include <wmmintrin.h>
#include <immintrin.h>
@@ -21,6 +21,24 @@
/* { dg-final { scan-assembler-times "vcvttpd2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvttpd2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvttpd2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2dq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2dq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2dq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2qq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2qq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2qq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2udq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2udq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2udq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uqq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uqq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uqq\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uw\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2uw\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
@@ -70,3 +88,31 @@ avx10_2_test_2 (void)
xi = _mm256_mask_cvtt_roundpd_epu64 (xi, m8, xd, _MM_FROUND_NO_EXC);
xi = _mm256_maskz_cvtt_roundpd_epu64 (m8, xd, _MM_FROUND_NO_EXC);
}
+
+void extern
+avx10_2_test_3 (void)
+{
+ xi = _mm256_cvtt_roundph_epi32 (hxh, 4);
+ xi = _mm256_mask_cvtt_roundph_epi32 (xi, m8, hxh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epi32 (m8, hxh, 8);
+
+ xi = _mm256_cvtt_roundph_epi64 (hxh, 4);
+ xi = _mm256_mask_cvtt_roundph_epi64 (xi, m8, hxh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epi64 (m8, hxh, 8);
+
+ xi = _mm256_cvtt_roundph_epu32 (hxh, 4);
+ xi = _mm256_mask_cvtt_roundph_epu32 (xi, m8, hxh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epu32 (m8, hxh, 8);
+
+ xi = _mm256_cvtt_roundph_epu64 (hxh, 4);
+ xi = _mm256_mask_cvtt_roundph_epu64 (xi, m8, hxh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epu64 (m8, hxh, 8);
+
+ xi = _mm256_cvtt_roundph_epu16 (xh, 4);
+ xi = _mm256_mask_cvtt_roundph_epu16 (xi, m16, xh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epu16 (m16, xh, 8);
+
+ xi = _mm256_cvtt_roundph_epi16 (xh, 4);
+ xi = _mm256_mask_cvtt_roundph_epi16 (xi, m16, xh, 8);
+ xi = _mm256_maskz_cvtt_roundph_epi16 (m16, xh, 8);
+}
@@ -886,5 +886,11 @@
#define __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
#include <x86intrin.h>
@@ -1050,6 +1050,12 @@ test_1 (_mm256_cvtt_roundpd_epi32, __m128i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epi64, __m256i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epu32, __m128i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epu64, __m256i, __m256d, 8)
+test_1 (_mm256_cvtt_roundph_epi32, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epi64, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu32, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvtt_roundph_epi16, __m256i, __m256h, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1083,6 +1089,12 @@ test_2 (_mm256_maskz_cvtt_roundpd_epi32, __m128i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epi64, __m256i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epu32, __m128i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epu64, __m256i, __mmask8, __m256d, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi32, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1119,6 +1131,12 @@ test_3 (_mm256_mask_cvtt_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epi64, __m256i, __m256i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epu32, __m128i, __m128i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epu64, __m256i, __m256i, __mmask8, __m256d, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi32, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1092,6 +1092,12 @@ test_1 (_mm256_cvtt_roundpd_epi32, __m128i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epi64, __m256i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epu32, __m128i, __m256d, 8)
test_1 (_mm256_cvtt_roundpd_epu64, __m256i, __m256d, 8)
+test_1 (_mm256_cvtt_roundph_epi32, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epi64, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu32, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvtt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvtt_roundph_epi16, __m256i, __m256h, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1125,6 +1131,12 @@ test_2 (_mm256_maskz_cvtt_roundpd_epi32, __m128i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epi64, __m256i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epu32, __m128i, __mmask8, __m256d, 8)
test_2 (_mm256_maskz_cvtt_roundpd_epu64, __m256i, __mmask8, __m256d, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi32, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1161,6 +1173,12 @@ test_3 (_mm256_mask_cvtt_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epi64, __m256i, __m256i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epu32, __m128i, __m128i, __mmask8, __m256d, 8)
test_3 (_mm256_mask_cvtt_roundpd_epu64, __m256i, __m256i, __mmask8, __m256d, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi32, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -861,6 +861,12 @@
#define __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttpd2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
From: "Hu, Lin1" <lin1.hu@intel.com> gcc/ChangeLog: * config/i386/avx10_2roundingintrin.h: New intrins. * config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE. * config/i386/i386-builtin.def (BDESC): Add new builtins. * config/i386/sse.md (avx512fp16_fix<fixunssuffix>_trunc<mode>2<mask_name>): Extend round control for 256bit. (unspec_avx512fp16_fix<vcvtt_uns_suffix>_trunc<mode>2<mask_name>): Ditto. (avx512fp16_fix<fixunssuffix>_trunc<mode>2<mask_name><round_saeonly_name>): Add condition check. * config/i386/subst.md (round_saeonly_mode_condition): Add V16HI check for 256bit. gcc/testsuite/ChangeLog: * gcc.target/i386/avx-1.c: Add new builtin test. * gcc.target/i386/sse-13.c: Ditto. * gcc.target/i386/sse-14.c: Ditto. * gcc.target/i386/sse-22.c: Add new macro test. * gcc.target/i386/sse-23.c: Ditto. * gcc.target/i386/avx10_2-rounding-2.c: Add test. --- gcc/config/i386/avx10_2roundingintrin.h | 335 ++++++++++++++++++ gcc/config/i386/i386-builtin.def | 6 + gcc/config/i386/sse.md | 10 +- gcc/config/i386/subst.md | 1 + gcc/testsuite/gcc.target/i386/avx-1.c | 6 + .../gcc.target/i386/avx10_2-rounding-2.c | 46 +++ gcc/testsuite/gcc.target/i386/sse-13.c | 6 + gcc/testsuite/gcc.target/i386/sse-14.c | 18 + gcc/testsuite/gcc.target/i386/sse-22.c | 18 + gcc/testsuite/gcc.target/i386/sse-23.c | 6 + 10 files changed, 447 insertions(+), 5 deletions(-)