@@ -1451,6 +1451,312 @@ _mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
(__mmask16) __U,
__R);
}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epi32 (__m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epi64 (__m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epu32 (__m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtt_roundps_epu64 (__m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtt_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu32_ph (__m256i __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu32_ph (__m128h __W, __mmask8 __U, __m256i __A,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+ (__v8hf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu32_ph (__mmask8 __U, __m256i __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu32_ps (__m256i __A, const int __R)
+{
+ return
+ (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+ (__v8sf)
+ _mm256_undefined_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu32_ps (__m256 __W, __mmask8 __U, __m256i __A,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu32_ps (__mmask8 __U, __m256i __A, const int __R)
+{
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_pd (__m256i __A, const int __R)
+{
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_pd (__m256d __W, __mmask8 __U, __m256i __A,
+ const int __R)
+{
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_pd (__mmask8 __U, __m256i __A, const int __R)
+{
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_ph (__m256i __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_ph (__m128h __W, __mmask8 __U, __m256i __A,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+ (__v8hf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_ph (__mmask8 __U, __m256i __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu64_ps (__m256i __A, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu64_ps (__m128 __W, __mmask8 __U, __m256i __A,
+ const int __R)
+{
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu64_ps (__mmask8 __U, __m256i __A, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -2310,6 +2616,192 @@ _mm256_maskz_cvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
(_mm256_setzero_si256 ()), \
(__mmask16) (U), \
(R)))
+
+#define _mm256_cvtt_roundps_epi32(A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundps_epi32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundps_epi32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundps_epi64(A, R) \
+ ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundps_epi64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundps_epi64(U, A, R) \
+ ((__m256i) __builtin_ia32_cvttps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundps_epu32(A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundps_epu32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundps_epu32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtt_roundps_epu64(A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtt_roundps_epu64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtt_roundps_epu64(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvttps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundepu32_ph(A, R) \
+ ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundepu32_ph(W, U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+ (__v8hf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundepu32_ph(U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtudq2ph256_mask_round ((__v8si) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundepu32_ps(A, R) \
+ ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+ (__v8sf) \
+ (_mm256_undefined_ps ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundepu32_ps(W, U, A, R) \
+ ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+ (__v8sf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundepu32_ps(U, A, R) \
+ ((__m256) __builtin_ia32_cvtudq2ps256_mask_round ((__v8si) (A), \
+ (__v8sf) \
+ (_mm256_setzero_ps ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundepu64_pd(A, R) \
+ ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundepu64_pd(W, U, A, R) \
+ ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+ (__v4df) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundepu64_pd(U, A, R) \
+ ((__m256d) __builtin_ia32_cvtuqq2pd256_mask_round ((__v4di) (A), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundepu64_ph(A, R) \
+ ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundepu64_ph(W, U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+ (__v8hf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundepu64_ph(U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtuqq2ph256_mask_round ((__v4di) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundepu64_ps(A, R) \
+ ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+ (__v4sf) \
+ (_mm_setzero_ps ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundepu64_ps(W, U, A, R) \
+ ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+ (__v4sf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundepu64_ps(U, A, R) \
+ ((__m128) __builtin_ia32_cvtuqq2ps256_mask_round ((__v4di) (A), \
+ (__v4sf) \
+ (_mm_setzero_ps ()), \
+ (__mmask8) (U), \
+ (R)))
#endif
#ifdef __DISABLE_AVX10_2_256__
@@ -3361,6 +3361,15 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv4di2_mask_round, "__builtin_ia32_vcvttph2uqq256_mask_round", IX86_BUILTIN_VCVTTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fixuns_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2uw256_mask_round", IX86_BUILTIN_VCVTTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_avx512fp16_fix_truncv16hi2_mask_round, "__builtin_ia32_vcvttph2w256_mask_round", IX86_BUILTIN_VCVTTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2dq256_mask_round", IX86_BUILTIN_VCVTTPS2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fix_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2qq256_mask_round", IX86_BUILTIN_VCVTTPS2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv8sfv8si2_mask_round, "__builtin_ia32_cvttps2udq256_mask_round", IX86_BUILTIN_VCVTTPS2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_unspec_fixuns_truncv4sfv4di2_mask_round, "__builtin_ia32_cvttps2uqq256_mask_round", IX86_BUILTIN_VCVTTPS2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtudq2ph_v8si_mask_round, "__builtin_ia32_vcvtudq2ph256_mask_round", IX86_BUILTIN_VCVTUDQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv8siv8sf2_mask_round, "__builtin_ia32_cvtudq2ps256_mask_round", IX86_BUILTIN_VCVTUDQ2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SI_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4df2_mask_round, "__builtin_ia32_cvtuqq2pd256_mask_round", IX86_BUILTIN_VCVTUQQ2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DI_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuqq2ph_v4di_mask_round, "__builtin_ia32_vcvtuqq2ph256_mask_round", IX86_BUILTIN_VCVTUQQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DI_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4sf2_mask_round, "__builtin_ia32_cvtuqq2ps256_mask_round", IX86_BUILTIN_VCVTUQQ2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DI_V4SF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -8401,7 +8401,7 @@
[(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
(unsigned_float:VF1_AVX512VL
(match_operand:<sseintvecmode> 1 "nonimmediate_operand" "<round_constraint>")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_mode_condition>"
"vcvtudq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8530,12 +8530,13 @@
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
-(define_insn "unspec_fix_truncv8sfv8si2<mask_name>"
+(define_insn "unspec_fix_truncv8sfv8si2<mask_name><round_saeonly_name>"
[(set (match_operand:V8SI 0 "register_operand" "=v")
- (unspec:V8SI [(match_operand:V8SF 1 "nonimmediate_operand" "vm")]
+ (unspec:V8SI [(match_operand:V8SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT))]
- "TARGET_AVX && <mask_avx512vl_condition>"
- "vcvttps2dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "TARGET_AVX && <mask_avx512vl_condition>
+ && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
+ "vcvttps2dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "<mask_prefix>")
(set_attr "mode" "OI")])
@@ -9488,12 +9489,12 @@
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
-(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name><round_saeonly_name>"
+(define_insn "fix<fixunssuffix>_trunc<ssePSmode2lower><mode>2<mask_name>"
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(any_fix:VI8_256_512
- (match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
- "vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
+ (match_operand:<ssePSmode2> 1 "nonimmediate_operand" "vm")))]
+ "TARGET_AVX512DQ"
+ "vcvttps2<fixsuffix>qq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
@@ -9648,13 +9649,13 @@
DONE;
})
-(define_insn "<mask_codefor>unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name>"
+(define_insn "<mask_codefor>unspec_fixuns_trunc<mode><sseintvecmodelower>2<mask_name><round_saeonly_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VF1_128_256 1 "nonimmediate_operand" "vm")]
+ [(match_operand:VF1_128_256 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTTU))]
- "TARGET_AVX512VL"
- "vcvttps2udq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "TARGET_AVX512VL && <round_saeonly_mode_condition>"
+ "vcvttps2udq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseintvecmode2>")])
@@ -885,6 +885,15 @@
#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2dq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
#include <wmmintrin.h>
#include <immintrin.h>
@@ -39,6 +39,33 @@
/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvttph2w\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2dq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2dq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2dq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2qq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2qq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2qq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2udq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2udq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2udq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvttps2uqq\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2phy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2ph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2ph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2ps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2ps\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtudq2ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2pd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2pd\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2pd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2phy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2ph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2ph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2ps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2ps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtuqq2ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\n\]*%ymm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
@@ -116,3 +143,51 @@ avx10_2_test_3 (void)
xi = _mm256_mask_cvtt_roundph_epi16 (xi, m16, xh, 8);
xi = _mm256_maskz_cvtt_roundph_epi16 (m16, xh, 8);
}
+
+void extern
+avx10_2_test_4 (void)
+{
+ xi = _mm256_cvtt_roundps_epi32 (x, _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvtt_roundps_epi32 (xi, m8, x, _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvtt_roundps_epi32 (m8, x, _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvtt_roundps_epi64 (hx, _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvtt_roundps_epi64 (xi, m8, hx, _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvtt_roundps_epi64 (m8, hx, _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvtt_roundps_epu32 (x, _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvtt_roundps_epu32 (xi, m8, x, _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvtt_roundps_epu32 (m8, x, _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvtt_roundps_epu64 (hx, _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvtt_roundps_epu64 (xi, m8, hx, _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvtt_roundps_epu64 (m8, hx, _MM_FROUND_NO_EXC);
+}
+
+void extern
+avx10_2_test_5 (void)
+{
+ hxh = _mm256_cvt_roundepu32_ph (xi, 4);
+ hxh = _mm256_mask_cvt_roundepu32_ph (hxh, m8, xi, 8);
+ hxh = _mm256_maskz_cvt_roundepu32_ph (m8, xi, 11);
+
+ x = _mm256_cvt_roundepu32_ps (xi, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ x = _mm256_mask_cvt_roundepu32_ps (x, m8, xi, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ x = _mm256_maskz_cvt_roundepu32_ps (m8, xi, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+void extern
+avx10_2_test_6 (void)
+{
+ xd = _mm256_cvt_roundepu64_pd (xi, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_cvt_roundepu64_pd (xd, m8, xi, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_cvt_roundepu64_pd (m8, xi, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ hxh = _mm256_cvt_roundepu64_ph (xi, 4);
+ hxh = _mm256_mask_cvt_roundepu64_ph (hxh, m8, xi, 8);
+ hxh = _mm256_maskz_cvt_roundepu64_ph (m8, xi, 11);
+
+ hx = _mm256_cvt_roundepu64_ps (xi, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ hx = _mm256_mask_cvt_roundepu64_ps (hx, m8, xi, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ hx = _mm256_maskz_cvt_roundepu64_ps (m8, xi, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
@@ -892,5 +892,14 @@
#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2dq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
#include <x86intrin.h>
@@ -1056,6 +1056,14 @@ test_1 (_mm256_cvtt_roundph_epu32, __m256i, __m128d, 8)
test_1 (_mm256_cvtt_roundph_epu64, __m256i, __m128h, 8)
test_1 (_mm256_cvtt_roundph_epu16, __m256i, __m256h, 8)
test_1 (_mm256_cvtt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvtt_roundps_epi32, __m256i, __m256, 8)
+test_1 (_mm256_cvtt_roundps_epi64, __m256i, __m128, 8)
+test_1 (_mm256_cvtt_roundps_epu64, __m256i, __m128, 8)
+test_1 (_mm256_cvt_roundepu32_ph, __m128h, __m256i, 8)
+test_1 (_mm256_cvt_roundepu32_ps, __m256, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_pd, __m256d, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_ph, __m128h, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_ps, __m128, __m256i, 9)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1095,6 +1103,15 @@ test_2 (_mm256_maskz_cvtt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epi32, __m256i, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epi64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epu32, __m256i, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epu64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvt_roundepu32_ph, __m128h, __mmask8, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepu32_ps, __m256, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu64_pd, __m256d, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu64_ph, __m128h, __mmask8, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepu64_ps, __m128, __mmask8, __m256i, 9)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1137,6 +1154,15 @@ test_3 (_mm256_mask_cvtt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvtt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3 (_mm256_mask_cvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvtt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvtt_roundps_epi64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvtt_roundps_epu64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvt_roundepu32_ph, __m128h, __m128h, __mmask8, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepu32_ps, __m256, __m256, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu64_pd, __m256d, __m256d, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu64_ph, __m128h, __m128h, __mmask8, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepu64_ps, __m128, __m128, __mmask8, __m256i, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1098,6 +1098,15 @@ test_1 (_mm256_cvtt_roundph_epu32, __m256i, __m128d, 8)
test_1 (_mm256_cvtt_roundph_epu64, __m256i, __m128h, 8)
test_1 (_mm256_cvtt_roundph_epu16, __m256i, __m256h, 8)
test_1 (_mm256_cvtt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvtt_roundps_epi32, __m256i, __m256, 8)
+test_1 (_mm256_cvtt_roundps_epi64, __m256i, __m128, 8)
+test_1 (_mm256_cvtt_roundps_epu32, __m256i, __m256, 8)
+test_1 (_mm256_cvtt_roundps_epu64, __m256i, __m128, 8)
+test_1 (_mm256_cvt_roundepu32_ph, __m128h, __m256i, 8)
+test_1 (_mm256_cvt_roundepu32_ps, __m256, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_pd, __m256d, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_ph, __m128h, __m256i, 9)
+test_1 (_mm256_cvt_roundepu64_ps, __m128, __m256i, 9)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1137,6 +1146,15 @@ test_2 (_mm256_maskz_cvtt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
test_2 (_mm256_maskz_cvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epi32, __m256i, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epi64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epu32, __m256i, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvtt_roundps_epu64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvt_roundepu32_ph, __m128h, __mmask8, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepu32_ps, __m256, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu64_pd, __m256d, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu64_ph, __m128h, __mmask8, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepu64_ps, __m128, __mmask8, __m256i, 9)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1179,6 +1197,15 @@ test_3 (_mm256_mask_cvtt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvtt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3 (_mm256_mask_cvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvtt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvtt_roundps_epi64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvtt_roundps_epu64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvt_roundepu32_ph, __m128h, __m128h, __mmask8, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepu32_ps, __m256, __m256, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu64_pd, __m256d, __m256d, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu64_ph, __m128h, __m128h, __mmask8, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepu64_ps, __m128, __m128, __mmask8, __m256i, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -867,6 +867,15 @@
#define __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2uw256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvttph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvttph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2dq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvttps2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtudq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtudq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
From: "Hu, Lin1" <lin1.hu@intel.com> gcc/ChangeLog: * config/i386/avx10_2roundingintrin.h: New intrins. * config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE. * config/i386/i386-builtin.def (BDESC): Add new builtins. * config/i386/sse.md (unspec_fix_truncv8sfv8si2<mask_name>): Extend rounding control. (<mask_codefor>fixuns_trunc<mode><sseintvecmodelower>2<mask_name>): Ditto. (<mask_codefor>floatuns<sseintvecmodelower><mode>2<mask_name><round_name>): Add condition check. (fix<fixunssuffix>_trunc<mode><sselongvecmodelower>2<mask_name><round_saeonly_name>): Remove round_saeonly_name. gcc/testsuite/ChangeLog: * gcc.target/i386/avx-1.c: Add new builtin test. * gcc.target/i386/sse-13.c: Ditto. * gcc.target/i386/sse-14.c: Ditto. * gcc.target/i386/sse-22.c: Add new macro test. * gcc.target/i386/sse-23.c: Ditto. * gcc.target/i386/avx10_2-rounding-2.c: Add test. --- gcc/config/i386/avx10_2roundingintrin.h | 492 ++++++++++++++++++ gcc/config/i386/i386-builtin.def | 9 + gcc/config/i386/sse.md | 27 +- gcc/testsuite/gcc.target/i386/avx-1.c | 9 + .../gcc.target/i386/avx10_2-rounding-2.c | 75 +++ gcc/testsuite/gcc.target/i386/sse-13.c | 9 + gcc/testsuite/gcc.target/i386/sse-14.c | 26 + gcc/testsuite/gcc.target/i386/sse-22.c | 27 + gcc/testsuite/gcc.target/i386/sse-23.c | 9 + 9 files changed, 670 insertions(+), 13 deletions(-)