@@ -1934,6 +1934,164 @@ _mm256_maskz_div_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
(__mmask8) __U,
__R);
}
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fcmadd_round_pch (__m256h __A, __m256h __B, __m256h __D, const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmaddcph256_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fcmadd_round_pch (__m256h __A, __mmask8 __U, __m256h __B,
+ __m256h __D, const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmaddcph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fcmadd_round_pch (__m256h __A, __m256h __B, __m256h __D,
+ __mmask8 __U, const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmaddcph256_mask3_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fcmadd_round_pch (__mmask8 __U, __m256h __A, __m256h __B,
+ __m256h __D, const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmaddcph256_maskz_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fcmul_round_pch (__m256h __A, __m256h __B, const int __R)
+{
+ return
+ (__m256h) __builtin_ia32_vfcmulcph256_round ((__v16hf) __A,
+ (__v16hf) __B,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fcmul_round_pch (__m256h __W, __mmask8 __U, __m256h __A,
+ __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fcmul_round_pch (__mmask8 __U, __m256h __A, __m256h __B,
+ const int __R)
+{
+ return (__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fixupimm_round_pd (__m256d __A, __m256d __B, __m256i __D,
+ const int __C, const int __R)
+{
+ return (__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4di) __D,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fixupimm_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
+ __m256i __D, const int __C, const int __R)
+{
+ return (__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4di) __D,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fixupimm_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+ __m256i __D, const int __C, const int __R)
+{
+ return (__m256d) __builtin_ia32_fixupimmpd256_maskz_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4di) __D,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fixupimm_round_ps (__m256 __A, __m256 __B, __m256i __D, const int __C,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8si) __D,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fixupimm_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
+ __m256i __D, const int __C, const int __R)
+{
+ return (__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8si) __D,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fixupimm_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+ __m256i __D, const int __C, const int __R)
+{
+ return (__m256) __builtin_ia32_fixupimmps256_maskz_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8si) __D,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -3088,8 +3246,97 @@ _mm256_maskz_div_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
(_mm256_setzero_ps ()), \
(__mmask8) (U), \
(R)))
+
+#define _mm256_fcmadd_round_pch(A, B, D, R) \
+  ((__m256h) __builtin_ia32_vfcmaddcph256_round ((__v16hf)(A), \
+						 (__v16hf)(B), \
+						 (__v16hf)(D), \
+						 (R)))
+
+#define _mm256_mask_fcmadd_round_pch(A, U, B, D, R) \
+ ((__m256h) __builtin_ia32_vfcmaddcph256_mask_round ((__v16hf)(A), \
+ (__v16hf)(B), \
+ (__v16hf)(D), \
+ (U), (R)))
+
+#define _mm256_mask3_fcmadd_round_pch(A, B, D, U, R) \
+  ((__m256h) __builtin_ia32_vfcmaddcph256_mask3_round ((__v16hf)(A), \
+						       (__v16hf)(B), \
+						       (__v16hf)(D), \
+						       (U), (R)))
+
+#define _mm256_maskz_fcmadd_round_pch(U, A, B, D, R) \
+  ((__m256h) __builtin_ia32_vfcmaddcph256_maskz_round ((__v16hf)(A), \
+						       (__v16hf)(B), \
+						       (__v16hf)(D), \
+						       (U), (R)))
+
+#define _mm256_fcmul_round_pch(A, B, R) \
+ ((__m256h) __builtin_ia32_vfcmulcph256_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (R)))
+
+#define _mm256_mask_fcmul_round_pch(W, U, A, B, R) \
+ ((__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_fcmul_round_pch(U, A, B, R) \
+ ((__m256h) __builtin_ia32_vfcmulcph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_setzero_ph ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_fixupimm_round_pd(A, B, D, C, R) \
+ ((__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4di) (D), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_fixupimm_round_pd(A, U, B, D, C, R)\
+ ((__m256d) __builtin_ia32_fixupimmpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4di) (D), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_fixupimm_round_pd(U, A, B, D, C, R)\
+ ((__m256d) __builtin_ia32_fixupimmpd256_maskz_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4di) (D), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_fixupimm_round_ps(A, B, D, C, R)\
+ ((__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8si) (D), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_fixupimm_round_ps(A, U, B, D, C, R)\
+ ((__m256) __builtin_ia32_fixupimmps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8si) (D), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_fixupimm_round_ps(U, A, B, D, C, R)\
+ ((__m256) __builtin_ia32_fixupimmps256_maskz_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8si) (D), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
#endif
+#define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
+#define _mm256_mask_cmul_round_pch(W, U, A, B, R) \
+ _mm256_mask_fcmul_round_pch ((W), (U), (A), (B), (R))
+#define _mm256_maskz_cmul_round_pch(U, A, B, R) \
+ _mm256_maskz_fcmul_round_pch ((U), (A), (B), (R))
+
#ifdef __DISABLE_AVX10_2_256__
#undef __DISABLE_AVX10_2_256__
#pragma GCC pop_options
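
For context, a minimal usage sketch of the intrinsics added above (illustrative only; the function names are hypothetical and an AVX10.2-256-enabled compiler is assumed). Each __m256h holds eight complex FP16 values stored as interleaved real/imaginary pairs; per the Intel intrinsics guide, fcmadd multiplies a by the complex conjugate of b and accumulates into the third operand.

#include <immintrin.h>

/* acc += a * conj(b), rounding to nearest with exceptions
   suppressed.  */
__m256h
conj_fma_demo (__m256h a, __m256h b, __m256h acc)
{
  return _mm256_fcmadd_round_pch (a, b, acc,
				  _MM_FROUND_TO_NEAREST_INT
				  | _MM_FROUND_NO_EXC);
}

/* Masked a * conj(b), taking w where the mask bit is clear.  Masking
   is per complex element, so a __mmask8 covers the eight pairs.  */
__m256h
conj_mul_demo (__m256h w, __mmask8 u, __m256h a, __m256h b)
{
  return _mm256_mask_fcmul_round_pch (w, u, a, b, _MM_FROUND_NO_EXC);
}
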
@@ -1440,3 +1440,8 @@ DEF_FUNCTION_TYPE (V4DF, V4DI, V4DF, UQI, INT)
DEF_FUNCTION_TYPE (V8HF, V4DI, V8HF, UQI, INT)
DEF_FUNCTION_TYPE (V4SF, V4DI, V4SF, UQI, INT)
DEF_FUNCTION_TYPE (V16HF, V16HI, V16HF, UHI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UQI, INT)
+DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DI, INT, UQI, INT)
+DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SI, INT, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, INT)
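
Each DEF_FUNCTION_TYPE line encodes a builtin prototype as a return type followed by argument types. As a reading aid, here is a sketch of the C prototype implied by the V4DF entry (the actual declarations are generated from these tables; UQI denotes an unsigned QImode value, used here for the 8-bit write mask, and the trailing INT is the rounding/SAE operand):

/* Implied by DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DI, INT, UQI, INT):
   two double-precision sources, the integer fixup table, the fixup
   immediate, the write mask, and the embedded-rounding argument.  */
__v4df __builtin_ia32_fixupimmpd256_mask_round (__v4df, __v4df, __v4di,
						int, unsigned char, int);
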
@@ -3375,6 +3375,16 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtw2ph_v16hi_mask_
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv4df3_mask_round, "__builtin_ia32_divpd256_mask_round", IX86_BUILTIN_VDIVPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_divv16hf3_mask_round, "__builtin_ia32_divph256_mask_round", IX86_BUILTIN_VDIVPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv8sf3_mask_round, "__builtin_ia32_divps256_mask_round", IX86_BUILTIN_VDIVPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fma_fcmaddc_v16hf_round, "__builtin_ia32_vfcmaddcph256_round", IX86_BUILTIN_VFCMADDCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_mask1_round, "__builtin_ia32_vfcmaddcph256_mask_round", IX86_BUILTIN_VFCMADDCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_mask_round, "__builtin_ia32_vfcmaddcph256_mask3_round", IX86_BUILTIN_VFCMADDCPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmaddc_v16hf_maskz_round, "__builtin_ia32_vfcmaddcph256_maskz_round", IX86_BUILTIN_VFCMADDCPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmulc_v16hf_round, "__builtin_ia32_vfcmulcph256_round", IX86_BUILTIN_VFCMULCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fcmulc_v16hf_mask_round, "__builtin_ia32_vfcmulcph256_mask_round", IX86_BUILTIN_VFCMULCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv4df_mask_round, "__builtin_ia32_fixupimmpd256_mask_round", IX86_BUILTIN_VFIXUPIMMPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv4df_maskz_round, "__builtin_ia32_fixupimmpd256_maskz_round", IX86_BUILTIN_VFIXUPIMMPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv8sf_mask_round, "__builtin_ia32_fixupimmps256_mask_round", IX86_BUILTIN_VFIXUPIMMPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixupimmv8sf_maskz_round, "__builtin_ia32_fixupimmps256_maskz_round", IX86_BUILTIN_VFIXUPIMMPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -12426,6 +12426,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
nargs = 2;
break;
case V32HF_FTYPE_V32HF_V32HF_INT:
+ case V16HF_FTYPE_V16HF_V16HF_INT:
case V8HF_FTYPE_V8HF_V8HF_INT:
case V8HF_FTYPE_V8HF_INT_INT:
case V8HF_FTYPE_V8HF_UINT_INT:
@@ -12461,6 +12462,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V16SF_FTYPE_V16SI_V16SF_HI_INT:
case V16SI_FTYPE_V16SF_V16SI_HI_INT:
case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
+ case V16HF_FTYPE_V16HF_V16HF_V16HF_INT:
case V16HF_FTYPE_V16SI_V16HF_UHI_INT:
case V16HI_FTYPE_V16HF_V16HI_UHI_INT:
case V8DF_FTYPE_V8SF_V8DF_QI_INT:
@@ -12507,6 +12509,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
+ case V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12552,7 +12555,9 @@ ix86_expand_round_builtin (const struct builtin_description *d,
nargs_constant = 4;
break;
case V8DF_FTYPE_V8DF_V8DF_V8DI_INT_QI_INT:
+ case V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SI_INT_HI_INT:
+ case V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DI_INT_QI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SI_INT_QI_INT:
nargs = 6;
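
ix86_expand_round_builtin classifies each signature by its argument count and requires that the immediate operands (here the fixup control and the rounding/SAE value) fold to integer constants. A hedged illustration of what this means for user code (function names are made up):

/* Accepted: both immediates are integer constant expressions.  */
__m256d
fixup_ok (__m256d a, __m256d b, __m256i tbl)
{
  return _mm256_fixupimm_round_pd (a, b, tbl, 3, _MM_FROUND_NO_EXC);
}

/* Rejected at compile time: 'r' is not a constant expression, so the
   expander cannot encode it into the instruction:

   __m256d
   fixup_bad (__m256d a, __m256d b, __m256i tbl, int r)
   {
     return _mm256_fixupimm_round_pd (a, b, tbl, 3, r);
   }
*/
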
@@ -13856,7 +13856,7 @@
(match_operand:<sseintvecmode> 3 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_FIXUPIMM))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_saeonly_mode_condition>"
"vfixupimm<ssemodesuffix>\t{%4, <round_saeonly_sd_mask_op5>%3, %2, %0<sd_mask_op5>|%0<sd_mask_op5>, %2, %3<round_saeonly_sd_mask_op5>, %4}";
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -13872,7 +13872,7 @@
UNSPEC_FIXUPIMM)
(match_dup 1)
(match_operand:<avx512fmaskmode> 5 "register_operand" "Yk")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_saeonly_mode_condition>"
"vfixupimm<ssemodesuffix>\t{%4, <round_saeonly_op6>%3, %2, %0%{%5%}|%0%{%5%}, %2, %3<round_saeonly_op6>, %4}";
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
@@ -899,6 +899,16 @@
#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfcmaddcph256_round(A, B, C, D) __builtin_ia32_vfcmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmulcph256_round(A, B, C) __builtin_ia32_vfcmulcph256_round(A, B, 8)
+#define __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, E) __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_fixupimmpd256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_maskz_round(A, B, C, 1, E, 8)
#include <wmmintrin.h>
#include <immintrin.h>
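
These wrappers pin each immediate operand of the new builtins to a known-valid literal, so the test file's generic intrinsic invocations still satisfy the constant-operand requirement. For example, after preprocessing, a masked complex multiply effectively becomes (illustrative):

/* The rounding operand is forced to 8 (round-to-nearest with
   _MM_FROUND_NO_EXC) and, for the fixupimm builtins, the control
   immediate to 1, regardless of what the caller wrote.  */
__builtin_ia32_vfcmulcph256_mask_round (a, b, w, u, 8);
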
@@ -15,6 +15,18 @@
/* { dg-final { scan-assembler-times "vdivps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vdivps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vdivps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfcmaddcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfcmaddcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfcmaddcph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfcmulcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfcmulcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfcmulcph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfixupimmpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfixupimmpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfixupimmpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfixupimmps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfixupimmps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfixupimmps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
@@ -56,3 +68,40 @@ avx10_2_test_2 (void)
x = _mm256_mask_div_round_ps (x, m16, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
x = _mm256_maskz_div_round_ps (m16, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
+
+void extern
+avx10_2_test_3 (void)
+{
+ xh = _mm256_fcmadd_round_pch (xh, xh, xh, 8);
+ xh = _mm256_mask_fcmadd_round_pch (xh, m8, xh, xh, 8);
+ xh = _mm256_mask3_fcmadd_round_pch (xh, xh, xh, m8, 8);
+ xh = _mm256_maskz_fcmadd_round_pch (m8, xh, xh, xh, 11);
+}
+
+void extern
+avx10_2_test_4 (void)
+{
+ xh = _mm256_fcmul_round_pch (xh, xh, 8);
+ xh = _mm256_mask_fcmul_round_pch (xh, m8, xh, xh, 8);
+ xh = _mm256_maskz_fcmul_round_pch (m8, xh, xh, 11);
+}
+
+void extern
+avx10_2_test_5 (void)
+{
+ xh = _mm256_cmul_round_pch (xh, xh, 8);
+ xh = _mm256_mask_cmul_round_pch (xh, m8, xh, xh, 8);
+ xh = _mm256_maskz_cmul_round_pch (m8, xh, xh, 11);
+}
+
+void extern
+avx10_2_test_6 (void)
+{
+ xd = _mm256_fixupimm_round_pd (xd, xd, xi, 3, _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_fixupimm_round_pd (xd, m8, xd, xi, 3, _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_fixupimm_round_pd (m8, xd, xd, xi, 3, _MM_FROUND_NO_EXC);
+
+ x = _mm256_fixupimm_round_ps (x, x, xi, 3, _MM_FROUND_NO_EXC);
+ x = _mm256_mask_fixupimm_round_ps (x, m8, x, xi, 3, _MM_FROUND_NO_EXC);
+ x = _mm256_maskz_fixupimm_round_ps (m8, x, x, xi, 3, _MM_FROUND_NO_EXC);
+}
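
The literal rounding arguments in these tests map onto the embedded-rounding forms matched by the scan-assembler patterns above; for reference (constant values from the standard SSE rounding macros):

/* 8  == _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC  -> {rn-sae}
   9  == _MM_FROUND_TO_NEG_INF     | _MM_FROUND_NO_EXC  -> {rd-sae}
   11 == _MM_FROUND_TO_ZERO        | _MM_FROUND_NO_EXC  -> {rz-sae}  */
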
@@ -906,5 +906,15 @@
#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfcmaddcph256_round(A, B, C, D) __builtin_ia32_vfcmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmulcph256_round(A, B, C) __builtin_ia32_vfcmulcph256_round(A, B, 8)
+#define __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, E) __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_fixupimmpd256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_maskz_round(A, B, C, 1, E, 8)
#include <x86intrin.h>
@@ -1119,6 +1119,7 @@ test_2 (_mm256_maskz_cvt_roundepi16_ph, __m256h, __mmask16, __m256i, 8)
test_2 (_mm256_div_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_div_round_ph, __m256h, __m256h, __m256h, 9)
test_2 (_mm256_div_round_ps, __m256, __m256, __m256, 9)
+test_2 (_mm256_fcmul_round_pch, __m256h, __m256h, __m256h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1175,12 +1176,24 @@ test_3 (_mm256_mask_cvt_roundepi16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
test_3 (_mm256_maskz_div_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
test_3 (_mm256_maskz_div_round_ph, __m256h, __mmask8, __m256h, __m256h, 9)
test_3 (_mm256_maskz_div_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3 (_mm256_fcmadd_round_pch, __m256h, __m256h, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_fcmul_round_pch, __m256h, __mmask8, __m256h, __m256h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_3x (_mm256_fixupimm_round_pd, __m256d, __m256d, __m256d, __m256i, 3, 8)
+test_3x (_mm256_fixupimm_round_ps, __m256, __m256, __m256, __m256i, 3, 8)
test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4 (_mm256_mask_div_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_div_round_ph, __m256h, __m256h, __mmask8, __m256h, __m256h, 9)
test_4 (_mm256_mask_div_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_fcmadd_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fcmadd_round_pch, __m256h, __m256h, __m256h, __m256h, __mmask8, 9)
+test_4 (_mm256_maskz_fcmadd_round_pch, __m256h, __mmask8, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fcmul_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 8)
+test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
+test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
+test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
+test_4x (_mm256_mask_fixupimm_round_ps, __m256, __m256, __mmask8, __m256, __m256i, 3, 8)
@@ -1162,6 +1162,7 @@ test_2 (_mm256_maskz_cvt_roundepi16_ph, __m256h, __mmask16, __m256i, 8)
test_2 (_mm256_div_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_div_round_ph, __m256h, __m256h, __m256h, 9)
test_2 (_mm256_div_round_ps, __m256, __m256, __m256, 9)
+test_2 (_mm256_fcmul_round_pch, __m256h, __m256h, __m256h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1218,12 +1219,24 @@ test_3 (_mm256_mask_cvt_roundepi16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
test_3 (_mm256_maskz_div_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
test_3 (_mm256_maskz_div_round_ph, __m256h, __mmask8, __m256h, __m256h, 9)
test_3 (_mm256_maskz_div_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3 (_mm256_fcmadd_round_pch, __m256h, __m256h, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_fcmul_round_pch, __m256h, __mmask8, __m256h, __m256h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_3x (_mm256_fixupimm_round_pd, __m256d, __m256d, __m256d, __m256i, 3, 8)
+test_3x (_mm256_fixupimm_round_ps, __m256, __m256, __m256, __m256i, 3, 8)
test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4 (_mm256_mask_div_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
test_4 (_mm256_mask_div_round_ph, __m256h, __m256h, __mmask8, __m256h, __m256h, 9)
test_4 (_mm256_mask_div_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_fcmadd_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fcmadd_round_pch, __m256h, __m256h, __m256h, __m256h, __mmask8, 9)
+test_4 (_mm256_maskz_fcmadd_round_pch, __m256h, __mmask8, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fcmul_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 8)
+test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
+test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
+test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
+test_4x (_mm256_mask_fixupimm_round_ps, __m256, __m256, __mmask8, __m256, __m256i, 3, 8)
@@ -881,6 +881,16 @@
#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfcmaddcph256_round(A, B, C, D) __builtin_ia32_vfcmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfcmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmulcph256_round(A, B, C) __builtin_ia32_vfcmulcph256_round(A, B, 8)
+#define __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, E) __builtin_ia32_vfcmulcph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_fixupimmpd256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmpd256_maskz_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_mask_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_mask_round(A, B, C, 1, E, 8)
+#define __builtin_ia32_fixupimmps256_maskz_round(A, B, C, D, E, F) __builtin_ia32_fixupimmps256_maskz_round(A, B, C, 1, E, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
From: "Hu, Lin1" <lin1.hu@intel.com> gcc/ChangeLog: * config/i386/avx10_2roundingintrin.h: New intrins. * config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE. * config/i386/i386-builtin.def (BDESC): Add new builtins. * config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle V16HF_FTYPE_V16HF_V16HF_INT, V16HF_FTYPE_V16HF_V16HF_V16HF_INT, V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT, V4DF_FTYPE_V4DF_V4DF_V4DI_INT_UQI_INT, V8SF_FTYPE_V8SF_V8SF_V8SI_INT_UQI_INT. * config/i386/sse.md: (<avx512>_fixupimm<mode><sd_maskz_name><round_saeonly_name>): Add condition check. (<avx512>_fixupimm<mode>_mask<round_saeonly_name>): Ditto. gcc/testsuite/ChangeLog: * gcc.target/i386/avx-1.c: Add new builtin test. * gcc.target/i386/sse-13.c: Ditto. * gcc.target/i386/sse-14.c: Ditto. * gcc.target/i386/sse-22.c: Add new macro test. * gcc.target/i386/sse-23.c: Ditto. * gcc.target/i386/avx10_2-rounding-3.c: New test. --- gcc/config/i386/avx10_2roundingintrin.h | 247 ++++++++++++++++++ gcc/config/i386/i386-builtin-types.def | 5 + gcc/config/i386/i386-builtin.def | 10 + gcc/config/i386/i386-expand.cc | 5 + gcc/config/i386/sse.md | 4 +- gcc/testsuite/gcc.target/i386/avx-1.c | 10 + .../gcc.target/i386/avx10_2-rounding-3.c | 49 ++++ gcc/testsuite/gcc.target/i386/sse-13.c | 10 + gcc/testsuite/gcc.target/i386/sse-14.c | 13 + gcc/testsuite/gcc.target/i386/sse-22.c | 13 + gcc/testsuite/gcc.target/i386/sse-23.c | 10 + 11 files changed, 374 insertions(+), 2 deletions(-)