
[13/22] AVX10.2 ymm rounding: Support vfmaddcph and vfmaddsub{132, 231, 213}p{s, d, h} intrins

Message ID 20240814090159.422097-14-haochen.jiang@intel.com
State New
Series Support AVX10.2 ymm rounding

Commit Message

Haochen Jiang Aug. 14, 2024, 9:01 a.m. UTC
From: "Hu, Lin1" <lin1.hu@intel.com>

gcc/ChangeLog:

	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/sse.md:
	(<avx512>_fmaddsub_<mode>_mask<round_name>): Add condition check.
	(<avx512>_fmaddsub_<mode>_mask3<round_name>): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-3.c: Add test.
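
For example, with the 256-bit AVX10.2 subset enabled (e.g. via
-mavx10.2-256), the new fmaddsub intrinsics take an explicit rounding
control as their last argument.  A minimal usage sketch (illustration
only, the function name is made up, not part of the patch):

  #include <immintrin.h>

  /* Fused multiply-add/subtract on four doubles, rounding to
     nearest with exceptions suppressed.  */
  __m256d
  fmaddsub_rn (__m256d a, __m256d b, __m256d c)
  {
    return _mm256_fmaddsub_round_pd (a, b, c,
				     _MM_FROUND_TO_NEAREST_INT
				     | _MM_FROUND_NO_EXC);
  }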
---
 gcc/config/i386/avx10_2roundingintrin.h       | 238 ++++++++++++++++++
 gcc/config/i386/i386-builtin.def              |  13 +
 gcc/config/i386/sse.md                        |   4 +-
 gcc/testsuite/gcc.target/i386/avx-1.c         |  13 +
 .../gcc.target/i386/avx10_2-rounding-3.c      |  43 ++++
 gcc/testsuite/gcc.target/i386/sse-13.c        |  13 +
 gcc/testsuite/gcc.target/i386/sse-14.c        |  16 ++
 gcc/testsuite/gcc.target/i386/sse-22.c        |  15 ++
 gcc/testsuite/gcc.target/i386/sse-23.c        |  13 +
 9 files changed, 366 insertions(+), 2 deletions(-)
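
Note on the complex-half forms: vfmaddcph operates on complex values,
each made of two _Float16 elements, so a 256-bit vector holds eight
complex numbers and the masked intrinsics take a __mmask8 with one bit
per complex element.  A sketch of the zero-masking form (illustration
only, the function name is made up, not part of the patch):

  #include <immintrin.h>

  /* Complex FP16 multiply-add, roughly a * b + d on eight complex
     values; mask bit i computes complex element i, zeroes it
     otherwise.  */
  __m256h
  cfmadd_rn (__mmask8 m, __m256h a, __m256h b, __m256h d)
  {
    return _mm256_maskz_fmadd_round_pch (m, a, b, d,
					 _MM_FROUND_TO_NEAREST_INT
					 | _MM_FROUND_NO_EXC);
  }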

Patch

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 9015095144e..95e42410a10 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -2232,6 +2232,193 @@  _mm256_maskz_fmadd_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
 							  (__mmask8) __U,
 							  __R);
 }
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fmadd_round_pch (__m256h __A, __m256h __B, __m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddcph256_round ((__v16hf) __A,
+						      (__v16hf) __B,
+						      (__v16hf) __D,
+						      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmadd_round_pch (__m256h __A, __mmask8 __U, __m256h __B,
+			     __m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddcph256_mask_round ((__v16hf) __A,
+							   (__v16hf) __B,
+							   (__v16hf) __D,
+							   __U,
+							   __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmadd_round_pch (__m256h __A, __m256h __B, __m256h __D,
+			      __mmask8 __U, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddcph256_mask3_round ((__v16hf) __A,
+							    (__v16hf) __B,
+							    (__v16hf) __D,
+							    __U,
+							    __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmadd_round_pch (__mmask8 __U, __m256h __A, __m256h __B,
+			      __m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddcph256_maskz_round ((__v16hf) __A,
+							    (__v16hf) __B,
+							    (__v16hf) __D,
+							    __U,
+							    __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fmaddsub_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round ((__v4df) __A,
+							     (__v4df) __B,
+							     (__v4df) __D,
+							     (__mmask8) -1,
+							     __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmaddsub_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
+			       __m256d __D, const int __R)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask_round ((__v4df) __A,
+							     (__v4df) __B,
+							     (__v4df) __D,
+							     (__mmask8) __U,
+							     __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmaddsub_round_pd (__m256d __A, __m256d __B, __m256d __D,
+				__mmask8 __U, const int __R)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3_round ((__v4df) __A,
+							      (__v4df) __B,
+							      (__v4df) __D,
+							      (__mmask8) __U,
+							      __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmaddsub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+				__m256d __D, const int __R)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz_round ((__v4df) __A,
+							      (__v4df) __B,
+							      (__v4df) __D,
+							      (__mmask8) __U,
+							      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fmaddsub_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddsubph256_mask_round ((__v16hf) __A,
+							     (__v16hf) __B,
+							     (__v16hf) __D,
+							     (__mmask16) -1,
+							     __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmaddsub_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
+			       __m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddsubph256_mask_round ((__v16hf) __A,
+							     (__v16hf) __B,
+							     (__v16hf) __D,
+							     (__mmask16) __U,
+							     __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmaddsub_round_ph (__m256h __A, __m256h __B, __m256h __D,
+				__mmask16 __U, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddsubph256_mask3_round ((__v16hf) __A,
+							      (__v16hf) __B,
+							      (__v16hf) __D,
+							      (__mmask16) __U,
+							      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmaddsub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+				__m256h __D, const int __R)
+{
+  return (__m256h) __builtin_ia32_vfmaddsubph256_maskz_round ((__v16hf) __A,
+							      (__v16hf) __B,
+							      (__v16hf) __D,
+							      (__mmask16) __U,
+							      __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fmaddsub_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask_round ((__v8sf) __A,
+							    (__v8sf) __B,
+							    (__v8sf) __D,
+							    (__mmask8) -1,
+							    __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmaddsub_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
+			       __m256 __D, const int __R)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask_round ((__v8sf) __A,
+							    (__v8sf) __B,
+							    (__v8sf) __D,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmaddsub_round_ps (__m256 __A, __m256 __B, __m256 __D,
+				__mmask8 __U, const int __R)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask3_round ((__v8sf) __A,
+							     (__v8sf) __B,
+							     (__v8sf) __D,
+							     (__mmask8) __U,
+							     __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmaddsub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+				__m256 __D, const int __R)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_maskz_round ((__v8sf) __A,
+							     (__v8sf) __B,
+							     (__v8sf) __D,
+							     (__mmask8) __U,
+							     __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -3505,6 +3692,57 @@  _mm256_maskz_fmadd_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
 
 #define _mm256_maskz_fmadd_round_ps(U, A, B, D, R)   \
   ((__m256)__builtin_ia32_vfmaddps256_maskz_round (A, B, D, U, R))
+
+#define _mm256_fmadd_round_pch(A, B, D, R)	\
+  ((__m256h) __builtin_ia32_vfmaddcph256_round ((A), (B), (D), (R)))
+
+#define _mm256_mask_fmadd_round_pch(A, U, B, D, R) \
+  ((__m256h) __builtin_ia32_vfmaddcph256_mask_round ((__v16hf) (A), \
+						     (__v16hf) (B), \
+						     (__v16hf) (D), \
+						     (U), (R)))
+
+#define _mm256_mask3_fmadd_round_pch(A, B, D, U, R)	\
+  ((__m256h) __builtin_ia32_vfmaddcph256_mask3_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_maskz_fmadd_round_pch(U, A, B, D, R)	\
+  ((__m256h) __builtin_ia32_vfmaddcph256_maskz_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_fmaddsub_round_pd(A, B, D, R)		\
+  ((__m256d) __builtin_ia32_vfmaddsubpd256_mask_round (A, B, D, -1, R))
+
+#define _mm256_mask_fmaddsub_round_pd(A, U, B, D, R)    \
+  ((__m256d) __builtin_ia32_vfmaddsubpd256_mask_round (A, B, D, U, R))
+
+#define _mm256_mask3_fmaddsub_round_pd(A, B, D, U, R)   \
+  ((__m256d) __builtin_ia32_vfmaddsubpd256_mask3_round (A, B, D, U, R))
+
+#define _mm256_maskz_fmaddsub_round_pd(U, A, B, D, R)   \
+  ((__m256d) __builtin_ia32_vfmaddsubpd256_maskz_round (A, B, D, U, R))
+
+#define _mm256_fmaddsub_round_ph(A, B, D, R)		  \
+  ((__m256h)__builtin_ia32_vfmaddsubph256_mask_round ((A), (B), (D), -1, (R)))
+
+#define _mm256_mask_fmaddsub_round_ph(A, U, B, D, R)	  \
+  ((__m256h)__builtin_ia32_vfmaddsubph256_mask_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_mask3_fmaddsub_round_ph(A, B, D, U, R)	  \
+  ((__m256h)__builtin_ia32_vfmaddsubph256_mask3_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_maskz_fmaddsub_round_ph(U, A, B, D, R)	  \
+  ((__m256h)__builtin_ia32_vfmaddsubph256_maskz_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_fmaddsub_round_ps(A, B, D, R)		\
+  ((__m256) __builtin_ia32_vfmaddsubps256_mask_round (A, B, D, -1, R))
+
+#define _mm256_mask_fmaddsub_round_ps(A, U, B, D, R)    \
+  ((__m256) __builtin_ia32_vfmaddsubps256_mask_round (A, B, D, U, R))
+
+#define _mm256_mask3_fmaddsub_round_ps(A, B, D, U, R)   \
+  ((__m256) __builtin_ia32_vfmaddsubps256_mask3_round (A, B, D, U, R))
+
+#define _mm256_maskz_fmaddsub_round_ps(U, A, B, D, R)   \
+  ((__m256) __builtin_ia32_vfmaddsubps256_maskz_round (A, B, D, U, R))
 #endif
 
 #define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 33e7573503f..74411b43973 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3394,6 +3394,19 @@  BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v16hf_maskz_roun
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_mask_round, "__builtin_ia32_vfmaddps256_mask_round", IX86_BUILTIN_VFMADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_mask3_round, "__builtin_ia32_vfmaddps256_mask3_round", IX86_BUILTIN_VFMADDPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmadd_v8sf_maskz_round, "__builtin_ia32_vfmaddps256_maskz_round", IX86_BUILTIN_VFMADDPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fma_fmaddc_v16hf_round, "__builtin_ia32_vfmaddcph256_round", IX86_BUILTIN_VFMADDCPH256_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_mask1_round, "__builtin_ia32_vfmaddcph256_mask_round", IX86_BUILTIN_VFMADDCPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_mask_round, "__builtin_ia32_vfmaddcph256_mask3_round", IX86_BUILTIN_VFMADDCPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddc_v16hf_maskz_round, "__builtin_ia32_vfmaddcph256_maskz_round", IX86_BUILTIN_VFMADDCPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_mask_round, "__builtin_ia32_vfmaddsubpd256_mask_round", IX86_BUILTIN_VFMADDSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_mask3_round, "__builtin_ia32_vfmaddsubpd256_mask3_round", IX86_BUILTIN_VFMADDSUBPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v4df_maskz_round, "__builtin_ia32_vfmaddsubpd256_maskz_round", IX86_BUILTIN_VFMADDSUBPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_mask_round, "__builtin_ia32_vfmaddsubph256_mask_round", IX86_BUILTIN_VFMADDSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_mask3_round, "__builtin_ia32_vfmaddsubph256_mask3_round", IX86_BUILTIN_VFMADDSUBPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v16hf_maskz_round, "__builtin_ia32_vfmaddsubph256_maskz_round", IX86_BUILTIN_VFMADDSUBPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_mask_round, "__builtin_ia32_vfmaddsubps256_mask_round", IX86_BUILTIN_VFMADDSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_mask3_round, "__builtin_ia32_vfmaddsubps256_mask3_round", IX86_BUILTIN_VFMADDSUBPS256_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fmaddsub_v8sf_maskz_round, "__builtin_ia32_vfmaddsubps256_maskz_round", IX86_BUILTIN_VFMADDSUBPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index fe7520ac58a..52d996533ed 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -6178,7 +6178,7 @@ 
 	    UNSPEC_FMADDSUB)
 	  (match_dup 1)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F"
+  "TARGET_AVX512F && <round_mode_condition>"
   "@
    vfmaddsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfmaddsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6196,7 +6196,7 @@ 
 	    UNSPEC_FMADDSUB)
 	  (match_dup 3)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
-  "TARGET_AVX512F"
+  "TARGET_AVX512F && <round_mode_condition>"
   "vfmaddsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index 9e6e609108e..6b267905bdc 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -918,6 +918,19 @@ 
 #define __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddcph256_round(A, B, C, D) __builtin_ia32_vfmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, 8)
 
 #include <wmmintrin.h>
 #include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
index e5a1831147c..a781b2ecb7a 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
@@ -39,6 +39,21 @@ 
 /* { dg-final { scan-assembler-times "vfmadd...ps\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
 /* { dg-final { scan-assembler-times "vfmadd231ps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
 /* { dg-final { scan-assembler-times "vfmadd...ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddcph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2  }  } */
+/* { dg-final { scan-assembler-times "vfmaddcph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...pd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...pd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub231pd\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...pd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ph\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ph\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub231ph\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ph\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ps\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub231ps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vfmaddsub...ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
 
 #include <immintrin.h>
 
@@ -136,3 +151,31 @@  avx10_2_test_7 (void)
   x = _mm256_mask3_fmadd_round_ps (x, x, x, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
   x = _mm256_maskz_fmadd_round_ps (m8, x, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
 }
+
+void extern
+avx10_2_test_8 (void)
+{
+  xh = _mm256_fmadd_round_pch (xh, xh, xh, 8);
+  xh = _mm256_mask_fmadd_round_pch (xh, m8, xh, xh, 8);
+  xh = _mm256_mask3_fmadd_round_pch (xh, xh, xh, m8, 8);
+  xh = _mm256_maskz_fmadd_round_pch (m8, xh, xh, xh, 11);
+}
+
+void extern
+avx10_2_test_9 (void)
+{
+  xd = _mm256_fmaddsub_round_pd (xd, xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_fmaddsub_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+  xd = _mm256_mask3_fmaddsub_round_pd (xd, xd, xd, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_fmaddsub_round_pd (m8, xd, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+  xh = _mm256_fmaddsub_round_ph (xh, xh, xh, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  xh = _mm256_mask_fmaddsub_round_ph (xh, m8, xh, xh, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+  xh = _mm256_mask3_fmaddsub_round_ph (xh, xh, xh, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+  xh = _mm256_maskz_fmaddsub_round_ph (m8, xh, xh, xh, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+  x = _mm256_fmaddsub_round_ps (x, x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  x = _mm256_mask_fmaddsub_round_ps (x, m8, x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+  x = _mm256_mask3_fmaddsub_round_ps (x, x, x, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+  x = _mm256_maskz_fmaddsub_round_ps (m8, x, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index d8e1ee6a99b..5b71ee0b633 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -925,5 +925,18 @@ 
 #define __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddcph256_round(A, B, C, D) __builtin_ia32_vfmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, 8)
 
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 0cf49a59a52..062f3d06859 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1181,6 +1181,10 @@  test_3 (_mm256_maskz_fcmul_round_pch, __m256h, __mmask8, __m256h, __m256h, 8)
 test_3 (_mm256_fmadd_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
 test_3 (_mm256_fmadd_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
 test_3 (_mm256_fmadd_round_ps, __m256, __m256, __m256, __m256, 9)
+test_3 (_mm256_fmadd_round_pch, __m256h, __m256h, __m256h, __m256h, 8)
+test_3 (_mm256_fmaddsub_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
+test_3 (_mm256_fmaddsub_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
+test_3 (_mm256_fmaddsub_round_ps, __m256, __m256, __m256, __m256, 9)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1205,6 +1209,18 @@  test_4 (_mm256_maskz_fmadd_round_ph, __m256h,__mmask16, __m256h, __m256h, __m256
 test_4 (_mm256_mask_fmadd_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
 test_4 (_mm256_mask3_fmadd_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
 test_4 (_mm256_maskz_fmadd_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_fmadd_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 8)
+test_4 (_mm256_mask3_fmadd_round_pch, __m256h, __m256h, __m256h, __m256h, __mmask8, 8)
+test_4 (_mm256_maskz_fmadd_round_pch, __m256h, __mmask8, __m256h, __m256h, __m256h, 8)
+test_4 (_mm256_mask_fmaddsub_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask3_fmaddsub_round_pd, __m256d, __m256d, __m256d, __m256d, __mmask8, 9)
+test_4 (_mm256_maskz_fmaddsub_round_pd, __m256d,__mmask8, __m256d, __m256d, __m256d, 9)
+test_4 (_mm256_mask_fmaddsub_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fmaddsub_round_ph, __m256h, __m256h, __m256h, __m256h, __mmask16, 9)
+test_4 (_mm256_maskz_fmaddsub_round_ph, __m256h,__mmask16, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fmaddsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask3_fmaddsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
+test_4 (_mm256_maskz_fmaddsub_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
 test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
 test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
 test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 9685dd072eb..15c440a864f 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1224,6 +1224,9 @@  test_3 (_mm256_maskz_fcmul_round_pch, __m256h, __mmask8, __m256h, __m256h, 8)
 test_3 (_mm256_fmadd_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
 test_3 (_mm256_fmadd_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
 test_3 (_mm256_fmadd_round_ps, __m256, __m256, __m256, __m256, 9)
+test_3 (_mm256_fmadd_round_pch, __m256h, __m256h, __m256h, __m256h, 8)
+test_3 (_mm256_fmaddsub_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
+test_3 (_mm256_fmaddsub_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1248,6 +1251,18 @@  test_4 (_mm256_maskz_fmadd_round_ph, __m256h,__mmask16, __m256h, __m256h, __m256
 test_4 (_mm256_mask_fmadd_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
 test_4 (_mm256_mask3_fmadd_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
 test_4 (_mm256_maskz_fmadd_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_fmadd_round_pch, __m256h, __m256h, __mmask8, __m256h, __m256h, 8)
+test_4 (_mm256_mask3_fmadd_round_pch, __m256h, __m256h, __m256h, __m256h, __mmask8, 8)
+test_4 (_mm256_maskz_fmadd_round_pch, __m256h, __mmask8, __m256h, __m256h, __m256h, 8)
+test_4 (_mm256_mask_fmaddsub_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask3_fmaddsub_round_pd, __m256d, __m256d, __m256d, __m256d, __mmask8, 9)
+test_4 (_mm256_maskz_fmaddsub_round_pd, __m256d,__mmask8, __m256d, __m256d, __m256d, 9)
+test_4 (_mm256_mask_fmaddsub_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fmaddsub_round_ph, __m256h, __m256h, __m256h, __m256h, __mmask16, 9)
+test_4 (_mm256_maskz_fmaddsub_round_ph, __m256h,__mmask16, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fmaddsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask3_fmaddsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
+test_4 (_mm256_maskz_fmaddsub_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
 test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
 test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
 test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 4f54afabeb5..e77991f2365 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -900,6 +900,19 @@ 
 #define __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_mask3_round(A, B, C, D, 8)
 #define __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddcph256_round(A, B, C, D) __builtin_ia32_vfmaddcph256_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, E) __builtin_ia32_vfmaddcph256_mask3_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcph256_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfmaddsubps256_maskz_round(A, B, C, D, 8)
 
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")