[01/22] AVX10.2 ymm rounding: Support vadd{pd,ph,ps} and vcmp{pd,ph,ps} intrins

Message ID 20240814090159.422097-2-haochen.jiang@intel.com
State New
Series Support AVX10.2 ymm rounding

Commit Message

Haochen Jiang Aug. 14, 2024, 9:01 a.m. UTC
From: "Hu, Lin1" <lin1.hu@intel.com>

gcc/ChangeLog:

	* config.gcc: Add avx10_2roundingintrin.h.
	* config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle
	V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT, V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT,
	V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT, UQI_FTYPE_V4DF_V4DF_INT_UQI_INT,
	UHI_FTYPE_V16HF_V16HF_INT_UHI_INT, UQI_FTYPE_V8SF_V8SF_INT_UQI_INT.
	* config/i386/immintrin.h: Include avx10_2roundingintrin.h.
	* config/i386/sse.md: Adjust subst_attr names for the renaming
	in subst.md.
	* config/i386/subst.md (round_mode512bit_condition): Add condition
	check for AVX10.2 rounding control 256-bit intrins and rename to ...
	(round_mode_condition): ...this.
	(round_saeonly_mode512bit_condition): Add condition check for
	AVX10.2 rounding control 256-bit intrins and rename to ...
	(round_saeonly_mode_condition): ...this.
	* config/i386/avx10_2roundingintrin.h: New file.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add -mavx10.2-512 and new builtin tests.
	* gcc.target/i386/avx-2.c: Ditto.
	* gcc.target/i386/sse-13.c: Add new tests.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Ditto.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-1.c: New test.
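
For reference, a minimal usage sketch of the new 256-bit rounding
intrinsics (not part of the patch; assumes a compiler with AVX10.2
support, e.g. gcc -O2 -mavx10.2-256; helper names are illustrative):

#include <immintrin.h>

/* Add four doubles with round-toward-zero, exceptions suppressed.  */
__m256d
add_round_rz (__m256d a, __m256d b)
{
  return _mm256_add_round_pd (a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}

/* Ordered less-or-equal compare under SAE; one mask bit per lane.  */
__mmask8
cmp_le_sae (__m256 a, __m256 b)
{
  return _mm256_cmp_round_ps_mask (a, b, _CMP_LE_OQ, _MM_FROUND_NO_EXC);
}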
---
 gcc/config.gcc                                |   2 +-
 gcc/config/i386/avx10_2roundingintrin.h       | 337 ++++++++++++++++++
 gcc/config/i386/i386-builtin-types.def        |   8 +
 gcc/config/i386/i386-builtin.def              |   8 +
 gcc/config/i386/i386-expand.cc                |   6 +
 gcc/config/i386/immintrin.h                   |   2 +
 gcc/config/i386/sse.md                        | 100 +++---
 gcc/config/i386/subst.md                      |  32 +-
 gcc/testsuite/gcc.target/i386/avx-1.c         |  10 +-
 gcc/testsuite/gcc.target/i386/avx-2.c         |   2 +-
 .../gcc.target/i386/avx10_2-rounding-1.c      |  64 ++++
 gcc/testsuite/gcc.target/i386/sse-13.c        |   8 +
 gcc/testsuite/gcc.target/i386/sse-14.c        |  17 +
 gcc/testsuite/gcc.target/i386/sse-22.c        |  17 +
 gcc/testsuite/gcc.target/i386/sse-23.c        |   8 +
 15 files changed, 558 insertions(+), 63 deletions(-)
 create mode 100644 gcc/config/i386/avx10_2roundingintrin.h
 create mode 100644 gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c

Patch

diff --git a/gcc/config.gcc b/gcc/config.gcc
index a36dd1bcbc6..2c0f4518638 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -452,7 +452,7 @@  i[34567]86-*-* | x86_64-*-*)
 		       cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
 		       raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
 		       sm3intrin.h sha512intrin.h sm4intrin.h
-		       usermsrintrin.h"
+		       usermsrintrin.h avx10_2roundingintrin.h"
 	;;
 ia64-*-*)
 	extra_headers=ia64intrin.h
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
new file mode 100644
index 00000000000..5698ed05c1d
--- /dev/null
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -0,0 +1,337 @@ 
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+
+#ifndef __AVX10_2_256__
+#pragma GCC push_options
+#pragma GCC target("avx10.2-256")
+#define __DISABLE_AVX10_2_256__
+#endif /* __AVX10_2_256__ */
+
+#ifdef  __OPTIMIZE__
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+						       (__v4df) __B,
+						       (__v4df)
+						       _mm256_undefined_pd (),
+						       (__mmask8) -1,
+						       __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+			  __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+						       (__v4df) __B,
+						       (__v4df) __W,
+						       (__mmask8) __U,
+						       __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+			   const int __R)
+{
+  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+						       (__v4df) __B,
+						       (__v4df)
+						       _mm256_setzero_pd (),
+						       (__mmask8) __U,
+						       __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+						       (__v16hf) __B,
+						       (__v16hf)
+						       _mm256_undefined_ph (),
+						       (__mmask16) -1,
+						       __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+			  __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+						       (__v16hf) __B,
+						       (__v16hf) __W,
+						       (__mmask16) __U,
+						       __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+			   const int __R)
+{
+  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+						       (__v16hf) __B,
+						       (__v16hf)
+						       _mm256_setzero_ph (),
+						       (__mmask16) __U,
+						       __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+						      (__v8sf) __B,
+						      (__v8sf)
+						      _mm256_undefined_ps (),
+						      (__mmask8) -1,
+						      __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+			  const int __R)
+{
+  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+						      (__v8sf) __B,
+						      (__v8sf) __W,
+						      (__mmask8) __U,
+						      __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+			   const int __R)
+{
+  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+						      (__v8sf) __B,
+						      (__v8sf)
+						      _mm256_setzero_ps (),
+						      (__mmask8) __U,
+						      __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
+			  const int __R)
+{
+  return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+							(__v4df) __B,
+							__C,
+							(__mmask8) -1,
+							__R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
+			       const int __C, const int __R)
+{
+  return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+							(__v4df) __B,
+							__C,
+							(__mmask8) __U,
+							__R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
+			  const int __R)
+{
+  return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+							 (__v16hf) __B,
+							 __C,
+							 (__mmask16) -1,
+							 __R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
+			       const int __C, const int __R)
+{
+  return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+							 (__v16hf) __B,
+							 __C,
+							 (__mmask16) __U,
+							 __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
+{
+  return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+							(__v8sf) __B,
+							__C,
+							(__mmask8) -1,
+							__R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
+			       const int __C, const int __R)
+{
+  return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+							(__v8sf) __B,
+							__C,
+							(__mmask8) __U,
+							__R);
+}
+#else
+#define _mm256_add_round_pd(A, B, R) \
+  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+						 (__v4df) (B), \
+						 (__v4df) \
+						 (_mm256_undefined_pd ()), \
+						 (__mmask8) (-1), \
+						 (R)))
+
+#define _mm256_mask_add_round_pd(W, U, A, B, R) \
+  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+						 (__v4df) (B), \
+						 (__v4df) (W), \
+						 (__mmask8) (U), \
+						 (R)))
+
+#define _mm256_maskz_add_round_pd(U, A, B, R) \
+  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+						 (__v4df) (B), \
+						 (__v4df) \
+						 (_mm256_setzero_pd ()), \
+						 (__mmask8) (U), \
+						 (R)))
+
+#define _mm256_add_round_ph(A, B, R) \
+  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+						 (__v16hf) (B), \
+						 (__v16hf) \
+						 (_mm256_undefined_ph ()), \
+						 (__mmask16) (-1), \
+						 (R)))
+
+#define _mm256_mask_add_round_ph(W, U, A, B, R) \
+  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+						 (__v16hf) (B), \
+						 (__v16hf) (W), \
+						 (__mmask16) (U), \
+						 (R)))
+
+#define _mm256_maskz_add_round_ph(U, A, B, R) \
+  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+						 (__v16hf) (B), \
+						 (__v16hf) \
+						 (_mm256_setzero_ph ()), \
+						 (__mmask16) (U), \
+						 (R)))
+
+#define _mm256_add_round_ps(A, B, R) \
+  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+						(__v8sf) (B), \
+						(__v8sf) \
+						(_mm256_undefined_ps ()), \
+						(__mmask8) (-1), \
+						(R)))
+
+#define _mm256_mask_add_round_ps(W, U, A, B, R) \
+  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+						(__v8sf) (B), \
+						(__v8sf) (W), \
+						(__mmask8) (U), \
+						(R)))
+
+#define _mm256_maskz_add_round_ps(U, A, B, R)\
+  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+						(__v8sf) (B), \
+						(__v8sf) \
+						(_mm256_setzero_ps ()), \
+						(__mmask8) (U), \
+						(R)))
+
+#define _mm256_cmp_round_pd_mask(A, B, C, R) \
+  ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+						  (__v4df) (B), \
+						  (C), \
+						  (__mmask8) (-1), \
+						  (R)))
+
+#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
+  ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+						  (__v4df) (B), \
+						  (C), \
+						  (__mmask8) (U), \
+						  (R)))
+
+#define _mm256_cmp_round_ph_mask(A, B, C, R) \
+  ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+						   (__v16hf) (B), \
+						   (C), \
+						   (__mmask16) (-1), \
+						   (R)))
+
+#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
+  ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+						   (__v16hf) (B), \
+						   (C), \
+						   (__mmask16) (U), \
+						   (R)))
+
+#define _mm256_cmp_round_ps_mask(A, B, C, R) \
+  ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+						  (__v8sf) (B), \
+						  (C), \
+						  (__mmask8) (-1), \
+						  (R)))
+
+#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
+  ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+						  (__v8sf) (B), \
+						  (C), \
+						  (__mmask8) (U), \
+						  (R)))
+#endif
+
+#ifdef __DISABLE_AVX10_2_256__
+#undef __DISABLE_AVX10_2_256__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX10_2_256__ */
+
+#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
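
Editorial aside, not part of the patch: the mask/maskz intrinsics above
follow the usual AVX-512 write-masking convention.  A scalar C paraphrase
with a hypothetical helper name, ignoring the rounding-mode argument:

/* _mm256_mask_add_round_pd: lane i gets a[i] + b[i] when mask bit i is
   set, else keeps w[i]; the maskz form writes 0.0 there instead.  */
static void
mask_add_pd_ref (double dst[4], const double w[4], unsigned char u,
		 const double a[4], const double b[4])
{
  for (int i = 0; i < 4; i++)
    dst[i] = ((u >> i) & 1) ? a[i] + b[i] : w[i];
}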
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 5e1245174a3..f32abfd9d70 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1413,3 +1413,11 @@  DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
 
 # USER_MSR builtins
 DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
+
+# AVX10.2 builtins
+DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
+DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
+DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
+DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
+DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index e876e7f5cbe..a7c0884e2ab 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3318,6 +3318,14 @@  BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_mask_ro
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_round", IX86_BUILTIN_VFMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
 
+/* AVX10.2.  */
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
+
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
 /* FMA4 and XOP.  */
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index d1ac10f4d9d..af94087c6d5 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -12480,11 +12480,14 @@  ix86_expand_round_builtin (const struct builtin_description *d,
     case INT_FTYPE_V4SF_V4SF_INT_INT:
     case INT_FTYPE_V2DF_V2DF_INT_INT:
       return ix86_expand_sse_comi_round (d, exp, target);
+    case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
     case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
     case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
     case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
     case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
+    case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
     case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
+    case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
     case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
     case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
     case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12508,8 +12511,11 @@  ix86_expand_round_builtin (const struct builtin_description *d,
       nargs = 5;
       break;
     case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
+    case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
     case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
     case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
+    case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
+    case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
     case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
     case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
     case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
index b1d4cbf9ecd..80357d563ee 100644
--- a/gcc/config/i386/immintrin.h
+++ b/gcc/config/i386/immintrin.h
@@ -138,4 +138,6 @@ 
 
 #include <amxfp16intrin.h>
 
+#include <avx10_2roundingintrin.h>
+
 #endif /* _IMMINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index d1010bc5682..acc1dc66491 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -2459,7 +2459,7 @@ 
 	(plusminus:VFH
 	  (match_operand:VFH 1 "<round_nimm_predicate>")
 	  (match_operand:VFH 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
   "ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
 
 (define_insn "*<insn><mode>3<mask_name><round_name>"
@@ -2468,7 +2468,7 @@ 
 	  (match_operand:VFH 1 "<bcst_round_nimm_predicate>" "<comm>0,v")
 	  (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
   "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
-   && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+   && <mask_mode512bit_condition> && <round_mode_condition>"
   "@
    <plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
    v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -2548,7 +2548,7 @@ 
 	(mult:VFH
 	  (match_operand:VFH 1 "<round_nimm_predicate>")
 	  (match_operand:VFH 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
   "ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
 
 (define_insn "*mul<mode>3<mask_name><round_name>"
@@ -2557,7 +2557,7 @@ 
 	  (match_operand:VFH 1 "<bcst_round_nimm_predicate>" "%0,v")
 	  (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
   "TARGET_SSE && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
-   && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+   && <mask_mode512bit_condition> && <round_mode_condition>"
   "@
    mul<ssemodesuffix>\t{%2, %0|%0, %2}
    vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -2685,7 +2685,7 @@ 
 	(div:VFH
 	  (match_operand:VFH 1 "register_operand" "0,v")
 	  (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
   "@
    div<ssemodesuffix>\t{%2, %0|%0, %2}
    vdiv<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
@@ -2851,7 +2851,7 @@ 
 (define_insn "<sse>_sqrt<mode>2<mask_name><round_name>"
   [(set (match_operand:VFH 0 "register_operand" "=x,v")
 	(sqrt:VFH (match_operand:VFH 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
   "@
    sqrt<ssemodesuffix>\t{%1, %0|%0, %1}
    vsqrt<ssemodesuffix>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -3067,7 +3067,7 @@ 
 	  (match_operand:VFH 1 "<round_saeonly_nimm_predicate>")
 	  (match_operand:VFH 2 "<round_saeonly_nimm_predicate>")))]
   "TARGET_SSE && <mask_mode512bit_condition>
-   && <round_saeonly_mode512bit_condition>"
+   && <round_saeonly_mode_condition>"
 {
   if (!flag_finite_math_only || flag_signed_zeros)
     {
@@ -3095,7 +3095,7 @@ 
   "TARGET_SSE
    && !(MEM_P (operands[1]) && MEM_P (operands[2]))
    && <mask_mode512bit_condition>
-   && <round_saeonly_mode512bit_condition>"
+   && <round_saeonly_mode_condition>"
   "@
    <maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
    v<maxmin_float><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -3182,7 +3182,7 @@ 
 	  IEEE_MAXMIN))]
   "TARGET_SSE
    && <mask_mode512bit_condition>
-   && <round_saeonly_mode512bit_condition>"
+   && <round_saeonly_mode_condition>"
   "@
    <ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
    v<ieee_maxmin><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
@@ -4142,7 +4142,7 @@ 
 	   (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
 	   (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
 	  UNSPEC_PCMP))]
-  "TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512F && <round_saeonly_mode_condition>"
   "v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
   [(set_attr "type" "ssecmp")
    (set_attr "length_immediate" "1")
@@ -5638,7 +5638,7 @@ 
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
 {
   emit_insn (gen_fma_fmadd_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -5680,7 +5680,7 @@ 
 	  (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v")
 	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
 	  (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -5721,7 +5721,7 @@ 
 	    (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
 	  (match_dup 1)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
   "@
    vfmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -5768,7 +5768,7 @@ 
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
 {
   emit_insn (gen_fma_fmsub_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -5783,7 +5783,7 @@ 
 	  (match_operand:VFH_SF_AVX512VL   2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
 	  (neg:VFH_SF_AVX512VL
 	    (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -5844,7 +5844,7 @@ 
 	      (match_operand:VFH_AVX512VL 3 "register_operand" "0")))
 	  (match_dup 3)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
   "vfmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
@@ -5874,7 +5874,7 @@ 
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
 {
   emit_insn (gen_fma_fnmadd_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -5889,7 +5889,7 @@ 
 	    (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v"))
 	  (match_operand:VFH_SF_AVX512VL   2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
 	  (match_operand:VFH_SF_AVX512VL   3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfnmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfnmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -5932,7 +5932,7 @@ 
 	    (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
 	  (match_dup 1)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
   "@
    vfnmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfnmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -5950,7 +5950,7 @@ 
 	    (match_operand:VFH_AVX512VL 3 "register_operand" "0"))
 	  (match_dup 3)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
   "vfnmadd231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
@@ -5981,7 +5981,7 @@ 
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
 {
   emit_insn (gen_fma_fnmsub_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -5997,7 +5997,7 @@ 
 	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
 	  (neg:VFH_SF_AVX512VL
 	    (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfnmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfnmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6042,7 +6042,7 @@ 
 	      (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>")))
 	  (match_dup 1)
 	  (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <round_mode_condition>"
   "@
    vfnmsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfnmsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
@@ -6159,7 +6159,7 @@ 
 	   (match_operand:VFH_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>,v,<round_constraint>")
 	   (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0")]
 	  UNSPEC_FMADDSUB))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfmaddsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmaddsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6229,7 +6229,7 @@ 
 	   (neg:VFH_AVX512VL
 	     (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0"))]
 	  UNSPEC_FMADDSUB))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
   "@
    vfmsubadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmsubadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6798,7 +6798,7 @@ 
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx op0, op1, dest;
   if (<round_embedded_complex>)
@@ -6828,7 +6828,7 @@ 
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   emit_insn (gen_fma_fmaddc_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -6842,7 +6842,7 @@ 
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx op0, op1, dest;
   if (<round_embedded_complex>)
@@ -6874,7 +6874,7 @@ 
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   emit_insn (gen_fma_fcmaddc_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -6898,7 +6898,7 @@ 
 	   (match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")
 	   (match_operand:VHF_AVX512VL 3 "<round_nimm_predicate>" "0")]
 	   UNSPEC_COMPLEX_F_C_MA))]
-  "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode_condition>"
   "v<complexopname><ssemodesuffix>\t{<round_sdc_mask_op4>%2, %1, %0<sdc_mask_op4>|%0<sdc_mask_op4>, %1, %2<round_sdc_mask_op4>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
@@ -7036,7 +7036,7 @@ 
 	  (unspec:<avx512fmaskmode>
 	    [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
 	    UNSPEC_COMPLEX_MASK)))]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
   "v<complexopname><ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
@@ -7056,7 +7056,7 @@ 
 	    [(match_operand:VHF_AVX512VL 1 "<round_nimm_predicate>" "<int_comm>v")
 	     (match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")]
 	     UNSPEC_COMPLEX_F_C_MUL))]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   if (TARGET_DEST_FALSE_DEP_FOR_GLC
       && <maskc_dest_false_dep_for_glc_cond>)
@@ -7073,7 +7073,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -7087,7 +7087,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx op0, op1, dest;
 
@@ -7117,7 +7117,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
@@ -7131,7 +7131,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx op0, op1, dest;
 
@@ -7161,7 +7161,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx dest, op0, op1;
 
@@ -7191,7 +7191,7 @@ 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
 {
   rtx dest, op0, op1;
 
@@ -8362,7 +8362,7 @@ 
   [(set (match_operand:VF1 0 "register_operand" "=x,v")
 	(float:VF1
 	  (match_operand:<sseintvecmode> 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
-  "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+  "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
   "@
    cvtdq2ps\t{%1, %0|%0, %1}
    vcvtdq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -8441,7 +8441,7 @@ 
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
 	(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
 		     UNSPEC_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_mode_condition>"
   "vcvtps2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -8464,7 +8464,7 @@ 
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
 	(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
 		     UNSPEC_UNSIGNED_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_mode_condition>"
   "vcvtps2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -8909,7 +8909,7 @@ 
   [(set (match_operand:<ssePSmode2> 0 "register_operand" "=v")
 	 (any_float:<ssePSmode2>
 	   (match_operand:VI8_256_512 1 "nonimmediate_operand" "<round_constraint>")))]
-  "TARGET_AVX512DQ && <round_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_mode_condition>"
   "vcvt<floatsuffix>qq2ps<qq2pssuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9410,7 +9410,7 @@ 
 	(unspec:<sseintvecmode>
 	  [(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
 	  UNSPEC_VCVTT_U))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
   "vcvttpd2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9420,7 +9420,7 @@ 
   [(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
 	(any_fix:<sseintvecmode>
 	  (match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
   "vcvttpd2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9431,7 +9431,7 @@ 
 	(unspec:<sseintvecmode>
 	  [(match_operand:VF2_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
 	  UNSPEC_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_mode_condition>"
   "vcvtpd2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9442,7 +9442,7 @@ 
 	(unspec:<sseintvecmode>
 	  [(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "<round_constraint>")]
 	  UNSPEC_UNSIGNED_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_mode_condition>"
   "vcvtpd2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9453,7 +9453,7 @@ 
 	(unspec:VI8_256_512
 	  [(match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
 	  UNSPEC_VCVTT_U))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
   "vcvttps2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9463,7 +9463,7 @@ 
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
 	(any_fix:VI8_256_512
 	  (match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
   "vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -9941,7 +9941,7 @@ 
   [(set (match_operand:VF2_512_256 0 "register_operand" "=v")
 	(float_extend:VF2_512_256
 	  (match_operand:<sf2dfmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode_condition>"
   "vcvtps2pd\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "maybe_vex")
@@ -29482,7 +29482,7 @@ 
 	   (match_operand:VF_AVX512VL 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
 	   (match_operand:SI 3 "const_0_to_15_operand")]
 	  UNSPEC_RANGE))]
-  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
 {
   if (TARGET_DEST_FALSE_DEP_FOR_GLC
       && <mask4_dest_false_dep_for_glc_cond>
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index 40fb92094d2..ba180301b90 100644
--- a/gcc/config/i386/subst.md
+++ b/gcc/config/i386/subst.md
@@ -205,11 +205,17 @@ 
 (define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand")
 (define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand")
 (define_subst_attr "round_prefix" "round" "vex" "evex")
-(define_subst_attr "round_mode512bit_condition" "round" "1" "(<MODE>mode == V16SFmode
-							      || <MODE>mode == V8DFmode
-							      || <MODE>mode == V8DImode
-							      || <MODE>mode == V16SImode
-							      || <MODE>mode == V32HFmode)")
+(define_subst_attr "round_mode_condition" "round" "1" "((<MODE>mode == V16SFmode
+							       || <MODE>mode == V8DFmode
+							       || <MODE>mode == V8DImode
+							       || <MODE>mode == V16SImode
+							       || <MODE>mode == V32HFmode)
+							       || (TARGET_AVX10_2_256
+								   && (<MODE>mode == V8SFmode
+								       || <MODE>mode == V4DFmode
+								       || <MODE>mode == V4DImode
+								       || <MODE>mode == V8SImode
+								       || <MODE>mode == V16HFmode)))")
 
 (define_subst_attr "round_modev4sf_condition" "round" "1" "(<MODE>mode == V4SFmode)")
 (define_subst_attr "round_codefor" "round" "*" "")
@@ -250,11 +256,17 @@ 
 (define_subst_attr "round_saeonly_constraint2" "round_saeonly" "m" "v")
 (define_subst_attr "round_saeonly_nimm_predicate" "round_saeonly" "vector_operand" "register_operand")
 (define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimmediate_operand" "register_operand")
-(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode
-									      || <MODE>mode == V8DFmode
-									      || <MODE>mode == V8DImode
-									      || <MODE>mode == V16SImode
-									      || <MODE>mode == V32HFmode)")
+(define_subst_attr "round_saeonly_mode_condition" "round_saeonly" "1" "((<MODE>mode == V16SFmode
+									       || <MODE>mode == V8DFmode
+									       || <MODE>mode == V8DImode
+									       || <MODE>mode == V16SImode
+									       || <MODE>mode == V32HFmode)
+									       || (TARGET_AVX10_2_256
+										   && (<MODE>mode == V8SFmode
+										       || <MODE>mode == V4DFmode
+										       || <MODE>mode == V4DImode
+										       || <MODE>mode == V8SImode
+										       || <MODE>mode == V16HFmode)))")
 
 
 (define_subst "round_saeonly"
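
Editorial aside, not part of the patch: the two widened conditions above
keep the 512-bit modes unconditionally enabled and newly admit the
256-bit modes only under AVX10.2.  A hedged C paraphrase, reusing the
mode names and the TARGET_AVX10_2_256 macro from the hunks above:

static bool
round_mode_condition_p (machine_mode mode)
{
  /* 512-bit vector modes: allowed as before.  */
  if (mode == V16SFmode || mode == V8DFmode || mode == V8DImode
      || mode == V16SImode || mode == V32HFmode)
    return true;
  /* 256-bit vector modes: new, and only with AVX10.2-256 enabled.  */
  return TARGET_AVX10_2_256
	 && (mode == V8SFmode || mode == V4DFmode || mode == V4DImode
	     || mode == V8SImode || mode == V16HFmode);
}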
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index a6589deca84..8b852c670cd 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mavx512bw -mavx512fp16 -mavx512vl -mprefetchi" } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mprefetchi -mavx10.2-512" } */
 /* { dg-add-options bind_pic_locally } */
 
 #include <mm_malloc.h>
@@ -842,6 +842,14 @@ 
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
 #include <wmmintrin.h>
 #include <immintrin.h>
 #include <mm3dnow.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx-2.c b/gcc/testsuite/gcc.target/i386/avx-2.c
index 642ae4d7bfb..fb0ef9e2aa5 100644
--- a/gcc/testsuite/gcc.target/i386/avx-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx-2.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul -mavx512bw -mavx512fp16 -mavx512vl" } */
+/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul -mavx10.2-512" } */
 /* { dg-add-options bind_pic_locally } */
 
 #include <mm_malloc.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
new file mode 100644
index 00000000000..0fd37bc8c3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
@@ -0,0 +1,64 @@ 
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx10.2" } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\\\$3\[^\n\r]*\{sae\}\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\[^\{\n\]*\\\$4\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+
+#include <immintrin.h>
+
+volatile __m256 x;
+volatile __m256d xd;
+volatile __m256h xh;
+volatile __mmask8 m8;
+volatile __mmask16 m16;
+volatile __mmask32 m32;
+
+void extern
+avx10_2_test_1 (void)
+{
+  xd = _mm256_add_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_add_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_add_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+  xh = _mm256_add_round_ph (xh, xh, 8);
+  xh = _mm256_mask_add_round_ph (xh, m32, xh, xh, 8);
+  xh = _mm256_maskz_add_round_ph (m32, xh, xh, 11);
+
+  x = _mm256_add_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  x = _mm256_mask_add_round_ps (x, m16, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+  x = _mm256_maskz_add_round_ps (m16, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+  m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+  m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+
+  m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8);
+  m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4);
+
+  m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+  m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+}
+
+void extern
+avx10_2_test_2 (void)
+{
+  m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+  m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+
+  m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8);
+  m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4);
+
+  m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+  m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index a4ed83665dc..3b0f96ac8d9 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -849,4 +849,12 @@ 
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index cbe91bf5fb3..9f0a4c19418 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1019,3 +1019,20 @@  test_2 (_mm512_gf2p8affine_epi64_epi8, __m512i, __m512i, __m512i, 1)
 
 /* sm3intrin.h */
 test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
+
+/* avx10_2roundingintrin.h */
+test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
+test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
+test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
+test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
+test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
+test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
+test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 73367c0d207..959cec5d23b 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1060,3 +1060,20 @@  test_1 ( __bextri_u64, unsigned long long, unsigned long long, 1)
 
 /* sm3intrin.h */
 test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
+
+/* avx10_2roundingintrin.h */
+test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
+test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
+test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
+test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
+test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
+test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
+test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 9201298a9a8..0bc7fa00e61 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -824,6 +824,14 @@ 
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
 
 #include <x86intrin.h>