
[4/4] RISC-V: Add crypto vector API test cases.

Message ID 20231206024524.10792-4-wangfeng@eswincomputing.com
State New
Series [1/4] RISC-V: Add crypto vector implied ISA info.

Commit Message

Feng Wang Dec. 6, 2023, 2:45 a.m. UTC
This patch adds crypto vector API test cases based on
https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/
eopc/vector-crypto/auto-generated/vector-crypto
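
For reference, each generated test file exercises one intrinsic across its
plain, masked (_m) and tail/mask policy (_tu/_tum/_tumu/_mu) variants and
then verifies the emitted vsetvli configurations and instruction counts with
scan-assembler-times directives.  A minimal sketch of the pattern, using the
vandn intrinsics covered below:

/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
#include <riscv_vector.h>

/* Non-overloaded form.  */
vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vandn_vv_u8mf8(vs2, vs1, vl);
}

/* Overloaded form with the tail-undisturbed policy.  */
vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2,
                                   uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 1 } } */
/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 1 } } */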

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/zvk/zvbb/vandn.c: New test.
	* gcc.target/riscv/zvk/zvbb/vandn_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vbrev.c: New test.
	* gcc.target/riscv/zvk/zvbb/vbrev8.c: New test.
	* gcc.target/riscv/zvk/zvbb/vbrev8_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vbrev_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vclz.c: New test.
	* gcc.target/riscv/zvk/zvbb/vclz_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vctz.c: New test.
	* gcc.target/riscv/zvk/zvbb/vctz_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vrev8.c: New test.
	* gcc.target/riscv/zvk/zvbb/vrev8_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vrol.c: New test.
	* gcc.target/riscv/zvk/zvbb/vrol_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vror.c: New test.
	* gcc.target/riscv/zvk/zvbb/vror_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/vwsll.c: New test.
	* gcc.target/riscv/zvk/zvbb/vwsll_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbb/zvkb.c: New test.
	* gcc.target/riscv/zvk/zvbc/vclmul.c: New test.
	* gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvbc/vclmulh.c: New test.
	* gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvk.exp: New test.
	* gcc.target/riscv/zvk/zvkg/vghsh.c: New test.
	* gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkg/vgmul.c: New test.
	* gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesdf.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesdf_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesdm.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesdm_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesef.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesef_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesem.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesem_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaeskf1.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaeskf1_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaeskf2.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaeskf2_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesz.c: New test.
	* gcc.target/riscv/zvk/zvkned/vaesz_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2ch.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2ch_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2cl.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2cl_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2ms.c: New test.
	* gcc.target/riscv/zvk/zvknha/vsha2ms_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2ch.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2ch_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2cl.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2cl_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2ms.c: New test.
	* gcc.target/riscv/zvk/zvknhb/vsha2ms_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvksed/vsm4k.c: New test.
	* gcc.target/riscv/zvk/zvksed/vsm4k_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvksed/vsm4r.c: New test.
	* gcc.target/riscv/zvk/zvksed/vsm4r_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvksh/vsm3c.c: New test.
	* gcc.target/riscv/zvk/zvksh/vsm3c_overloaded.c: New test.
	* gcc.target/riscv/zvk/zvksh/vsm3me.c: New test.
	* gcc.target/riscv/zvk/zvksh/vsm3me_overloaded.c: New test.
	* gcc.target/riscv/zvkb.c: New test.
---
 .../gcc.target/riscv/zvk/zvbb/vandn.c         | 172 +++++++++++++++
 .../riscv/zvk/zvbb/vandn_overloaded.c         | 172 +++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vbrev.c         | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vbrev8.c        | 170 ++++++++++++++
 .../riscv/zvk/zvbb/vbrev8_overloaded.c        | 170 ++++++++++++++
 .../riscv/zvk/zvbb/vbrev_overloaded.c         | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vclz.c          | 187 ++++++++++++++++
 .../riscv/zvk/zvbb/vclz_overloaded.c          | 187 ++++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vctz.c          | 187 ++++++++++++++++
 .../riscv/zvk/zvbb/vctz_overloaded.c          | 188 ++++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vrev8.c         | 170 ++++++++++++++
 .../riscv/zvk/zvbb/vrev8_overloaded.c         | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vrol.c          | 172 +++++++++++++++
 .../riscv/zvk/zvbb/vrol_overloaded.c          | 172 +++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vror.c          | 172 +++++++++++++++
 .../riscv/zvk/zvbb/vror_overloaded.c          | 172 +++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/vwsll.c         | 164 ++++++++++++++
 .../riscv/zvk/zvbb/vwsll_overloaded.c         | 165 ++++++++++++++
 .../gcc.target/riscv/zvk/zvbb/zvkb.c          |  48 ++++
 .../gcc.target/riscv/zvk/zvbc/vclmul.c        | 208 ++++++++++++++++++
 .../riscv/zvk/zvbc/vclmul_overloaded.c        | 208 ++++++++++++++++++
 .../gcc.target/riscv/zvk/zvbc/vclmulh.c       | 208 ++++++++++++++++++
 .../riscv/zvk/zvbc/vclmulh_overloaded.c       | 208 ++++++++++++++++++
 gcc/testsuite/gcc.target/riscv/zvk/zvk.exp    |  54 +++++
 .../gcc.target/riscv/zvk/zvkg/vghsh.c         |  51 +++++
 .../riscv/zvk/zvkg/vghsh_overloaded.c         |  51 +++++
 .../gcc.target/riscv/zvk/zvkg/vgmul.c         |  51 +++++
 .../riscv/zvk/zvkg/vgmul_overloaded.c         |  51 +++++
 .../gcc.target/riscv/zvk/zvkned/vaesdf.c      | 169 ++++++++++++++
 .../riscv/zvk/zvkned/vaesdf_overloaded.c      | 169 ++++++++++++++
 .../gcc.target/riscv/zvk/zvkned/vaesdm.c      | 170 ++++++++++++++
 .../riscv/zvk/zvkned/vaesdm_overloaded.c      | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvkned/vaesef.c      | 170 ++++++++++++++
 .../riscv/zvk/zvkned/vaesef_overloaded.c      | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvkned/vaesem.c      | 170 ++++++++++++++
 .../riscv/zvk/zvkned/vaesem_overloaded.c      | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvkned/vaeskf1.c     |  50 +++++
 .../riscv/zvk/zvkned/vaeskf1_overloaded.c     |  50 +++++
 .../gcc.target/riscv/zvk/zvkned/vaeskf2.c     |  50 +++++
 .../riscv/zvk/zvkned/vaeskf2_overloaded.c     |  50 +++++
 .../gcc.target/riscv/zvk/zvkned/vaesz.c       | 130 +++++++++++
 .../riscv/zvk/zvkned/vaesz_overloaded.c       | 130 +++++++++++
 .../gcc.target/riscv/zvk/zvknha/vsha2ch.c     |  51 +++++
 .../riscv/zvk/zvknha/vsha2ch_overloaded.c     |  51 +++++
 .../gcc.target/riscv/zvk/zvknha/vsha2cl.c     |  51 +++++
 .../riscv/zvk/zvknha/vsha2cl_overloaded.c     |  51 +++++
 .../gcc.target/riscv/zvk/zvknha/vsha2ms.c     |  51 +++++
 .../riscv/zvk/zvknha/vsha2ms_overloaded.c     |  51 +++++
 .../gcc.target/riscv/zvk/zvknhb/vsha2ch.c     |  83 +++++++
 .../riscv/zvk/zvknhb/vsha2ch_overloaded.c     |  83 +++++++
 .../gcc.target/riscv/zvk/zvknhb/vsha2cl.c     |  83 +++++++
 .../riscv/zvk/zvknhb/vsha2cl_overloaded.c     |  83 +++++++
 .../gcc.target/riscv/zvk/zvknhb/vsha2ms.c     |  83 +++++++
 .../riscv/zvk/zvknhb/vsha2ms_overloaded.c     |  83 +++++++
 .../gcc.target/riscv/zvk/zvksed/vsm4k.c       |  50 +++++
 .../riscv/zvk/zvksed/vsm4k_overloaded.c       |  50 +++++
 .../gcc.target/riscv/zvk/zvksed/vsm4r.c       | 170 ++++++++++++++
 .../riscv/zvk/zvksed/vsm4r_overloaded.c       | 170 ++++++++++++++
 .../gcc.target/riscv/zvk/zvksh/vsm3c.c        |  51 +++++
 .../riscv/zvk/zvksh/vsm3c_overloaded.c        |  51 +++++
 .../gcc.target/riscv/zvk/zvksh/vsm3me.c       |  51 +++++
 .../riscv/zvk/zvksh/vsm3me_overloaded.c       |  51 +++++
 gcc/testsuite/gcc.target/riscv/zvkb.c         |  13 ++
 63 files changed, 7547 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbb/zvkb.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvkb.c

Patch

diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn.c
new file mode 100644
index 00000000000..d3462ea114c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8mf8(vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u16mf4(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u16mf4(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u32m1(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u32m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u16mf2_m(mask, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 20 } } */
+/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 12 } } */
+/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 20 } } */
+/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 12 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn_overloaded.c
new file mode 100644
index 00000000000..25113efb0d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vandn_overloaded.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vandn(vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn(vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vandn(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vandn(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vandn(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, vs1, vl);
+}
+
+vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 20 } } */
+/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 12 } } */
+/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 20 } } */
+/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 12 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev.c
new file mode 100644
index 00000000000..23b3b2f2f62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf8(vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m8(vs2, vl);
+}
+
+vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf4(vs2, vl);
+}
+
+vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m4(vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1(vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m2(vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1(vs2, vl);
+}
+
+vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m2(vs2, vl);
+}
+
+vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl);
+}
+
+vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m4_m(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl);
+}
+
+vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m1_m(mask, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_m(mask, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m2_m(mask, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1_m(mask, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_m(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8.c
new file mode 100644
index 00000000000..abfeadce941
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8(vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8(vs2, vl);
+}
+
+vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf4(vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m4(vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1(vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2(vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1(vs2, vl);
+}
+
+vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m2(vs2, vl);
+}
+
+vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl);
+}
+
+vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8_overloaded.c
new file mode 100644
index 00000000000..8a578fd19cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev8_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8(vs2, vl);
+}
+
+vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev_overloaded.c
new file mode 100644
index 00000000000..ae22a1ea15a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vbrev_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev(vs2, vl);
+}
+
+vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz.c
new file mode 100644
index 00000000000..df19efd3c7d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz.c
@@ -0,0 +1,187 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf8(vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf4(vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf2(vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m1(vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m2(vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m4(vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m8(vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf4(vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf2(vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m1(vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m2(vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m4(vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m8(vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32mf2(vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m1(vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m2(vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m4(vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m8(vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m1(vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m2(vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m4(vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m8(vs2, vl);
+}
+
+vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf8_m(mask, vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf4_m(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf2_m(mask, vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m1_m(mask, vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m2_m(mask, vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m4_m(mask, vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m8_m(mask, vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf4_m(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf2_m(mask, vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m1_m(mask, vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m2_m(mask, vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m4_m(mask, vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m8_m(mask, vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32mf2_m(mask, vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m1_m(mask, vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m2_m(mask, vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m4_m(mask, vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m8_m(mask, vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m1_m(mask, vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m2_m(mask, vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m4_m(mask, vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m8_m(mask, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 44 } } */
+/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]} 44 } } */
+/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 22 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz_overloaded.c
new file mode 100644
index 00000000000..b4466bd4c23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vclz_overloaded.c
@@ -0,0 +1,187 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz(vs2, vl);
+}
+
+vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz(mask, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 44 } } */
+/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]} 44 } } */
+/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 22 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz.c
new file mode 100644
index 00000000000..ce3cc6dc551
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz.c
@@ -0,0 +1,187 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf8(vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf4(vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf2(vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m1(vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m2(vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m4(vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m8(vs2, vl);
+}
+
+vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16mf4(vs2, vl);
+}
+
+vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16mf2(vs2, vl);
+}
+
+vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m1(vs2, vl);
+}
+
+vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m2(vs2, vl);
+}
+
+vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m4(vs2, vl);
+}
+
+vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m8(vs2, vl);
+}
+
+vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32mf2(vs2, vl);
+}
+
+vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m1(vs2, vl);
+}
+
+vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m2(vs2, vl);
+}
+
+vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m4(vs2, vl);
+}
+
+vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m8(vs2, vl);
+}
+
+vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m1(vs2, vl);
+}
+
+vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m2(vs2, vl);
+}
+
+vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m4(vs2, vl);
+}
+
+vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m8(vs2, vl);
+}
+
+vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf8_m(mask, vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf4_m(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf2_m(mask, vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m1_m(mask, vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m2_m(mask, vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m4_m(mask, vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m8_m(mask, vs2, vl);
+}
+
+vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16mf4_m(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16mf2_m(mask, vs2, vl);
+}
+
+vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m1_m(mask, vs2, vl);
+}
+
+vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m2_m(mask, vs2, vl);
+}
+
+vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m4_m(mask, vs2, vl);
+}
+
+vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u16m8_m(mask, vs2, vl);
+}
+
+vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32mf2_m(mask, vs2, vl);
+}
+
+vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m1_m(mask, vs2, vl);
+}
+
+vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m2_m(mask, vs2, vl);
+}
+
+vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m4_m(mask, vs2, vl);
+}
+
+vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u32m8_m(mask, vs2, vl);
+}
+
+vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m1_m(mask, vs2, vl);
+}
+
+vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m2_m(mask, vs2, vl);
+}
+
+vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m4_m(mask, vs2, vl);
+}
+
+vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u64m8_m(mask, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 44 } } */
+/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]} 44 } } */
+/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 22 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz_overloaded.c
new file mode 100644
index 00000000000..df083ea4504
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vctz_overloaded.c
@@ -0,0 +1,187 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vctz(vs2, vl);
+}
+
+vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vctz(mask, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 44 } } */
+/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]} 44 } } */
+/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 22 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8.c
new file mode 100644
index 00000000000..92cb10bd086
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf8(vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m8(vs2, vl);
+}
+
+vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16mf4(vs2, vl);
+}
+
+vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m4(vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m1(vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m2(vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m1(vs2, vl);
+}
+
+vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m2(vs2, vl);
+}
+
+vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl);
+}
+
+vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m4_m(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl);
+}
+
+vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m1_m(mask, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m1_m(mask, vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m2_m(mask, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m1_m(mask, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m4_m(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8_overloaded.c
new file mode 100644
index 00000000000..86822d0c31a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrev8_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vrev8(vs2, vl);
+}
+
+vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8(mask, vs2, vl);
+}
+
+vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tu(maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]} 40 } } */
+/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 27 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol.c
new file mode 100644
index 00000000000..5b9846a624f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m8(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16mf4(vs2, rs1, vl);
+}
+
+vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m4(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m1(vs2, rs1, vl);
+}
+
+vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m2(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 12 } } */
+/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */
+/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 28 } } */
+/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 19 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol_overloaded.c
new file mode 100644
index 00000000000..ff82225b969
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vrol_overloaded.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrol(vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrol_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrol_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 12 } } */
+/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */
+/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 28 } } */
+/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 19 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror.c
new file mode 100644
index 00000000000..0c36e5ff13f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m8(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16mf4(vs2, rs1, vl);
+}
+
+vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m4(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1(vs2, rs1, vl);
+}
+
+vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m2(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 12 } } */
+/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */
+/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 28 } } */
+/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 19 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror_overloaded.c
new file mode 100644
index 00000000000..7274e013121
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vror_overloaded.c
@@ -0,0 +1,172 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror(vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vror(mask, vs2, vs1, vl);
+}
+
+vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vror(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror(mask, vs2, rs1, vl);
+}
+
+vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 12 } } */
+/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */
+/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 28 } } */
+/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 19 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll.c
new file mode 100644
index 00000000000..f3170880679
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll.c
@@ -0,0 +1,164 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m1(vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16m1(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32m1(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m1_m(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16m1_m(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 14 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 6 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 6 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 19 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 12 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 19 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 12 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll_overloaded.c
new file mode 100644
index 00000000000..c38b37f3ca0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/vwsll_overloaded.c
@@ -0,0 +1,164 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <riscv_vector.h>
+
+vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll(vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll(vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(vs2, rs1, vl);
+}
+
+vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsll(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwsll(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll(mask, vs2, rs1, vl);
+}
+
+vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 14 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 6 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 6 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 19 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 12 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 19 } } */
+/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 12 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbb/zvkb.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/zvkb.c
new file mode 100644
index 00000000000..cf956ced59e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbb/zvkb.c
@@ -0,0 +1,48 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkb -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vandn_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vandn_vx_u8mf8(vs2, rs1, vl);
+}
+
+vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8(vs2, vl);
+}
+
+vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4(vs2, vl);
+}
+
+vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf8(vs2, vl);
+}
+
+vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vrev8_v_u8mf4(vs2, vl);
+}
+
+vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vror_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u8mf8(vs2, rs1, vl);
+}
+
+vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrol_vv_u8mf8(vs2, vs1, vl);
+}
+
+vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vrol_vx_u8mf8(vs2, rs1, vl);
+}
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
new file mode 100644
index 00000000000..ba3e5cf858e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
new file mode 100644
index 00000000000..1e25831f3f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
new file mode 100644
index 00000000000..c14b8a56490
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
new file mode 100644
index 00000000000..ed3c4388af6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
new file mode 100644
index 00000000000..5e2778a51a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
@@ -0,0 +1,54 @@ 
+# Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3.  If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't a RISC-V target.
+if ![istarget riscv*-*-*] then {
+  return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+    set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbb/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbc/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvkg/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvkned/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvknha/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvknhb/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvksed/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvksh/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
new file mode 100644
index 00000000000..3837f99fea3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vghsh\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
new file mode 100644
index 00000000000..2d2004bc653
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vghsh\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
new file mode 100644
index 00000000000..902de106c12
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vgmul\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
new file mode 100644
index 00000000000..53397ebc69b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vgmul\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf.c
new file mode 100644
index 00000000000..8fcfd493f2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf.c
@@ -0,0 +1,169 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+/* non-policy */
+vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesdf\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesdf\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf_overloaded.c
new file mode 100644
index 00000000000..b8570818358
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdf_overloaded.c
@@ -0,0 +1,169 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+/* non-policy */
+vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesdf\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesdf\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm.c
new file mode 100644
index 00000000000..1d4a1711cc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesdm\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesdm\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm_overloaded.c
new file mode 100644
index 00000000000..4247ba3901b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesdm_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesdm\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesdm\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef.c
new file mode 100644
index 00000000000..93a79ffa51c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesef\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesef\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef_overloaded.c
new file mode 100644
index 00000000000..9e3998ef055
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesef_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vv(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vs(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesef\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesef\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem.c
new file mode 100644
index 00000000000..43e468c6f0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem.c
@@ -0,0 +1,170 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesem\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesem\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem_overloaded.c
new file mode 100644
index 00000000000..bb2e7dea733
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesem_overloaded.c
@@ -0,0 +1,170 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vv(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vs(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vv_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesem_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesem_vs_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vaesem\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vaesem\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1.c
new file mode 100644
index 00000000000..0edbb6d9108
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vaeskf1\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1_overloaded.c
new file mode 100644
index 00000000000..63e3537a06b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf1_overloaded.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf1(vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf1(vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf1(vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf1(vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf1(vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vaeskf1\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2.c
new file mode 100644
index 00000000000..06fed681d6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vaeskf2\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2_overloaded.c
new file mode 100644
index 00000000000..da7f42aef88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaeskf2_overloaded.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2(vd, vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaeskf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaeskf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaeskf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaeskf2_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vaeskf2\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz.c
new file mode 100644
index 00000000000..fbbbeaa78ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz.c
@@ -0,0 +1,130 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesz_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 15 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 15 } } */
+/* { dg-final { scan-assembler-times {vaesz\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz_overloaded.c
new file mode 100644
index 00000000000..9130fbdc4ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkned/vaesz_overloaded.c
@@ -0,0 +1,130 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkned -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesz(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesz_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesz_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 15 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 15 } } */
+/* { dg-final { scan-assembler-times {vaesz\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch.c
new file mode 100644
index 00000000000..2dea4bbb89f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch_overloaded.c
new file mode 100644
index 00000000000..5a16400f800
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ch_overloaded.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl.c
new file mode 100644
index 00000000000..b2bbc1559f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl_overloaded.c
new file mode 100644
index 00000000000..7a54466b204
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2cl_overloaded.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms.c
new file mode 100644
index 00000000000..57523576c3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms_overloaded.c
new file mode 100644
index 00000000000..4d31ee0ee34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknha/vsha2ms_overloaded.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknha -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch.c
new file mode 100644
index 00000000000..811c313887b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch_overloaded.c
new file mode 100644
index 00000000000..a09f9876c75
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ch_overloaded.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl.c
new file mode 100644
index 00000000000..f44c5a2cfbf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl_overloaded.c
new file mode 100644
index 00000000000..2354ab54a63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2cl_overloaded.c
@@ -0,0 +1,83 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms.c
new file mode 100644
index 00000000000..45aba16119d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms.c
@@ -0,0 +1,83 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms_overloaded.c
new file mode 100644
index 00000000000..3cad2e09fc7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvknhb/vsha2ms_overloaded.c
@@ -0,0 +1,83 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k.c
new file mode 100644
index 00000000000..7a8a0857f31
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k.c
@@ -0,0 +1,50 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksed -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m1(vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m2(vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m4(vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m8(vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm4k\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k_overloaded.c
new file mode 100644
index 00000000000..dd06a7e58d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4k_overloaded.c
@@ -0,0 +1,50 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksed -mabi=lp64d -O2 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4k(vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4k(vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4k(vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4k(vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4k(vs2, 0, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm4k\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r.c
new file mode 100644
index 00000000000..dac66db3abb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksed -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m8_u32m8(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_u32m8_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsm4r\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vsm4r\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r_overloaded.c
new file mode 100644
index 00000000000..6311adfb2d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksed/vsm4r_overloaded.c
@@ -0,0 +1,170 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksed -mabi=lp64d -O2 -Wno-psabi" } */
+#include "riscv_vector.h"
+
+/* non-policy */
+vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs(vd, vs2, vl);
+}
+
+/* policy */
+vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m8_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vs_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 20 } } */
+/* { dg-final { scan-assembler-times {vsm4r\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
+/* { dg-final { scan-assembler-times {vsm4r\.vs\s+v[0-9]+,\s*v[0-9]} 30 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c.c
new file mode 100644
index 00000000000..1cea2489708
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl);
+}
+
+vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3c\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c_overloaded.c
new file mode 100644
index 00000000000..01b4c0fbb95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3c_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3c\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me.c
new file mode 100644
index 00000000000..78fdf741643
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3me\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me_overloaded.c
new file mode 100644
index 00000000000..00c9cfe56ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvksh/vsm3me_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3me\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvkb.c b/gcc/testsuite/gcc.target/riscv/zvkb.c
new file mode 100644
index 00000000000..d5c28e79ef6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvkb.c
@@ -0,0 +1,13 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkb" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvkb" { target { rv32 } } } */
+
+#ifndef __riscv_zvkb
+#error "Feature macro not defined"
+#endif
+
+int
+foo (int a)
+{
+  return a;
+}