new file mode 100644
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
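+
+/* Each function below wraps the overloaded __riscv_vmul intrinsic (vmul.vv)
+   for one signed or unsigned integer element width and LMUL; the unmasked
+   forms come first, followed by the masked forms.  The vector length is
+   forwarded unchanged from the vl argument.  */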
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+
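+/* Each SEW/LMUL combination is expected to emit exactly one
+   vsetvli zero,<reg> + vmul.vv pair per function, so every pattern below
+   matches twice: once for the signed and once for the unsigned variant.
+   The masked forms additionally carry the v0.t mask operand.  */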
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
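+
+/* Same set of overloaded __riscv_vmul tests, but with the vector length
+   hard-coded to 31.  Since 31 fits the 5-bit immediate of vsetivli, the
+   scans below expect vsetivli zero,31 instead of a register vsetvli;
+   the vl parameter is deliberately left unused.  */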
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+
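+/* Every pattern matches twice (signed and unsigned variant); the masked
+   forms additionally expect the v0.t operand on vmul.vv.  */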
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
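+
+/* Same overloaded __riscv_vmul tests with the vector length hard-coded
+   to 32.  32 no longer fits the 5-bit vsetivli immediate, so the scans
+   below expect the compiler to materialise 32 in a register and fall back
+   to vsetvli; the vl parameter is deliberately left unused.  */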
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+
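+/* Each pattern matches twice (signed and unsigned); note the scans use
+   vsetvli with a register operand rather than vsetivli, because 32 exceeds
+   the immediate range.  */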
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
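+
+/* Tests for the __riscv_vmul_mu variant: the explicit mask-undisturbed
+   policy, which takes an extra merge operand whose value is retained in
+   the masked-off elements.  */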
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
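+/* Same __riscv_vmul_mu wrappers but with the constant VL 31, which fits the
+   5-bit immediate of vsetivli; the scan-assembler checks below expect
+   "vsetivli zero, 31, ..., mu" followed by a masked vmul.vv.  */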
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
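+/* Same __riscv_vmul_mu wrappers with the constant VL 32, which no longer fits
+   the 5-bit vsetivli immediate, so the scan-assembler checks below expect the
+   VL to reach "vsetvli zero, <reg>, ..., mu" through a scalar register.  */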
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
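+/* Overloaded wrappers for __riscv_vmul_tu (tail-undisturbed policy) covering
+   each signed and unsigned element width and LMUL with a run-time VL; the
+   scan-assembler checks below expect "vsetvli ..., tu, m[au]" + unmasked vmul.vv.  */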
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
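+/* Same __riscv_vmul_tu wrappers but with the constant VL 31, small enough for
+   the vsetivli immediate form; the scan-assembler checks below expect
+   "vsetivli zero, 31, ..., tu" followed by an unmasked vmul.vv.  */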
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
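+/* Same __riscv_vmul_tu wrappers with the constant VL 32, which exceeds the
+   vsetivli immediate range, so the scan-assembler checks below expect vsetvli
+   with the VL loaded into a scalar register and the tail-undisturbed policy.  */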
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+
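+/* vl is the constant 32 here, which does not fit vsetivli's 5-bit
+   immediate (maximum 31), so each call is expected to place 32 in a
+   scalar register and use the register form "vsetvli zero,<reg>,...",
+   as the patterns below assert.  */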
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
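+
+/* Tail-undisturbed, mask-agnostic (_tum) overloads with a run-time vl:
+   each call should lower to "vsetvli zero,<reg>,...,tu,m[au]" followed
+   by a masked vmul.vv (v0.t), as the dg-final patterns below assert.  */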
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
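+
+/* Same _tum tests with vl fixed at 31, the largest value encodable in
+   vsetivli's 5-bit immediate; the dg-final patterns therefore expect the
+   immediate form "vsetivli zero,31,..." rather than a register-form vsetvli.  */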
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
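+
+/* Same _tum tests with vl fixed at 32, one past vsetivli's immediate
+   range, so vl must again come from a scalar register and the patterns
+   below expect "vsetvli zero,<reg>,...,tu,m[au]".  */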
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
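+
+/* Tail-undisturbed, mask-undisturbed (_tumu) variant with a run-time vl:
+   since the mask policy is pinned, the dg-final patterns require the
+   "tu,mu" setting exactly (not "m[au]") ahead of each masked vmul.vv.  */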
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
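+
+/* _tumu tests with vl fixed at 31, so the patterns expect the immediate
+   form "vsetivli zero,31,...,tu,mu" before each masked vmul.vv.  */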
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
new file mode 100644
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+ return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

gcc/testsuite/ChangeLog:

	* g++.target/riscv/rvv/base/vmul_vv-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_mu-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_mu-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_mu-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tu-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tu-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tu-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tum-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tum-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tum-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tumu-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tumu-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vv_tumu-3.C: New test.
---
 .../g++.target/riscv/rvv/base/vmul_vv-1.C     | 578 ++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv-2.C     | 578 ++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv-3.C     | 578 ++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-1.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-2.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-3.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-1.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-2.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-3.C  | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-1.C | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-2.C | 292 +++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-3.C | 292 +++++++++
 .../riscv/rvv/base/vmul_vv_tumu-1.C           | 292 +++++++++
 .../riscv/rvv/base/vmul_vv_tumu-2.C           | 292 +++++++++
 .../riscv/rvv/base/vmul_vv_tumu-3.C           | 292 +++++++++
 15 files changed, 5238 insertions(+)
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-3.C
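
Note for readers unfamiliar with the tail-undisturbed, mask-undisturbed (tumu)
policy these tests scan for: below is a minimal usage sketch, not taken from
the patch, showing how a caller might rely on that policy. The function name,
buffers, and the e32/m1 configuration are illustrative assumptions; only the
intrinsic names follow the standard RVV intrinsic API.

/* Masked multiply where inactive and tail lanes keep the values already
   held in `dst`, i.e. the tu,mu behaviour the vsetvli patterns above check.  */
#include <stddef.h>
#include <stdint.h>
#include "riscv_vector.h"

void scale_where_positive (int32_t *dst, const int32_t *a, const int32_t *b,
			   size_t n)
{
  for (size_t i = 0; i < n;)
    {
      size_t vl = __riscv_vsetvl_e32m1 (n - i);
      vint32m1_t va = __riscv_vle32_v_i32m1 (a + i, vl);
      vint32m1_t vb = __riscv_vle32_v_i32m1 (b + i, vl);
      vint32m1_t vd = __riscv_vle32_v_i32m1 (dst + i, vl);
      /* Active lanes are those where a[i] > 0.  */
      vbool32_t m = __riscv_vmsgt_vx_i32m1_b32 (va, 0, vl);
      /* tumu: masked-off lanes and tail lanes are taken from the merge
	 operand `vd`, so dst keeps its old contents there.  */
      vd = __riscv_vmul_vv_i32m1_tumu (m, vd, va, vb, vl);
      __riscv_vse32_v_i32m1 (dst + i, vd, vl);
      i += vl;
    }
}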