diff mbox series

[v1] RISC-V: Add xfail test case for highpart overlap floating-point widen insn

Message ID 20240422082707.1592898-1-pan2.li@intel.com
State New
Headers show
Series [v1] RISC-V: Add xfail test case for highpart overlap floating-point widen insn | expand

Commit Message

Li, Pan2 April 22, 2024, 8:27 a.m. UTC
From: Pan Li <pan2.li@intel.com>

We reverted the patch below due to register group overlap; add the
related insn tests and mark them as xfail.  The xfail will be removed
once register overlap is supported in GCC-15.

8614cbb2534 RISC-V: Support highpart overlap for floating-point widen instructions

The test suites below passed.
* The rv64gcv fully regression test.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/base/pr112431-10.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-11.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-12.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-13.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-14.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-15.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-7.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-8.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-9.c: New test.

Signed-off-by: Pan Li <pan2.li@intel.com>
---
 .../gcc.target/riscv/rvv/base/pr112431-10.c   | 104 ++++++++++
 .../gcc.target/riscv/rvv/base/pr112431-11.c   |  68 +++++++
 .../gcc.target/riscv/rvv/base/pr112431-12.c   |  51 +++++
 .../gcc.target/riscv/rvv/base/pr112431-13.c   | 188 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/pr112431-14.c   | 119 +++++++++++
 .../gcc.target/riscv/rvv/base/pr112431-15.c   |  86 ++++++++
 .../gcc.target/riscv/rvv/base/pr112431-7.c    | 104 ++++++++++
 .../gcc.target/riscv/rvv/base/pr112431-8.c    |  68 +++++++
 .../gcc.target/riscv/rvv/base/pr112431-9.c    |  51 +++++
 9 files changed, 839 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c

Comments

钟居哲 April 22, 2024, 8:28 a.m. UTC | #1
lgtm



juzhe.zhong@rivai.ai
 
From: pan2.li
Date: 2024-04-22 16:27
To: gcc-patches
CC: juzhe.zhong; kito.cheng; rdapp.gcc; Pan Li
Subject: [PATCH v1] RISC-V: Add xfail test case for highpart overlap floating-point widen insn
From: Pan Li <pan2.li@intel.com>
 
We reverted the patch below due to register group overlap; add the
related insn tests and mark them as xfail.  The xfail will be removed
once register overlap is supported in GCC-15.
 
8614cbb2534 RISC-V: Support highpart overlap for floating-point widen instructions
 
The test suites below passed.
* The rv64gcv fully regression test.
 
gcc/testsuite/ChangeLog:
 
* gcc.target/riscv/rvv/base/pr112431-10.c: New test.
* gcc.target/riscv/rvv/base/pr112431-11.c: New test.
* gcc.target/riscv/rvv/base/pr112431-12.c: New test.
* gcc.target/riscv/rvv/base/pr112431-13.c: New test.
* gcc.target/riscv/rvv/base/pr112431-14.c: New test.
* gcc.target/riscv/rvv/base/pr112431-15.c: New test.
* gcc.target/riscv/rvv/base/pr112431-7.c: New test.
* gcc.target/riscv/rvv/base/pr112431-8.c: New test.
* gcc.target/riscv/rvv/base/pr112431-9.c: New test.
 
Signed-off-by: Pan Li <pan2.li@intel.com>
---
.../gcc.target/riscv/rvv/base/pr112431-10.c   | 104 ++++++++++
.../gcc.target/riscv/rvv/base/pr112431-11.c   |  68 +++++++
.../gcc.target/riscv/rvv/base/pr112431-12.c   |  51 +++++
.../gcc.target/riscv/rvv/base/pr112431-13.c   | 188 ++++++++++++++++++
.../gcc.target/riscv/rvv/base/pr112431-14.c   | 119 +++++++++++
.../gcc.target/riscv/rvv/base/pr112431-15.c   |  86 ++++++++
.../gcc.target/riscv/rvv/base/pr112431-7.c    | 104 ++++++++++
.../gcc.target/riscv/rvv/base/pr112431-8.c    |  68 +++++++
.../gcc.target/riscv/rvv/base/pr112431-9.c    |  51 +++++
9 files changed, 839 insertions(+)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
 
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
new file mode 100644
index 00000000000..5d3f2fbe46d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7, double sum8, double sum9,
+   double sum10, double sum11, double sum12, double sum13, double sum14,
+   double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m1_t v0 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v1 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v2 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v3 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v4 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v5 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v6 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v7 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v8 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v9 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v10 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v11 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v12 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v13 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v14 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v15 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vfloat64m2_t vw0 = __riscv_vfwcvt_f_x_v_f64m2 (v0, vl);
+      vfloat64m2_t vw1 = __riscv_vfwcvt_f_x_v_f64m2 (v1, vl);
+      vfloat64m2_t vw2 = __riscv_vfwcvt_f_x_v_f64m2 (v2, vl);
+      vfloat64m2_t vw3 = __riscv_vfwcvt_f_x_v_f64m2 (v3, vl);
+      vfloat64m2_t vw4 = __riscv_vfwcvt_f_x_v_f64m2 (v4, vl);
+      vfloat64m2_t vw5 = __riscv_vfwcvt_f_x_v_f64m2 (v5, vl);
+      vfloat64m2_t vw6 = __riscv_vfwcvt_f_x_v_f64m2 (v6, vl);
+      vfloat64m2_t vw7 = __riscv_vfwcvt_f_x_v_f64m2 (v7, vl);
+      vfloat64m2_t vw8 = __riscv_vfwcvt_f_x_v_f64m2 (v8, vl);
+      vfloat64m2_t vw9 = __riscv_vfwcvt_f_x_v_f64m2 (v9, vl);
+      vfloat64m2_t vw10 = __riscv_vfwcvt_f_x_v_f64m2 (v10, vl);
+      vfloat64m2_t vw11 = __riscv_vfwcvt_f_x_v_f64m2 (v11, vl);
+      vfloat64m2_t vw12 = __riscv_vfwcvt_f_x_v_f64m2 (v12, vl);
+      vfloat64m2_t vw13 = __riscv_vfwcvt_f_x_v_f64m2 (v13, vl);
+      vfloat64m2_t vw14 = __riscv_vfwcvt_f_x_v_f64m2 (v14, vl);
+      vfloat64m2_t vw15 = __riscv_vfwcvt_f_x_v_f64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+      double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+      double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+      double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+      double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+      double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+      double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+      double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+      double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+        sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
new file mode 100644
index 00000000000..6a2301b523f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m2_t v0 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v1 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v2 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v3 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v4 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v5 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v6 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v7 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m4_t vw0 = __riscv_vfwcvt_f_x_v_f64m4 (v0, vl);
+      vfloat64m4_t vw1 = __riscv_vfwcvt_f_x_v_f64m4 (v1, vl);
+      vfloat64m4_t vw2 = __riscv_vfwcvt_f_x_v_f64m4 (v2, vl);
+      vfloat64m4_t vw3 = __riscv_vfwcvt_f_x_v_f64m4 (v3, vl);
+      vfloat64m4_t vw4 = __riscv_vfwcvt_f_x_v_f64m4 (v4, vl);
+      vfloat64m4_t vw5 = __riscv_vfwcvt_f_x_v_f64m4 (v5, vl);
+      vfloat64m4_t vw6 = __riscv_vfwcvt_f_x_v_f64m4 (v6, vl);
+      vfloat64m4_t vw7 = __riscv_vfwcvt_f_x_v_f64m4 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
new file mode 100644
index 00000000000..0f3eb4d58de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m4_t v0 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v1 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v2 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v3 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m8_t vw0 = __riscv_vfwcvt_f_x_v_f64m8 (v0, vl);
+      vfloat64m8_t vw1 = __riscv_vfwcvt_f_x_v_f64m8 (v1, vl);
+      vfloat64m8_t vw2 = __riscv_vfwcvt_f_x_v_f64m8 (v2, vl);
+      vfloat64m8_t vw3 = __riscv_vfwcvt_f_x_v_f64m8 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
new file mode 100644
index 00000000000..71786995c56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
@@ -0,0 +1,188 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7, double sum8, double sum9,
+   double sum10, double sum11, double sum12, double sum13, double sum14,
+   double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+      vint64m2_t vw8 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v8, vl);
+      vint64m2_t vw9 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v9, vl);
+      vint64m2_t vw10 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v10, vl);
+      vint64m2_t vw11 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v11, vl);
+      vint64m2_t vw12 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v12, vl);
+      vint64m2_t vw13 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v13, vl);
+      vint64m2_t vw14 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v14, vl);
+      vint64m2_t vw15 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+      double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+      double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+      double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+      double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+      double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+      double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+      double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+      double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+        sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+      vint64m2_t vw8 = __riscv_vfwcvt_x_f_v_i64m2 (v8, vl);
+      vint64m2_t vw9 = __riscv_vfwcvt_x_f_v_i64m2 (v9, vl);
+      vint64m2_t vw10 = __riscv_vfwcvt_x_f_v_i64m2 (v10, vl);
+      vint64m2_t vw11 = __riscv_vfwcvt_x_f_v_i64m2 (v11, vl);
+      vint64m2_t vw12 = __riscv_vfwcvt_x_f_v_i64m2 (v12, vl);
+      vint64m2_t vw13 = __riscv_vfwcvt_x_f_v_i64m2 (v13, vl);
+      vint64m2_t vw14 = __riscv_vfwcvt_x_f_v_i64m2 (v14, vl);
+      vint64m2_t vw15 = __riscv_vfwcvt_x_f_v_i64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+      double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+      double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+      double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+      double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+      double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+      double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+      double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+      double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+        sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
new file mode 100644
index 00000000000..535ea7ce34b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
new file mode 100644
index 00000000000..3d46e4a829a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
new file mode 100644
index 00000000000..59cbd7ff4be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7, double sum8, double sum9,
+   double sum10, double sum11, double sum12, double sum13, double sum14,
+   double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m2_t vw0 = __riscv_vfwcvt_f_f_v_f64m2 (v0, vl);
+      vfloat64m2_t vw1 = __riscv_vfwcvt_f_f_v_f64m2 (v1, vl);
+      vfloat64m2_t vw2 = __riscv_vfwcvt_f_f_v_f64m2 (v2, vl);
+      vfloat64m2_t vw3 = __riscv_vfwcvt_f_f_v_f64m2 (v3, vl);
+      vfloat64m2_t vw4 = __riscv_vfwcvt_f_f_v_f64m2 (v4, vl);
+      vfloat64m2_t vw5 = __riscv_vfwcvt_f_f_v_f64m2 (v5, vl);
+      vfloat64m2_t vw6 = __riscv_vfwcvt_f_f_v_f64m2 (v6, vl);
+      vfloat64m2_t vw7 = __riscv_vfwcvt_f_f_v_f64m2 (v7, vl);
+      vfloat64m2_t vw8 = __riscv_vfwcvt_f_f_v_f64m2 (v8, vl);
+      vfloat64m2_t vw9 = __riscv_vfwcvt_f_f_v_f64m2 (v9, vl);
+      vfloat64m2_t vw10 = __riscv_vfwcvt_f_f_v_f64m2 (v10, vl);
+      vfloat64m2_t vw11 = __riscv_vfwcvt_f_f_v_f64m2 (v11, vl);
+      vfloat64m2_t vw12 = __riscv_vfwcvt_f_f_v_f64m2 (v12, vl);
+      vfloat64m2_t vw13 = __riscv_vfwcvt_f_f_v_f64m2 (v13, vl);
+      vfloat64m2_t vw14 = __riscv_vfwcvt_f_f_v_f64m2 (v14, vl);
+      vfloat64m2_t vw15 = __riscv_vfwcvt_f_f_v_f64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+      double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+      double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+      double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+      double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+      double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+      double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+      double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+      double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+        sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
new file mode 100644
index 00000000000..3a8ca02bd21
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+   double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m4_t vw0 = __riscv_vfwcvt_f_f_v_f64m4 (v0, vl);
+      vfloat64m4_t vw1 = __riscv_vfwcvt_f_f_v_f64m4 (v1, vl);
+      vfloat64m4_t vw2 = __riscv_vfwcvt_f_f_v_f64m4 (v2, vl);
+      vfloat64m4_t vw3 = __riscv_vfwcvt_f_f_v_f64m4 (v3, vl);
+      vfloat64m4_t vw4 = __riscv_vfwcvt_f_f_v_f64m4 (v4, vl);
+      vfloat64m4_t vw5 = __riscv_vfwcvt_f_f_v_f64m4 (v5, vl);
+      vfloat64m4_t vw6 = __riscv_vfwcvt_f_f_v_f64m4 (v6, vl);
+      vfloat64m4_t vw7 = __riscv_vfwcvt_f_f_v_f64m4 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
new file mode 100644
index 00000000000..88ab1d9da5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m8_t vw0 = __riscv_vfwcvt_f_f_v_f64m8 (v0, vl);
+      vfloat64m8_t vw1 = __riscv_vfwcvt_f_f_v_f64m8 (v1, vl);
+      vfloat64m8_t vw2 = __riscv_vfwcvt_f_f_v_f64m8 (v2, vl);
+      vfloat64m8_t vw3 = __riscv_vfwcvt_f_f_v_f64m8 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff mbox series

Patch

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
new file mode 100644
index 00000000000..5d3f2fbe46d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
@@ -0,0 +1,104 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7, double sum8, double sum9,
+	  double sum10, double sum11, double sum12, double sum13, double sum14,
+	  double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+	 + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m1_t v0 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v1 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v2 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v3 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v4 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v5 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v6 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v7 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v8 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v9 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v10 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v11 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v12 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v13 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v14 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+      vint32m1_t v15 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m2_t vw0 = __riscv_vfwcvt_f_x_v_f64m2 (v0, vl);
+      vfloat64m2_t vw1 = __riscv_vfwcvt_f_x_v_f64m2 (v1, vl);
+      vfloat64m2_t vw2 = __riscv_vfwcvt_f_x_v_f64m2 (v2, vl);
+      vfloat64m2_t vw3 = __riscv_vfwcvt_f_x_v_f64m2 (v3, vl);
+      vfloat64m2_t vw4 = __riscv_vfwcvt_f_x_v_f64m2 (v4, vl);
+      vfloat64m2_t vw5 = __riscv_vfwcvt_f_x_v_f64m2 (v5, vl);
+      vfloat64m2_t vw6 = __riscv_vfwcvt_f_x_v_f64m2 (v6, vl);
+      vfloat64m2_t vw7 = __riscv_vfwcvt_f_x_v_f64m2 (v7, vl);
+      vfloat64m2_t vw8 = __riscv_vfwcvt_f_x_v_f64m2 (v8, vl);
+      vfloat64m2_t vw9 = __riscv_vfwcvt_f_x_v_f64m2 (v9, vl);
+      vfloat64m2_t vw10 = __riscv_vfwcvt_f_x_v_f64m2 (v10, vl);
+      vfloat64m2_t vw11 = __riscv_vfwcvt_f_x_v_f64m2 (v11, vl);
+      vfloat64m2_t vw12 = __riscv_vfwcvt_f_x_v_f64m2 (v12, vl);
+      vfloat64m2_t vw13 = __riscv_vfwcvt_f_x_v_f64m2 (v13, vl);
+      vfloat64m2_t vw14 = __riscv_vfwcvt_f_x_v_f64m2 (v14, vl);
+      vfloat64m2_t vw15 = __riscv_vfwcvt_f_x_v_f64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+      double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+      double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+      double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+      double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+      double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+      double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+      double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+      double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+		       sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
new file mode 100644
index 00000000000..6a2301b523f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
@@ -0,0 +1,68 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m2_t v0 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v1 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v2 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v3 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v4 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v5 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v6 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+      vint32m2_t v7 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m4_t vw0 = __riscv_vfwcvt_f_x_v_f64m4 (v0, vl);
+      vfloat64m4_t vw1 = __riscv_vfwcvt_f_x_v_f64m4 (v1, vl);
+      vfloat64m4_t vw2 = __riscv_vfwcvt_f_x_v_f64m4 (v2, vl);
+      vfloat64m4_t vw3 = __riscv_vfwcvt_f_x_v_f64m4 (v3, vl);
+      vfloat64m4_t vw4 = __riscv_vfwcvt_f_x_v_f64m4 (v4, vl);
+      vfloat64m4_t vw5 = __riscv_vfwcvt_f_x_v_f64m4 (v5, vl);
+      vfloat64m4_t vw6 = __riscv_vfwcvt_f_x_v_f64m4 (v6, vl);
+      vfloat64m4_t vw7 = __riscv_vfwcvt_f_x_v_f64m4 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
new file mode 100644
index 00000000000..0f3eb4d58de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint32m4_t v0 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v1 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v2 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+      vint32m4_t v3 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m8_t vw0 = __riscv_vfwcvt_f_x_v_f64m8 (v0, vl);
+      vfloat64m8_t vw1 = __riscv_vfwcvt_f_x_v_f64m8 (v1, vl);
+      vfloat64m8_t vw2 = __riscv_vfwcvt_f_x_v_f64m8 (v2, vl);
+      vfloat64m8_t vw3 = __riscv_vfwcvt_f_x_v_f64m8 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
new file mode 100644
index 00000000000..71786995c56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
@@ -0,0 +1,188 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7, double sum8, double sum9,
+	  double sum10, double sum11, double sum12, double sum13, double sum14,
+	  double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+	 + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+      vint64m2_t vw8 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v8, vl);
+      vint64m2_t vw9 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v9, vl);
+      vint64m2_t vw10 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v10, vl);
+      vint64m2_t vw11 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v11, vl);
+      vint64m2_t vw12 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v12, vl);
+      vint64m2_t vw13 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v13, vl);
+      vint64m2_t vw14 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v14, vl);
+      vint64m2_t vw15 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+      double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+      double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+      double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+      double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+      double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+      double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+      double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+      double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+		       sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+      vint64m2_t vw8 = __riscv_vfwcvt_x_f_v_i64m2 (v8, vl);
+      vint64m2_t vw9 = __riscv_vfwcvt_x_f_v_i64m2 (v9, vl);
+      vint64m2_t vw10 = __riscv_vfwcvt_x_f_v_i64m2 (v10, vl);
+      vint64m2_t vw11 = __riscv_vfwcvt_x_f_v_i64m2 (v11, vl);
+      vint64m2_t vw12 = __riscv_vfwcvt_x_f_v_i64m2 (v12, vl);
+      vint64m2_t vw13 = __riscv_vfwcvt_x_f_v_i64m2 (v13, vl);
+      vint64m2_t vw14 = __riscv_vfwcvt_x_f_v_i64m2 (v14, vl);
+      vint64m2_t vw15 = __riscv_vfwcvt_x_f_v_i64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+      double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+      double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+      double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+      double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+      double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+      double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+      double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+      double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+		       sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
new file mode 100644
index 00000000000..535ea7ce34b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
@@ -0,0 +1,119 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+      vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+      vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+      vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+      vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+      double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+      double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+      double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+      double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
new file mode 100644
index 00000000000..3d46e4a829a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+      vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+      vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+      vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+      double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+      double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+      double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
new file mode 100644
index 00000000000..59cbd7ff4be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7, double sum8, double sum9,
+	  double sum10, double sum11, double sum12, double sum13, double sum14,
+	  double sum15)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+	 + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+      vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m2_t vw0 = __riscv_vfwcvt_f_f_v_f64m2 (v0, vl);
+      vfloat64m2_t vw1 = __riscv_vfwcvt_f_f_v_f64m2 (v1, vl);
+      vfloat64m2_t vw2 = __riscv_vfwcvt_f_f_v_f64m2 (v2, vl);
+      vfloat64m2_t vw3 = __riscv_vfwcvt_f_f_v_f64m2 (v3, vl);
+      vfloat64m2_t vw4 = __riscv_vfwcvt_f_f_v_f64m2 (v4, vl);
+      vfloat64m2_t vw5 = __riscv_vfwcvt_f_f_v_f64m2 (v5, vl);
+      vfloat64m2_t vw6 = __riscv_vfwcvt_f_f_v_f64m2 (v6, vl);
+      vfloat64m2_t vw7 = __riscv_vfwcvt_f_f_v_f64m2 (v7, vl);
+      vfloat64m2_t vw8 = __riscv_vfwcvt_f_f_v_f64m2 (v8, vl);
+      vfloat64m2_t vw9 = __riscv_vfwcvt_f_f_v_f64m2 (v9, vl);
+      vfloat64m2_t vw10 = __riscv_vfwcvt_f_f_v_f64m2 (v10, vl);
+      vfloat64m2_t vw11 = __riscv_vfwcvt_f_f_v_f64m2 (v11, vl);
+      vfloat64m2_t vw12 = __riscv_vfwcvt_f_f_v_f64m2 (v12, vl);
+      vfloat64m2_t vw13 = __riscv_vfwcvt_f_f_v_f64m2 (v13, vl);
+      vfloat64m2_t vw14 = __riscv_vfwcvt_f_f_v_f64m2 (v14, vl);
+      vfloat64m2_t vw15 = __riscv_vfwcvt_f_f_v_f64m2 (v15, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+      double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+      double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+      double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+      double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+      double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+      double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+      double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+      double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+		       sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
new file mode 100644
index 00000000000..3a8ca02bd21
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+	  double sum5, double sum6, double sum7)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m4_t vw0 = __riscv_vfwcvt_f_f_v_f64m4 (v0, vl);
+      vfloat64m4_t vw1 = __riscv_vfwcvt_f_f_v_f64m4 (v1, vl);
+      vfloat64m4_t vw2 = __riscv_vfwcvt_f_f_v_f64m4 (v2, vl);
+      vfloat64m4_t vw3 = __riscv_vfwcvt_f_f_v_f64m4 (v3, vl);
+      vfloat64m4_t vw4 = __riscv_vfwcvt_f_f_v_f64m4 (v4, vl);
+      vfloat64m4_t vw5 = __riscv_vfwcvt_f_f_v_f64m4 (v5, vl);
+      vfloat64m4_t vw6 = __riscv_vfwcvt_f_f_v_f64m4 (v6, vl);
+      vfloat64m4_t vw7 = __riscv_vfwcvt_f_f_v_f64m4 (v7, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+      double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+      double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+      double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+      double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
new file mode 100644
index 00000000000..88ab1d9da5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+  return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+  double sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+      vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+      it += vl;
+
+      asm volatile("nop" ::: "memory");
+      vfloat64m8_t vw0 = __riscv_vfwcvt_f_f_v_f64m8 (v0, vl);
+      vfloat64m8_t vw1 = __riscv_vfwcvt_f_f_v_f64m8 (v1, vl);
+      vfloat64m8_t vw2 = __riscv_vfwcvt_f_f_v_f64m8 (v2, vl);
+      vfloat64m8_t vw3 = __riscv_vfwcvt_f_f_v_f64m8 (v3, vl);
+
+      asm volatile("nop" ::: "memory");
+      double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+      double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+      double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+      double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+      sum += sumation (sum0, sum1, sum2, sum3);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} { xfail riscv*-*-* } } } */