
[v6,6/7] target/riscv: rvv: Provide group continuous ld/st flow for unit-stride ld/st instructions

Message ID 20240918171412.150107-7-max.chou@sifive.com
State New
Series [v6,1/7] target/riscv: Set vdata.vm field for vector load/store whole register instructions

Commit Message

Max Chou Sept. 18, 2024, 5:14 p.m. UTC
The vector unmasked unit-stride and whole register load/store
instructions access contiguous memory. If the host and the guest
architecture share the same endianness, the per-element loads/stores
can be grouped so that more data is transferred at a time.

Signed-off-by: Max Chou <max.chou@sifive.com>
---
 target/riscv/vector_helper.c | 77 +++++++++++++++++++++++++++++-------
 1 file changed, 63 insertions(+), 14 deletions(-)
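
The grouping described above can be pictured with a minimal standalone
sketch (plain C, outside QEMU; the names toy_vreg and toy_group_ldst are
hypothetical and not part of the patch). It assumes the host and guest
element layouts already agree, which is exactly the case the patch
targets:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for one vector register's backing bytes. */
    typedef struct { uint8_t bytes[256]; } toy_vreg;

    /*
     * Copy a whole run of contiguous byte elements at once instead of
     * issuing one helper call per element.  This is only byte-order safe
     * when the host already stores elements in the guest's layout.
     */
    static void toy_group_ldst(toy_vreg *vd, uint8_t *host,
                               uint32_t first, uint32_t last, bool is_load)
    {
        uint32_t size = last - first;

        if (is_load) {
            memcpy(vd->bytes + first, host, size);   /* guest <- host */
        } else {
            memcpy(host, vd->bytes + first, size);   /* host <- guest */
        }
    }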

Comments

Daniel Henrique Barboza Oct. 29, 2024, 7:07 p.m. UTC | #1
On 9/18/24 2:14 PM, Max Chou wrote:
> The vector unmasked unit-stride and whole register load/store
> instructions access contiguous memory. If the host and the guest
> architecture share the same endianness, the per-element loads/stores
> can be grouped so that more data is transferred at a time.
> 
> Signed-off-by: Max Chou <max.chou@sifive.com>
> ---

Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

Patch

diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 59009a940ff..654d5e111f3 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -189,6 +189,45 @@  GEN_VEXT_ST_ELEM(ste_h, uint16_t, H2, stw)
 GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
 GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
 
+static inline QEMU_ALWAYS_INLINE void
+vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
+                       void *vd, uint32_t evl, target_ulong addr,
+                       uint32_t reg_start, uintptr_t ra, uint32_t esz,
+                       bool is_load)
+{
+    uint32_t i;
+    for (i = env->vstart; i < evl; env->vstart = ++i, addr += esz) {
+        ldst_tlb(env, adjust_addr(env, addr), i, vd, ra);
+    }
+}
+
+static inline QEMU_ALWAYS_INLINE void
+vext_continus_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
+                        void *vd, uint32_t evl, uint32_t reg_start, void *host,
+                        uint32_t esz, bool is_load)
+{
+#if HOST_BIG_ENDIAN
+    for (; reg_start < evl; reg_start++, host += esz) {
+        ldst_host(vd, reg_start, host);
+    }
+#else
+    if (esz == 1) {
+        uint32_t byte_offset = reg_start * esz;
+        uint32_t size = (evl - reg_start) * esz;
+
+        if (is_load) {
+            memcpy(vd + byte_offset, host, size);
+        } else {
+            memcpy(host, vd + byte_offset, size);
+        }
+    } else {
+        for (; reg_start < evl; reg_start++, host += esz) {
+            ldst_host(vd, reg_start, host);
+        }
+    }
+#endif
+}
+
 static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
                                    uint32_t desc, uint32_t nf,
                                    uint32_t esz, uint32_t max_elems)
@@ -297,24 +336,34 @@  vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
                                mmu_index, true, &host, ra);
 
     if (flags == 0) {
-        for (i = env->vstart; i < evl; ++i) {
-            k = 0;
-            while (k < nf) {
-                ldst_host(vd, i + k * max_elems, host);
-                host += esz;
-                k++;
+        if (nf == 1) {
+            vext_continus_ldst_host(env, ldst_host, vd, evl, env->vstart, host,
+                                    esz, is_load);
+        } else {
+            for (i = env->vstart; i < evl; ++i) {
+                k = 0;
+                while (k < nf) {
+                    ldst_host(vd, i + k * max_elems, host);
+                    host += esz;
+                    k++;
+                }
             }
         }
         env->vstart += elems;
     } else {
-        /* load bytes from guest memory */
-        for (i = env->vstart; i < evl; env->vstart = ++i) {
-            k = 0;
-            while (k < nf) {
-                ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems, vd,
-                         ra);
-                addr += esz;
-                k++;
+        if (nf == 1) {
+            vext_continus_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
+                                   ra, esz, is_load);
+        } else {
+            /* load bytes from guest memory */
+            for (i = env->vstart; i < evl; env->vstart = ++i) {
+                k = 0;
+                while (k < nf) {
+                    ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+                             vd, ra);
+                    addr += esz;
+                    k++;
+                }
             }
         }
     }
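
As a sanity check on why the memcpy shortcut is guarded: copying the raw
bytes of multi-byte elements across an endianness boundary would swap
the element values, so the HOST_BIG_ENDIAN build keeps the per-element
helpers and the generic path only uses memcpy for single-byte elements
(esz == 1). A small self-contained demonstration (not QEMU code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Two 16-bit guest elements, 0x1234 and 0x5678, laid out little-endian. */
        uint8_t guest_bytes[4] = { 0x34, 0x12, 0x78, 0x56 };
        uint16_t host_elems[2];

        /* Raw byte copy -- the same operation the memcpy fast path performs. */
        memcpy(host_elems, guest_bytes, sizeof(host_elems));

        /*
         * A little-endian host prints "1234 5678".  A big-endian host would
         * print "3412 7856", i.e. byte-swapped element values, which is why
         * that configuration falls back to per-element accesses.
         */
        printf("%04x %04x\n", (unsigned)host_elems[0], (unsigned)host_elems[1]);
        return 0;
    }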