@@ -935,81 +935,162 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb)
*/
static void
vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz,
- uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uintptr_t ra, bool is_load)
{
- uint32_t i, k, off, pos;
+ RVVContLdSt info;
+ target_ulong addr;
+ void *host;
+ int flags;
+ intptr_t reg_start, reg_last;
+    uint32_t evl;
uint32_t nf = vext_nf(desc);
uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
uint32_t max_elems = vlenb >> log2_esz;
+ uint32_t esz = 1 << log2_esz;
if (env->vstart >= ((vlenb * nf) >> log2_esz)) {
env->vstart = 0;
return;
}
- k = env->vstart / max_elems;
- off = env->vstart % max_elems;
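+    /*
+     * Probe the page(s) spanned by [vstart, nf * max_elems): split the
+     * element range per page, resolve host addresses for RAM pages, and
+     * check for watchpoints, so that an invalid page faults here before
+     * any element has been transferred.
+     */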
+ vext_cont_ldst_elements(&info, base, env->vreg, env->vstart,
+ nf * max_elems, desc, log2_esz, true);
+ vext_cont_ldst_pages(env, &info, base, is_load, desc, esz, ra, true);
+ vext_cont_ldst_watchpoints(env, &info, env->vreg, base, esz, is_load, ra,
+ desc);
+
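+    /*
+     * A nonzero TLB flag on either page (e.g. TLB_MMIO) means the access
+     * cannot be completed through direct host-ram pointers.
+     */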
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+ /* At least one page includes MMIO. */
+ reg_start = info.reg_idx_first[0];
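+        /*
+         * Whole-register accesses touch their nf * max_elems elements in
+         * strictly increasing order, so a single element-by-element loop
+         * from the first valid index covers the rest of the operation.
+         */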
+ evl = nf * max_elems;
+ addr = base + (reg_start << log2_esz);
+ for (; reg_start < evl; reg_start++, addr += esz) {
+ ldst_tlb(env, adjust_addr(env, addr), reg_start << log2_esz, vd,
+ ra);
+ }
+
+ env->vstart = 0;
+ return;
+ }
+
+ /* The entire operation is in RAM, on valid pages. */
+ reg_start = info.reg_idx_first[0];
+ reg_last = info.reg_idx_last[0] + 1;
+ host = info.page[0].host;
-    if (off) {
-        /* load/store rest of elements of current segment pointed by vstart */
-        for (pos = off; pos < max_elems; pos++, env->vstart++) {
-            target_ulong addr = base + ((pos + k * max_elems) << log2_esz);
-            ldst_elem(env, adjust_addr(env, addr),
-                      (pos + k * max_elems) << log2_esz, vd, ra);
-        }
-        k++;
-    }
-    /* load/store elements for rest of segments */
-    for (; k < nf; k++) {
-        for (i = 0; i < max_elems; i++, env->vstart++) {
-            target_ulong addr = base + ((i + k * max_elems) << log2_esz);
-            ldst_elem(env, adjust_addr(env, addr),
-                      (i + k * max_elems) << log2_esz, vd, ra);
-        }
+    for (; reg_start < reg_last; reg_start++) {
+        ldst_host(vd, reg_start << log2_esz, host + (reg_start << log2_esz));
+    }
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ if (unlikely(info.mem_off_split >= 0)) {
+ reg_start = info.reg_idx_split;
+ addr = base + (reg_start << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), reg_start << log2_esz, vd, ra);
+ }
+
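+    /* Finally, the elements that lie entirely within the second page. */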
+ if (unlikely(info.mem_off_first[1] >= 0)) {
+ reg_start = info.reg_idx_first[1];
+ reg_last = info.reg_idx_last[1] + 1;
+ host = info.page[1].host;
+ for (; reg_start < reg_last; reg_start++) {
+ ldst_host(vd, reg_start << log2_esz,
+ host + (reg_start << log2_esz));
}
}
env->vstart = 0;
}
-#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb)
-GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb)
-GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb)
-GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb)
-GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb)
-GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb)
-GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb)
-GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb)
-GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb)
-GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb)
-GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb)
-GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb)
-GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb)
-GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb)
-GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb)
-GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb)
-
-#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb)
-GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb)
-GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb)
-GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb)
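+/*
+ * Generate helpers that pass both accessors: the _host variant for the
+ * direct host-ram fast path and the _tlb variant for the MMIO and
+ * cross-page slow paths; vext_ldst_whole selects between them at run time.
+ */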
+#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), true); \
+}
+
+GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb, lde_d_host)
+
+#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), false); \
+}
+
+GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb, ste_b_host)
/*
* Vector Integer Arithmetic Instructions
The vector unit-stride whole register load/store instructions are similar to unmasked unit-stride load/store instructions that is suitable to be optimized by using a direct access to host ram fast path. Signed-off-by: Max Chou <max.chou@sifive.com> --- target/riscv/vector_helper.c | 185 +++++++++++++++++++++++++---------- 1 file changed, 133 insertions(+), 52 deletions(-)