--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -581,7 +581,6 @@ target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
-bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -51,6 +51,11 @@ static inline bool mmuidx_sum(int mmu_idx)
return (mmu_idx & 3) == MMUIdx_S_SUM;
}
 
+static inline bool mmuidx_2stage(int mmu_idx)
+{
+ return mmu_idx & MMU_2STAGE_BIT;
+}
+
/* share data between vector helpers and decode code */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -591,11 +591,6 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
}
}
 
-bool riscv_cpu_two_stage_lookup(int mmu_idx)
-{
- return mmu_idx & MMU_2STAGE_BIT;
-}
-
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -779,7 +774,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
* MPRV does not affect the virtual-machine load/store
* instructions, HLV, HLVX, and HSV.
*/
- if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ if (mmuidx_2stage(mmu_idx)) {
mode = get_field(env->hstatus, HSTATUS_SPVP);
    }
 
@@ -1175,8 +1170,7 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
    }
 
env->badaddr = addr;
- env->two_stage_lookup = env->virt_enabled ||
- riscv_cpu_two_stage_lookup(mmu_idx);
+ env->two_stage_lookup = env->virt_enabled || mmuidx_2stage(mmu_idx);
env->two_stage_indirect_lookup = false;
cpu_loop_exit_restore(cs, retaddr);
}
@@ -1201,8 +1195,7 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
g_assert_not_reached();
}
env->badaddr = addr;
- env->two_stage_lookup = env->virt_enabled ||
- riscv_cpu_two_stage_lookup(mmu_idx);
+ env->two_stage_lookup = env->virt_enabled || mmuidx_2stage(mmu_idx);
env->two_stage_indirect_lookup = false;
cpu_loop_exit_restore(cs, retaddr);
}
@@ -1256,7 +1249,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
* MPRV does not affect the virtual-machine load/store
* instructions, HLV, HLVX, and HSV.
*/
- if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ if (mmuidx_2stage(mmu_idx)) {
mode = get_field(env->hstatus, HSTATUS_SPVP);
} else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
get_field(env->mstatus, MSTATUS_MPRV)) {
@@ -1268,7 +1261,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
 
pmu_tlb_fill_incr_ctr(cpu, access_type);
if (env->virt_enabled ||
- ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
+ ((mmuidx_2stage(mmu_idx) || two_stage_lookup) &&
access_type != MMU_INST_FETCH)) {
/* Two stage lookup */
ret = get_physical_address(env, &pa, &prot, address,
@@ -1366,8 +1359,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else {
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error,
- env->virt_enabled ||
- riscv_cpu_two_stage_lookup(mmu_idx),
+ env->virt_enabled || mmuidx_2stage(mmu_idx),
two_stage_indirect_error);
cpu_loop_exit_restore(cs, retaddr);
}
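
For reference, a minimal standalone sketch of the mmu_idx encoding the new
helper relies on. The MMUIdx_* and MMU_2STAGE_BIT values are assumed to match
what target/riscv/internals.h defines at this point in the series; the main()
harness is illustrative only and is not part of the patch:

/*
 * Sketch of the RISC-V mmu_idx layout: the low two bits select the
 * privilege/SUM variant, and MMU_2STAGE_BIT marks indexes that also
 * need the second (G-stage) translation.
 */
#include <assert.h>
#include <stdbool.h>

#define MMUIdx_U        0
#define MMUIdx_S        1
#define MMUIdx_S_SUM    2
#define MMUIdx_M        3
#define MMU_2STAGE_BIT  (1 << 2)

/* Same test the removed riscv_cpu_two_stage_lookup() performed. */
static inline bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}

int main(void)
{
    /* A plain S-mode index has no second translation stage... */
    assert(!mmuidx_2stage(MMUIdx_S));
    /* ...while the same index with the 2STAGE bit set does. */
    assert(mmuidx_2stage(MMUIdx_S | MMU_2STAGE_BIT));
    return 0;
}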