@@ -326,11 +326,21 @@ typedef struct xtensa_tlb {
unsigned nrefillentries;
} xtensa_tlb;
+typedef struct XtensaMMU {
+ xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
+ xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
+ unsigned autorefill_idx;
+} XtensaMMU;
+
typedef struct xtensa_mpu_entry {
uint32_t vaddr;
uint32_t attr;
} xtensa_mpu_entry;
+typedef struct XtensaMPU {
+ xtensa_mpu_entry fg[MAX_MPU_FOREGROUND_SEGMENTS];
+} XtensaMPU;
+
typedef struct XtensaGdbReg {
int targno;
unsigned flags;
@@ -526,10 +536,10 @@ struct CPUArchState {
uint32_t exclusive_val;
#ifndef CONFIG_USER_ONLY
- xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
- xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
- xtensa_mpu_entry mpu_fg[MAX_MPU_FOREGROUND_SEGMENTS];
- unsigned autorefill_idx;
+ union {
+ XtensaMMU mmu;
+ XtensaMPU mpu;
+ };
bool runstall;
AddressSpace *address_space_er;
MemoryRegion *system_er;
@@ -250,8 +250,8 @@ static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
assert(wi < tlb->nways && ei < tlb->way_size[wi]);
return dtlb ?
- env->dtlb[wi] + ei :
- env->itlb[wi] + ei;
+ env->mmu.dtlb[wi] + ei :
+ env->mmu.itlb[wi] + ei;
}
static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
@@ -411,11 +411,11 @@ void reset_mmu(CPUXtensaState *env)
env->sregs[RASID] = 0x04030201;
env->sregs[ITLBCFG] = 0;
env->sregs[DTLBCFG] = 0;
- env->autorefill_idx = 0;
- reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
- reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
- reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
- reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
+ env->mmu.autorefill_idx = 0;
+ reset_tlb_mmu_all_ways(env, &env->config->itlb, env->mmu.itlb);
+ reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->mmu.dtlb);
+ reset_tlb_mmu_ways56(env, &env->config->itlb, env->mmu.itlb);
+ reset_tlb_mmu_ways56(env, &env->config->dtlb, env->mmu.dtlb);
} else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
unsigned i;
@@ -430,8 +430,8 @@ void reset_mmu(CPUXtensaState *env)
}
} else {
env->sregs[CACHEATTR] = 0x22222222;
- reset_tlb_region_way0(env, env->itlb);
- reset_tlb_region_way0(env, env->dtlb);
+ reset_tlb_region_way0(env, env->mmu.itlb);
+ reset_tlb_region_way0(env, env->mmu.dtlb);
}
}
@@ -462,7 +462,7 @@ static int xtensa_tlb_lookup(const CPUXtensaState *env,
const xtensa_tlb *tlb = dtlb ?
&env->config->dtlb : &env->config->itlb;
const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
- env->dtlb : env->itlb;
+ env->mmu.dtlb : env->mmu.itlb;
int nhits = 0;
unsigned wi;
@@ -821,7 +821,7 @@ static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
if (update_tlb) {
- wi = ++env->autorefill_idx & 0x3;
+ wi = ++env->mmu.autorefill_idx & 0x3;
xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
env->sregs[EXCVADDR] = vaddr;
qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
@@ -957,8 +957,8 @@ void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;
if (segment < env->config->n_mpu_fg_segments) {
- env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
- env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
+ env->mpu.fg[segment].vaddr = v & -env->config->mpu_align;
+ env->mpu.fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
tlb_flush(env_cpu(env));
}
@@ -969,7 +969,7 @@ uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;
if (segment < env->config->n_mpu_fg_segments) {
- return env->mpu_fg[segment].vaddr |
+ return env->mpu.fg[segment].vaddr |
extract32(env->sregs[MPUENB], segment, 1);
} else {
return 0;
@@ -981,7 +981,7 @@ uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;
if (segment < env->config->n_mpu_fg_segments) {
- return env->mpu_fg[segment].attr;
+ return env->mpu.fg[segment].attr;
} else {
return 0;
}
@@ -993,13 +993,13 @@ uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
unsigned segment = XTENSA_MPU_PROBE_B;
unsigned bg_segment;
- nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
+ nhits = xtensa_mpu_lookup(env->mpu.fg, env->config->n_mpu_fg_segments,
v, &segment);
if (nhits > 1) {
HELPER(exception_cause_vaddr)(env, env->pc,
LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
} else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
- return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
+ return env->mpu.fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
} else {
xtensa_mpu_lookup(env->config->mpu_bg,
env->config->n_mpu_bg_segments,
@@ -1017,14 +1017,14 @@ static int get_physical_addr_mpu(CPUXtensaState *env,
unsigned segment;
uint32_t attr;
- nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
+ nhits = xtensa_mpu_lookup(env->mpu.fg, env->config->n_mpu_fg_segments,
vaddr, &segment);
if (nhits > 1) {
return is_write < 2 ?
LOAD_STORE_TLB_MULTI_HIT_CAUSE :
INST_TLB_MULTI_HIT_CAUSE;
} else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
- attr = env->mpu_fg[segment].attr;
+ attr = env->mpu.fg[segment].attr;
} else {
xtensa_mpu_lookup(env->config->mpu_bg,
env->config->n_mpu_bg_segments,
@@ -1205,7 +1205,7 @@ void dump_mmu(CPUXtensaState *env)
dump_tlb(env, true);
} else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
qemu_printf("Foreground map:\n");
- dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
+ dump_mpu(env, env->mpu.fg, env->config->n_mpu_fg_segments);
qemu_printf("\nBackground map:\n");
dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
} else {
Make the separation between the states of the alternative xtensa memory management options explicit. Signed-off-by: Max Filippov <jcmvbkbc@gmail.com> --- target/xtensa/cpu.h | 18 +++++++++++++---- target/xtensa/mmu_helper.c | 40 +++++++++++++++++++------------------- 2 files changed, 34 insertions(+), 24 deletions(-)