@@ -1033,6 +1033,8 @@ struct sysib_322 {
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define VADDR_PX 0xff000 /* page index bits */
+
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_RES0 0x800 /* bit must be zero */
@@ -1073,19 +1073,22 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
}
/* invalidate pte */
-void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
+void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t page = vaddr & TARGET_PAGE_MASK;
- uint64_t pte = 0;
+ uint64_t pte_addr, pte;
/* XXX broadcast to other CPUs */
- /* XXX Linux is nice enough to give us the exact pte address.
- According to spec we'd have to find it out ourselves */
- /* XXX Linux is fine with overwriting the pte, the spec requires
- us to only set the invalid bit */
- stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
+ /* Compute the page table entry address */
+ pte_addr = (pto & _SEGMENT_ENTRY_ORIGIN);
+ pte_addr += (vaddr & VADDR_PX) >> 9;
+
+ /* Mark the page table entry as invalid */
+ pte = ldq_phys(cs->as, pte_addr);
+ pte |= _PAGE_INVALID;
+ stq_phys(cs->as, pte_addr, pte);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
@@ -143,8 +143,6 @@ static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
return 0;
}
-#define VADDR_PX 0xff000 /* Page index bits */
-
/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t st_entry,
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> --- target/s390x/cpu.h | 2 ++ target/s390x/mem_helper.c | 17 ++++++++++------- target/s390x/mmu_helper.c | 2 -- 3 files changed, 12 insertions(+), 9 deletions(-)