
[v1,2/2] s390x/tcg: low-address protection support

Message ID 20171016202358.3633-3-david@redhat.com
State New
Series s390x/tcg: LAP support using immediate TLB invalidation

Commit Message

David Hildenbrand Oct. 16, 2017, 8:23 p.m. UTC
This is a neat way to implement low-address protection, whereby
only the first 512 bytes of the first two pages (each 4096 bytes) of
every address space are protected.

Store a tec of 0 for the access exception; this is what is defined by
Enhanced Suppression on Protection in the case of a low-address
protection exception (bit 61 set to 0, rest undefined).

We have to make sure to pass the access address, not the masked page
address, into mmu_translate*().

Drop the check from testblock, so we can properly test this via
kvm-unit-tests.

This will check every access going through one of the MMUs.
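
A guest test could then look roughly like this (sketch only; the
helpers below are placeholders, not the actual kvm-unit-tests API):

    static void test_lowprot_store(void)
    {
        enable_lowprot();                /* placeholder: set CR0_LOWPROT */
        expect_prot_exception();         /* placeholder: arm the PGM handler */
        *(volatile uint64_t *)4096 = 1;  /* first byte of page 1: protected */
        check_prot_exception();          /* placeholder: expect PGM_PROTECTION */
    }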

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 target/s390x/excp_helper.c |  3 +-
 target/s390x/mem_helper.c  |  8 ----
 target/s390x/mmu_helper.c  | 94 +++++++++++++++++++++++++++++-----------------
 3 files changed, 60 insertions(+), 45 deletions(-)

Comments

Thomas Huth Oct. 18, 2017, 6:21 p.m. UTC | #1
On 16.10.2017 22:23, David Hildenbrand wrote:
> This is a neat way to implement low-address protection, whereby
> only the first 512 bytes of the first two pages (each 4096 bytes) of
> every address space are protected.
>
> Store a tec of 0 for the access exception; this is what is defined by
> Enhanced Suppression on Protection in the case of a low-address
> protection exception (bit 61 set to 0, rest undefined).
>
> We have to make sure to pass the access address, not the masked page
> address, into mmu_translate*().
>
> Drop the check from testblock, so we can properly test this via
> kvm-unit-tests.
>
> This will check every access going through one of the MMUs.
> 
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  target/s390x/excp_helper.c |  3 +-
>  target/s390x/mem_helper.c  |  8 ----
>  target/s390x/mmu_helper.c  | 94 +++++++++++++++++++++++++++++-----------------
>  3 files changed, 60 insertions(+), 45 deletions(-)
[...]
> diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
> index 9daa0fd8e2..9806685bee 100644
> --- a/target/s390x/mmu_helper.c
> +++ b/target/s390x/mmu_helper.c
> @@ -106,6 +106,35 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
>      trigger_access_exception(env, type, ilen, tec);
>  }
>  
> +/* check whether the address would be protected by Low-Address Protection */
> +static bool is_low_address(uint64_t addr)
> +{
> +    return addr < 512 || (addr >= 4096 && addr <= 4607);
> +}

Just cosmetic, but I'd rather either use "<=" or "<" both times, so:

   return addr <= 511 || (addr >= 4096 && addr <= 4607);

or:

   return addr < 512 || (addr >= 4096 && addr < 4608);
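
Either way, the covered ranges are identical; just to spell out the
boundaries (using the is_low_address() helper from this patch):

   assert(is_low_address(0) && is_low_address(511));
   assert(!is_low_address(512) && !is_low_address(4095));
   assert(is_low_address(4096) && is_low_address(4607));
   assert(!is_low_address(4608));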

> +/* check whether Low-Address Protection is enabled for mmu_translate() */
> +static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
> +{
> +    if (!(env->cregs[0] & CR0_LOWPROT)) {
> +        return false;
> +    }
> +    if (!(env->psw.mask & PSW_MASK_DAT)) {
> +        return true;
> +    }
> +
> +    /* Check the private-space control bit */
> +    switch (asc) {
> +    case PSW_ASC_PRIMARY:
> +        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
> +    case PSW_ASC_SECONDARY:
> +        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
> +    case PSW_ASC_HOME:
> +        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
> +    default:
> +        g_assert_not_reached();

Well, this is certainly reachable - if the guest was running in access
register mode. So it might be nicer to the user if you keep the
error_report() here?
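
I.e. something along the lines of the hunk this patch removes (sketch):

   default:
       /* We don't support access register mode */
       error_report("unsupported addressing mode");
       exit(1);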

> +    }
> +}
> +
>  /**
>   * Translate real address to absolute (= physical)
>   * address by taking care of the prefix mapping.
> @@ -323,6 +352,24 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
>      }
>  
>      *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
> +    if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) {
> +        /*
> +         * If any part of this page is currently protected, make sure the
> +         * TLB entry will not be reused.
> +         *
> +         * first two pages, we are able to catch all writes to these areas
> +         * two first pages, we are able to catch all writes to these areas
> +         * just by looking at the start address (triggering the tlb miss).
> +         */
> +        *flags |= PAGE_WRITE_INV;
> +        if (is_low_address(vaddr) && rw == MMU_DATA_STORE) {
> +            if (exc) {
> +                trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
> +            }
> +            return -EACCES;
> +        }
> +    }
> +
>      vaddr &= TARGET_PAGE_MASK;
>  
>      if (!(env->psw.mask & PSW_MASK_DAT)) {
> @@ -392,50 +439,17 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
>  }
>  
>  /**
> - * lowprot_enabled: Check whether low-address protection is enabled
> - */
> -static bool lowprot_enabled(const CPUS390XState *env)
> -{
> -    if (!(env->cregs[0] & CR0_LOWPROT)) {
> -        return false;
> -    }
> -    if (!(env->psw.mask & PSW_MASK_DAT)) {
> -        return true;
> -    }
> -
> -    /* Check the private-space control bit */
> -    switch (env->psw.mask & PSW_MASK_ASC) {
> -    case PSW_ASC_PRIMARY:
> -        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
> -    case PSW_ASC_SECONDARY:
> -        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
> -    case PSW_ASC_HOME:
> -        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
> -    default:
> -        /* We don't support access register mode */
> -        error_report("unsupported addressing mode");
> -        exit(1);
> -    }
> -}

Apart from the nits, the patch looks fine to me.

 Thomas
David Hildenbrand Oct. 18, 2017, 7:34 p.m. UTC | #2
On 18.10.2017 20:21, Thomas Huth wrote:
> On 16.10.2017 22:23, David Hildenbrand wrote:
>> This is a neat way to implement low-address protection, whereby
>> only the first 512 bytes of the first two pages (each 4096 bytes) of
>> every address space are protected.
>>
>> Store a tec of 0 for the access exception; this is what is defined by
>> Enhanced Suppression on Protection in the case of a low-address
>> protection exception (bit 61 set to 0, rest undefined).
>>
>> We have to make sure to pass the access address, not the masked page
>> address, into mmu_translate*().
>>
>> Drop the check from testblock, so we can properly test this via
>> kvm-unit-tests.
>>
>> This will check every access going through one of the MMUs.
>>
>> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  target/s390x/excp_helper.c |  3 +-
>>  target/s390x/mem_helper.c  |  8 ----
>>  target/s390x/mmu_helper.c  | 94 +++++++++++++++++++++++++++++-----------------
>>  3 files changed, 60 insertions(+), 45 deletions(-)
> [...]
>> diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
>> index 9daa0fd8e2..9806685bee 100644
>> --- a/target/s390x/mmu_helper.c
>> +++ b/target/s390x/mmu_helper.c
>> @@ -106,6 +106,35 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
>>      trigger_access_exception(env, type, ilen, tec);
>>  }
>>  
>> +/* check whether the address would be protected by Low-Address Protection */
>> +static bool is_low_address(uint64_t addr)
>> +{
>> +    return addr < 512 || (addr >= 4096 && addr <= 4607);
>> +}
> 
> Just cosmetic, but I'd rather either use "<=" or "<" both times, so:
> 
>    return addr <= 511 || (addr >= 4096 && addr <= 4607);
> 

That one then, as it matches the wording in the PoP.

>> +    /* Check the private-space control bit */
>> +    switch (asc) {
>> +    case PSW_ASC_PRIMARY:
>> +        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
>> +    case PSW_ASC_SECONDARY:
>> +        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
>> +    case PSW_ASC_HOME:
>> +        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
>> +    default:
>> +        g_assert_not_reached();
> 
> Well, this is certainly reachable - if the guest was running in access
> register mode. So it might be nicer to the user if you keep the
> error_report() here?

Right, this would be reachable via translate_pages(), but not via the
tlb. Although we're unlikely to hit it at that point, we can keep the error.
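
For reference, translate_pages() passes the ASC bits straight from the
PSW through to mmu_translate(), so PSW_ASC_ACCREG can reach that switch:

    uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
    ...
    ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);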

Conny, can you fix these two up or do you want me to resend?
Cornelia Huck Oct. 19, 2017, 8:56 a.m. UTC | #3
On Wed, 18 Oct 2017 21:34:07 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 18.10.2017 20:21, Thomas Huth wrote:
> > On 16.10.2017 22:23, David Hildenbrand wrote:  
> >> This is a neat way to implement low-address protection, whereby
> >> only the first 512 bytes of the first two pages (each 4096 bytes) of
> >> every address space are protected.
> >>
> >> Store a tec of 0 for the access exception; this is what is defined by
> >> Enhanced Suppression on Protection in the case of a low-address
> >> protection exception (bit 61 set to 0, rest undefined).
> >>
> >> We have to make sure to pass the access address, not the masked page
> >> address, into mmu_translate*().
> >>
> >> Drop the check from testblock, so we can properly test this via
> >> kvm-unit-tests.
> >>
> >> This will check every access going through one of the MMUs.
> >>
> >> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> >> Signed-off-by: David Hildenbrand <david@redhat.com>
> >> ---
> >>  target/s390x/excp_helper.c |  3 +-
> >>  target/s390x/mem_helper.c  |  8 ----
> >>  target/s390x/mmu_helper.c  | 94 +++++++++++++++++++++++++++++-----------------
> >>  3 files changed, 60 insertions(+), 45 deletions(-)  
> > [...]  
> >> diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
> >> index 9daa0fd8e2..9806685bee 100644
> >> --- a/target/s390x/mmu_helper.c
> >> +++ b/target/s390x/mmu_helper.c
> >> @@ -106,6 +106,35 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
> >>      trigger_access_exception(env, type, ilen, tec);
> >>  }
> >>  
> >> +/* check whether the address would be protected by Low-Address Protection */
> >> +static bool is_low_address(uint64_t addr)
> >> +{
> >> +    return addr < 512 || (addr >= 4096 && addr <= 4607);
> >> +}  
> > 
> > Just cosmetic, but I'd rather either use "<=" or "<" both times, so:
> > 
> >    return addr <= 511 || (addr >= 4096 && addr <= 4607);
> >   
> 
> That one then, as it matches the wording in the PoP.
> 
> >> +    /* Check the private-space control bit */
> >> +    switch (asc) {
> >> +    case PSW_ASC_PRIMARY:
> >> +        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
> >> +    case PSW_ASC_SECONDARY:
> >> +        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
> >> +    case PSW_ASC_HOME:
> >> +        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
> >> +    default:
> >> +        g_assert_not_reached();  
> > 
> > Well, this is certainly reachable - if the guest was running in access
> > register mode. So it might be nicer to the user if you keep the
> > error_report() here?  
> 
> Right, this would be reachable via translate_pages(), but not via the
> tlb. Although we're unlikely to hit it at that point, we can keep the error.
> 
> Conny, can you fix these two up or do you want me to resend?

Fixed it up. Can you please verify that

git://github.com/cohuck/qemu lap

looks sane?
David Hildenbrand Oct. 19, 2017, 3:54 p.m. UTC | #4
On 19.10.2017 10:56, Cornelia Huck wrote:
> git://github.com/cohuck/qemu lap

Did a diff to my branch, looks very good!

Thanks!

Patch

diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index cff308a18d..e04b670663 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -95,7 +95,6 @@  int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
     DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
             __func__, orig_vaddr, rw, mmu_idx);
 
-    orig_vaddr &= TARGET_PAGE_MASK;
     vaddr = orig_vaddr;
 
     if (mmu_idx < MMU_REAL_IDX) {
@@ -127,7 +126,7 @@  int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
     qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
             __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
 
-    tlb_set_page(cs, orig_vaddr, raddr, prot,
+    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                  mmu_idx, TARGET_PAGE_SIZE);
 
     return 0;
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index bbbe1c62b3..69a16867d4 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1687,18 +1687,10 @@  void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
 uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
 {
     uintptr_t ra = GETPC();
-    CPUState *cs = CPU(s390_env_get_cpu(env));
     int i;
 
     real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
 
-    /* Check low-address protection */
-    if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
-        cpu_restore_state(cs, ra);
-        program_interrupt(env, PGM_PROTECTION, 4);
-        return 1;
-    }
-
     for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
         cpu_stq_real_ra(env, real_addr + i, 0, ra);
     }
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 9daa0fd8e2..9806685bee 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -106,6 +106,35 @@  static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     trigger_access_exception(env, type, ilen, tec);
 }
 
+/* check whether the address would be protected by Low-Address Protection */
+static bool is_low_address(uint64_t addr)
+{
+    return addr < 512 || (addr >= 4096 && addr <= 4607);
+}
+
+/* check whether Low-Address Protection is enabled for mmu_translate() */
+static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
+{
+    if (!(env->cregs[0] & CR0_LOWPROT)) {
+        return false;
+    }
+    if (!(env->psw.mask & PSW_MASK_DAT)) {
+        return true;
+    }
+
+    /* Check the private-space control bit */
+    switch (asc) {
+    case PSW_ASC_PRIMARY:
+        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
+    case PSW_ASC_SECONDARY:
+        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
+    case PSW_ASC_HOME:
+        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /**
  * Translate real address to absolute (= physical)
  * address by taking care of the prefix mapping.
@@ -323,6 +352,24 @@  int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
     }
 
     *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) {
+        /*
+         * If any part of this page is currently protected, make sure the
+         * TLB entry will not be reused.
+         *
+         * As the protected range is always the first 512 bytes of the
+         * first two pages, we are able to catch all writes to these areas
+         * just by looking at the start address (triggering the tlb miss).
+         */
+        *flags |= PAGE_WRITE_INV;
+        if (is_low_address(vaddr) && rw == MMU_DATA_STORE) {
+            if (exc) {
+                trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
+            }
+            return -EACCES;
+        }
+    }
+
     vaddr &= TARGET_PAGE_MASK;
 
     if (!(env->psw.mask & PSW_MASK_DAT)) {
@@ -392,50 +439,17 @@  int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
 }
 
 /**
- * lowprot_enabled: Check whether low-address protection is enabled
- */
-static bool lowprot_enabled(const CPUS390XState *env)
-{
-    if (!(env->cregs[0] & CR0_LOWPROT)) {
-        return false;
-    }
-    if (!(env->psw.mask & PSW_MASK_DAT)) {
-        return true;
-    }
-
-    /* Check the private-space control bit */
-    switch (env->psw.mask & PSW_MASK_ASC) {
-    case PSW_ASC_PRIMARY:
-        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
-    case PSW_ASC_SECONDARY:
-        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
-    case PSW_ASC_HOME:
-        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
-    default:
-        /* We don't support access register mode */
-        error_report("unsupported addressing mode");
-        exit(1);
-    }
-}
-
-/**
  * translate_pages: Translate a set of consecutive logical page addresses
  * to absolute addresses
  */
 static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
                            target_ulong *pages, bool is_write)
 {
-    bool lowprot = is_write && lowprot_enabled(&cpu->env);
     uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
     CPUS390XState *env = &cpu->env;
     int ret, i, pflags;
 
     for (i = 0; i < nr_pages; i++) {
-        /* Low-address protection? */
-        if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
-            trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
-            return -EACCES;
-        }
         ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
         if (ret) {
             return ret;
@@ -509,9 +523,19 @@  int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
 int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
                        target_ulong *addr, int *flags)
 {
-    /* TODO: low address protection once we flush the tlb on cr changes */
+    const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT;
+
     *flags = PAGE_READ | PAGE_WRITE;
-    *addr = mmu_real2abs(env, raddr);
+    if (is_low_address(raddr & TARGET_PAGE_MASK) && lowprot_enabled) {
+        /* see comment in mmu_translate() for how this works */
+        *flags |= PAGE_WRITE_INV;
+        if (is_low_address(raddr) && rw == MMU_DATA_STORE) {
+            trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
+            return -EACCES;
+        }
+    }
+
+    *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK);
 
     /* TODO: storage key handling */
     return 0;