Message ID | 1423753507-30542-5-git-send-email-drjones@redhat.com |
---|---|
State | New |
Headers | show |
On Thu, Feb 12, 2015 at 04:05:06PM +0100, Andrew Jones wrote: > This patch makes the following changes to the determination of > whether an address is executable, when translating addresses > using LPAE. > > 1. No longer assumes that PL0 can't execute when it can't read. > It can in AArch64, a difference from AArch32. > 2. Use va_size == 64 to determine we're in AArch64, rather than > arm_feature(env, ARM_FEATURE_V8), which is insufficient. > 3. Add additional XN determinants > - NS && is_secure && (SCR & SCR_SIF) > - WXN && (prot & PAGE_WRITE) > - AArch64: (prot_PL0 & PAGE_WRITE) > - AArch32: UWXN && (prot_PL0 & PAGE_WRITE) > - XN determination should also work in secure mode (untested) > - XN may even work in EL2 (currently impossible to test) > 4. Cleans up the bloated PAGE_EXEC condition - by removing it. > > The helper get_S1prot is introduced, which also handles short- > descriptors and v6 XN determination. It may even work in EL2, > when support for that comes, but, as the function name implies, > it only works for stage 1 translations. Just because I like replying to myself so much... 
re: feature checking > > Signed-off-by: Andrew Jones <drjones@redhat.com> > --- > target-arm/helper.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ > 1 file changed, 86 insertions(+), 25 deletions(-) > > diff --git a/target-arm/helper.c b/target-arm/helper.c > index df78f481e92f4..20e5753bd216d 100644 > --- a/target-arm/helper.c > +++ b/target-arm/helper.c > @@ -4695,8 +4695,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) > /* Translate section/page access permissions to page > * R/W protection flags > */ > -static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > - bool is_user, int ap, int domain_prot) > +static int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > + bool is_user, int ap, int domain_prot) > { > bool simple_ap = regime_using_lpae_format(env, mmu_idx) > || (regime_sctlr(env, mmu_idx) & SCTLR_AFE); > @@ -4762,6 +4762,84 @@ static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > } > } > > +/* Translate section/page access permissions to protection flags */ > +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, > + int ap, int domain_prot, int ns, int xn, int pxn) > +{ > + bool domain_prot_valid = !regime_using_lpae_format(env, mmu_idx); > + bool is_user = regime_is_user(env, mmu_idx); > + bool have_wxn; > + int prot_rw, user_rw; > + int wxn = 0; > + > + assert(mmu_idx != ARMMMUIdx_S2NS); > + > + if (domain_prot_valid && domain_prot == 3) { > + return PAGE_READ | PAGE_WRITE | PAGE_EXEC; > + } > + > + user_rw = get_rw_prot(env, mmu_idx, true, ap, domain_prot); > + if (is_user) { > + prot_rw = user_rw; > + } else { > + prot_rw = get_rw_prot(env, mmu_idx, false, ap, domain_prot); > + } > + > + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { > + return prot_rw; > + } > + > + /* TODO have_wxn should be replaced with arm_feature(env, ARM_FEATURE_EL2), > + * when ARM_FEATURE_EL2 starts getting set. 
For now we assume all v7 > + * compatible processors have EL2, which is required for [U]WXN. > + */ > + have_wxn = arm_feature(env, ARM_FEATURE_V7); I probably should have used ARM_FEATURE_LPAE here instead to be a bit closer to the truth. > + > + if (have_wxn) { > + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; > + } > + > + if (is_aa64) { > + assert(arm_feature(env, ARM_FEATURE_V8)); > + switch (regime_el(env, mmu_idx)) { > + case 1: > + if (is_user && !user_rw) { > + wxn = 0; > + } else if (!is_user) { > + xn = pxn || (user_rw & PAGE_WRITE); > + } > + break; > + case 2: > + case 3: > + break; > + } > + } else if (arm_feature(env, ARM_FEATURE_V6K)) { > + switch (regime_el(env, mmu_idx)) { > + case 1: > + case 3: > + if (is_user) { > + xn = xn || !user_rw; > + } else { > + int uwxn = 0; > + if (have_wxn) { > + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; > + } > + xn = xn || !prot_rw || pxn || (uwxn && (user_rw & PAGE_WRITE)); I think it's OK to trust the caller to only send a non-zero pxn when they have arm_feature(env, ARM_FEATURE_PXN). However, it just occurred to me that checking that feature before using pxn may have been a nice touch... 
> + } > + break; > + case 2: > + break; > + } > + } else { > + xn = wxn = 0; > + } > + > + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { > + return prot_rw; > + } > + return prot_rw | PAGE_EXEC; > +} > + > static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, > uint32_t *table, uint32_t address) > { > @@ -5047,7 +5125,6 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > int32_t granule_sz = 9; > int32_t va_size = 32; > int32_t tbi = 0; > - bool is_user; > TCR *tcr = regime_tcr(env, mmu_idx); > > /* TODO: > @@ -5220,31 +5297,15 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > /* Access flag */ > goto do_fault; > } > + > + *prot = get_S1prot(env, mmu_idx, va_size == 64, extract32(attrs, 4, 2), 0, > + extract32(attrs, 3, 1), extract32(attrs, 12, 1), > + extract32(attrs, 11, 1)); > + > fault_type = permission_fault; > - is_user = regime_is_user(env, mmu_idx); > - if (is_user && !(attrs & (1 << 4))) { > - /* Unprivileged access not enabled */ > + if (!(*prot & (1 << access_type))) { > goto do_fault; > } > - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; > - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || > - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || > - (!is_user && (attrs & (1 << 11)))) { > - /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally > - * treat XN/UXN as UXN for v8. > - */ > - if (access_type == 2) { > - goto do_fault; > - } > - *prot &= ~PAGE_EXEC; > - } > - if (attrs & (1 << 5)) { > - /* Write access forbidden */ > - if (access_type == 1) { > - goto do_fault; > - } > - *prot &= ~PAGE_WRITE; > - } > > *phys_ptr = descaddr; > *page_size_ptr = page_size; > -- > 1.9.3 > >
On 12 February 2015 at 15:05, Andrew Jones <drjones@redhat.com> wrote: > This patch makes the following changes to the determination of > whether an address is executable, when translating addresses > using LPAE. > > 1. No longer assumes that PL0 can't execute when it can't read. > It can in AArch64, a difference from AArch32. > 2. Use va_size == 64 to determine we're in AArch64, rather than > arm_feature(env, ARM_FEATURE_V8), which is insufficient. > 3. Add additional XN determinants > - NS && is_secure && (SCR & SCR_SIF) > - WXN && (prot & PAGE_WRITE) > - AArch64: (prot_PL0 & PAGE_WRITE) > - AArch32: UWXN && (prot_PL0 & PAGE_WRITE) > - XN determination should also work in secure mode (untested) > - XN may even work in EL2 (currently impossible to test) > 4. Cleans up the bloated PAGE_EXEC condition - by removing it. > > The helper get_S1prot is introduced, which also handles short- > descriptors and v6 XN determination. It may even work in EL2, > when support for that comes, but, as the function name implies, > it only works for stage 1 translations. > > Signed-off-by: Andrew Jones <drjones@redhat.com> > --- > target-arm/helper.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ > 1 file changed, 86 insertions(+), 25 deletions(-) > > diff --git a/target-arm/helper.c b/target-arm/helper.c > index df78f481e92f4..20e5753bd216d 100644 > --- a/target-arm/helper.c > +++ b/target-arm/helper.c > @@ -4695,8 +4695,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) > /* Translate section/page access permissions to page > * R/W protection flags > */ > -static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > - bool is_user, int ap, int domain_prot) > +static int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > + bool is_user, int ap, int domain_prot) ...why does this suddenly lose its 'inline' ? 
> { > bool simple_ap = regime_using_lpae_format(env, mmu_idx) > || (regime_sctlr(env, mmu_idx) & SCTLR_AFE); > @@ -4762,6 +4762,84 @@ static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > } > } > > +/* Translate section/page access permissions to protection flags */ This is LPAE-format only so it would be nice to mention that in the comment and function name. > +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, > + int ap, int domain_prot, int ns, int xn, int pxn) > +{ > + bool domain_prot_valid = !regime_using_lpae_format(env, mmu_idx); But this is always false! > + bool is_user = regime_is_user(env, mmu_idx); > + bool have_wxn; > + int prot_rw, user_rw; > + int wxn = 0; > + > + assert(mmu_idx != ARMMMUIdx_S2NS); > + > + if (domain_prot_valid && domain_prot == 3) { > + return PAGE_READ | PAGE_WRITE | PAGE_EXEC; > + } > + > + user_rw = get_rw_prot(env, mmu_idx, true, ap, domain_prot); > + if (is_user) { > + prot_rw = user_rw; > + } else { > + prot_rw = get_rw_prot(env, mmu_idx, false, ap, domain_prot); > + } I think it would be much better not to try to use the short-descriptor get_rw_prot function. For one thing, we know for definite that we won't be using the old-fashioned AP[2:0] access format, and that we don't have to worry about the domain protection stuff. So it's much simpler and better not to tangle it up with the legacy stuff. (Pull the simple-access-permissions check out into its own function if you like.) For instance, you're missing a shift here on the ap bits, because get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. > + > + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { > + return prot_rw; > + } > + > + /* TODO have_wxn should be replaced with arm_feature(env, ARM_FEATURE_EL2), > + * when ARM_FEATURE_EL2 starts getting set. For now we assume all v7 > + * compatible processors have EL2, which is required for [U]WXN. 
> + */ > + have_wxn = arm_feature(env, ARM_FEATURE_V7); ARMv8 CPUs without EL2 still have WXN, I think. > + > + if (have_wxn) { > + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; > + } > + > + if (is_aa64) { > + assert(arm_feature(env, ARM_FEATURE_V8)); I wouldn't bother with this assert. > + switch (regime_el(env, mmu_idx)) { > + case 1: > + if (is_user && !user_rw) { > + wxn = 0; > + } else if (!is_user) { > + xn = pxn || (user_rw & PAGE_WRITE); > + } > + break; > + case 2: > + case 3: > + break; > + } > + } else if (arm_feature(env, ARM_FEATURE_V6K)) { Always true, since you can't have long format descriptors unless this is at least v7. > + switch (regime_el(env, mmu_idx)) { > + case 1: > + case 3: > + if (is_user) { > + xn = xn || !user_rw; > + } else { > + int uwxn = 0; > + if (have_wxn) { > + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; > + } > + xn = xn || !prot_rw || pxn || (uwxn && (user_rw & PAGE_WRITE)); > + } > + break; Doesn't this lose us the "you need read permission to execute" check (for 32-bit)? Something in here should be doing a PAGE_READ check to see if we can have PAGE_EXEC. > + case 2: > + break; > + } > + } else { > + xn = wxn = 0; > + } > + > + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { > + return prot_rw; > + } > + return prot_rw | PAGE_EXEC; > +} > + > static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, > uint32_t *table, uint32_t address) > { > @@ -5047,7 +5125,6 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > int32_t granule_sz = 9; > int32_t va_size = 32; > int32_t tbi = 0; > - bool is_user; > TCR *tcr = regime_tcr(env, mmu_idx); > > /* TODO: > @@ -5220,31 +5297,15 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > /* Access flag */ > goto do_fault; > } > + > + *prot = get_S1prot(env, mmu_idx, va_size == 64, extract32(attrs, 4, 2), 0, > + extract32(attrs, 3, 1), extract32(attrs, 12, 1), > + extract32(attrs, 11, 1)); Urgh. 
It would be easier to understand if you just passed attrs into get_S1prot and had it pick the fields apart, because then you can match them up with variable names without cross-referencing against the function definition. > + > fault_type = permission_fault; > - is_user = regime_is_user(env, mmu_idx); > - if (is_user && !(attrs & (1 << 4))) { > - /* Unprivileged access not enabled */ > + if (!(*prot & (1 << access_type))) { > goto do_fault; > } > - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; > - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || > - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || > - (!is_user && (attrs & (1 << 11)))) { > - /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally > - * treat XN/UXN as UXN for v8. > - */ > - if (access_type == 2) { > - goto do_fault; > - } > - *prot &= ~PAGE_EXEC; > - } > - if (attrs & (1 << 5)) { > - /* Write access forbidden */ > - if (access_type == 1) { > - goto do_fault; > - } > - *prot &= ~PAGE_WRITE; > - } > > *phys_ptr = descaddr; > *page_size_ptr = page_size; > -- > 1.9.3 > -- PMM
On Tue, Mar 10, 2015 at 03:56:11PM +0000, Peter Maydell wrote: > On 12 February 2015 at 15:05, Andrew Jones <drjones@redhat.com> wrote: > > This patch makes the following changes to the determination of > > whether an address is executable, when translating addresses > > using LPAE. > > > > 1. No longer assumes that PL0 can't execute when it can't read. > > It can in AArch64, a difference from AArch32. > > 2. Use va_size == 64 to determine we're in AArch64, rather than > > arm_feature(env, ARM_FEATURE_V8), which is insufficient. > > 3. Add additional XN determinants > > - NS && is_secure && (SCR & SCR_SIF) > > - WXN && (prot & PAGE_WRITE) > > - AArch64: (prot_PL0 & PAGE_WRITE) > > - AArch32: UWXN && (prot_PL0 & PAGE_WRITE) > > - XN determination should also work in secure mode (untested) > > - XN may even work in EL2 (currently impossible to test) > > 4. Cleans up the bloated PAGE_EXEC condition - by removing it. > > > > The helper get_S1prot is introduced, which also handles short- > > descriptors and v6 XN determination. It may even work in EL2, > > when support for that comes, but, as the function name implies, > > it only works for stage 1 translations. 
> > > > Signed-off-by: Andrew Jones <drjones@redhat.com> > > --- > > target-arm/helper.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ > > 1 file changed, 86 insertions(+), 25 deletions(-) > > > > diff --git a/target-arm/helper.c b/target-arm/helper.c > > index df78f481e92f4..20e5753bd216d 100644 > > --- a/target-arm/helper.c > > +++ b/target-arm/helper.c > > @@ -4695,8 +4695,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) > > /* Translate section/page access permissions to page > > * R/W protection flags > > */ > > -static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > > - bool is_user, int ap, int domain_prot) > > +static int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > > + bool is_user, int ap, int domain_prot) > > ...why does this suddenly lose its 'inline' ? Adding another caller, and thought it was a bit fat for explicit inlining, but have no problem returning it. > > > { > > bool simple_ap = regime_using_lpae_format(env, mmu_idx) > > || (regime_sctlr(env, mmu_idx) & SCTLR_AFE); > > @@ -4762,6 +4762,84 @@ static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > > } > > } > > > > +/* Translate section/page access permissions to protection flags */ > > This is LPAE-format only so it would be nice to mention that in the comment > and function name. Not after the next patch. I probably should have made it completely LPAE-only first, then added two more patches, one preparing it for non-LPAE, and then the next patch, or just done a better job pointing that out in the commit message. > > > +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, > > + int ap, int domain_prot, int ns, int xn, int pxn) > > +{ > > + bool domain_prot_valid = !regime_using_lpae_format(env, mmu_idx); > > But this is always false! Same response as above - not after next patch. 
> > > + bool is_user = regime_is_user(env, mmu_idx); > > + bool have_wxn; > > + int prot_rw, user_rw; > > + int wxn = 0; > > + > > + assert(mmu_idx != ARMMMUIdx_S2NS); > > + > > + if (domain_prot_valid && domain_prot == 3) { > > + return PAGE_READ | PAGE_WRITE | PAGE_EXEC; > > + } > > + > > + user_rw = get_rw_prot(env, mmu_idx, true, ap, domain_prot); > > + if (is_user) { > > + prot_rw = user_rw; > > + } else { > > + prot_rw = get_rw_prot(env, mmu_idx, false, ap, domain_prot); > > + } > > I think it would be much better not to try to use the short-descriptor > get_rw_prot function. For one thing, we know for definite that we > won't be using the old-fashioned AP[2:0] access format, and that > we don't have to worry about the domain protection stuff. So it's > much simpler and better not to tangle it up with the legacy stuff. > (Pull the simple-access-permissions check out into its own function > if you like.) legacy stuff is here for next patch too > > For instance, you're missing a shift here on the ap bits, because > get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. Don't need the shift because get_rw_prot supports the 2-bit format. > > > + > > + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { > > + return prot_rw; > > + } > > + > > + /* TODO have_wxn should be replaced with arm_feature(env, ARM_FEATURE_EL2), > > + * when ARM_FEATURE_EL2 starts getting set. For now we assume all v7 > > + * compatible processors have EL2, which is required for [U]WXN. > > + */ > > + have_wxn = arm_feature(env, ARM_FEATURE_V7); > > ARMv8 CPUs without EL2 still have WXN, I think. I think so too. So V8 || (V7 && EL2) would be the most appropriate. > > > + > > + if (have_wxn) { > > + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; > > + } > > + > > + if (is_aa64) { > > + assert(arm_feature(env, ARM_FEATURE_V8)); > > I wouldn't bother with this assert. 
OK > > > + switch (regime_el(env, mmu_idx)) { > > + case 1: > > + if (is_user && !user_rw) { > > + wxn = 0; > > + } else if (!is_user) { > > + xn = pxn || (user_rw & PAGE_WRITE); > > + } > > + break; > > + case 2: > > + case 3: > > + break; > > + } > > + } else if (arm_feature(env, ARM_FEATURE_V6K)) { > > Always true, since you can't have long format descriptors > unless this is at least v7. Next patch again. > > > + switch (regime_el(env, mmu_idx)) { > > + case 1: > > + case 3: > > + if (is_user) { > > + xn = xn || !user_rw; > > + } else { > > + int uwxn = 0; > > + if (have_wxn) { > > + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; > > + } > > + xn = xn || !prot_rw || pxn || (uwxn && (user_rw & PAGE_WRITE)); > > + } > > + break; > > Doesn't this lose us the "you need read permission to execute" > check (for 32-bit)? Something in here should be doing a > PAGE_READ check to see if we can have PAGE_EXEC. It's there. It's the '!user_rw' and the '!prot_rw' > > > + case 2: > > + break; > > + } > > + } else { > > + xn = wxn = 0; > > + } > > + > > + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { > > + return prot_rw; > > + } > > + return prot_rw | PAGE_EXEC; > > > > +} > > + > > static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, > > uint32_t *table, uint32_t address) > > { > > @@ -5047,7 +5125,6 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > > int32_t granule_sz = 9; > > int32_t va_size = 32; > > int32_t tbi = 0; > > - bool is_user; > > TCR *tcr = regime_tcr(env, mmu_idx); > > > > /* TODO: > > @@ -5220,31 +5297,15 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > > /* Access flag */ > > goto do_fault; > > } > > + > > + *prot = get_S1prot(env, mmu_idx, va_size == 64, extract32(attrs, 4, 2), 0, > > + extract32(attrs, 3, 1), extract32(attrs, 12, 1), > > + extract32(attrs, 11, 1)); > > Urgh. 
It would be easier to understand if you just passed attrs > into get_S1prot and had it pick the fields apart, because then > you can match them up with variable names without cross-referencing > against the function definition. I can pick them apart before passing into local well-named variables. I'd prefer to keep the helper function independent of get_phys_addr_lpae's internal bitmap. > > > + > > fault_type = permission_fault; > > - is_user = regime_is_user(env, mmu_idx); > > - if (is_user && !(attrs & (1 << 4))) { > > - /* Unprivileged access not enabled */ > > + if (!(*prot & (1 << access_type))) { > > goto do_fault; > > } > > - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; > > - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || > > - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || > > - (!is_user && (attrs & (1 << 11)))) { > > - /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally > > - * treat XN/UXN as UXN for v8. > > - */ > > - if (access_type == 2) { > > - goto do_fault; > > - } > > - *prot &= ~PAGE_EXEC; > > - } > > - if (attrs & (1 << 5)) { > > - /* Write access forbidden */ > > - if (access_type == 1) { > > - goto do_fault; > > - } > > - *prot &= ~PAGE_WRITE; > > - } > > > > *phys_ptr = descaddr; > > *page_size_ptr = page_size; > > -- > > 1.9.3 > > > > > -- PMM
On 10 March 2015 at 16:48, Andrew Jones <drjones@redhat.com> wrote: > On Tue, Mar 10, 2015 at 03:56:11PM +0000, Peter Maydell wrote: >> For instance, you're missing a shift here on the ap bits, because >> get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. > > Don't need the shift because get_rw_prot supports the 2-bit format. No it doesn't... >> Doesn't this lose us the "you need read permission to execute" >> check (for 32-bit)? Something in here should be doing a >> PAGE_READ check to see if we can have PAGE_EXEC. > > It's there. It's the '!user_rw' and the '!prot_rw' Ah yes, and that works because you can't have a page which is writable but not readable (which is what I'd forgotten). -- PMM
On Tue, Mar 10, 2015 at 04:55:53PM +0000, Peter Maydell wrote: > On 10 March 2015 at 16:48, Andrew Jones <drjones@redhat.com> wrote: > > On Tue, Mar 10, 2015 at 03:56:11PM +0000, Peter Maydell wrote: > > >> For instance, you're missing a shift here on the ap bits, because > >> get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. > > > > Don't need the shift because get_rw_prot supports the 2-bit format. > > No it doesn't... Yes it does :-) That's the support patch 2/5 adds. > > >> Doesn't this lose us the "you need read permission to execute" > >> check (for 32-bit)? Something in here should be doing a > >> PAGE_READ check to see if we can have PAGE_EXEC. > > > > It's there. It's the '!user_rw' and the '!prot_rw' > > Ah yes, and that works because you can't have a page which > is writable but not readable (which is what I'd forgotten). > > -- PMM
On 10 March 2015 at 17:02, Andrew Jones <drjones@redhat.com> wrote: > On Tue, Mar 10, 2015 at 04:55:53PM +0000, Peter Maydell wrote: >> On 10 March 2015 at 16:48, Andrew Jones <drjones@redhat.com> wrote: >> > On Tue, Mar 10, 2015 at 03:56:11PM +0000, Peter Maydell wrote: >> >> >> For instance, you're missing a shift here on the ap bits, because >> >> get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. >> > >> > Don't need the shift because get_rw_prot supports the 2-bit format. >> >> No it doesn't... > > Yes it does :-) That's the support patch 2/5 adds. No, because patch 2 doesn't do anything in the callers to make them pass only bits [2:1]. So after patch 2 get_rw_prot still requires bits [2:0]. Except it's broken, because the function itself assumes it gets bits [2:1]. Having thought a bit more about it, I think we should just have two totally separate functions for the old style and simplified permission checks, because we can always call the correct one (always old for v5, either one for v6 since we already have the SCTLR.AFE check, and the simplified one for the lpae code path). -- PMM
On Tue, Mar 10, 2015 at 05:14:21PM +0000, Peter Maydell wrote: > On 10 March 2015 at 17:02, Andrew Jones <drjones@redhat.com> wrote: > > On Tue, Mar 10, 2015 at 04:55:53PM +0000, Peter Maydell wrote: > >> On 10 March 2015 at 16:48, Andrew Jones <drjones@redhat.com> wrote: > >> > On Tue, Mar 10, 2015 at 03:56:11PM +0000, Peter Maydell wrote: > >> > >> >> For instance, you're missing a shift here on the ap bits, because > >> >> get_rw_prot needs AP[2:0] and 'ap' here is AP[2:1]. > >> > > >> > Don't need the shift because get_rw_prot supports the 2-bit format. > >> > >> No it doesn't... > > > > Yes it does :-) That's the support patch 2/5 adds. > > No, because patch 2 doesn't do anything in the callers to > make them pass only bits [2:1]. So after patch 2 get_rw_prot > still requires bits [2:0]. Except it's broken, because the > function itself assumes it gets bits [2:1]. You've lost me. Patch 2 adds support for 2-bit ap, but doesn't remove support for 3-bit. There are not callers expecting it to support the simple model as 2 or 3 bits at that time, except v6, but that was broken already, and we fix it in patch 5 (and we add the ap shift there too). IOW, how can preparing a function for new callers, while still supporting the old callers, be 'broken'? > > Having thought a bit more about it, I think we should > just have two totally separate functions for the old > style and simplified permission checks, because we can > always call the correct one (always old for v5, either > one for v6 since we already have the SCTLR.AFE check, > and the simplified one for the lpae code path). I like that. Thanks, drew
On 10 March 2015 at 17:28, Andrew Jones <drjones@redhat.com> wrote: > On Tue, Mar 10, 2015 at 05:14:21PM +0000, Peter Maydell wrote: [...] >> No, because patch 2 doesn't do anything in the callers to >> make them pass only bits [2:1]. So after patch 2 get_rw_prot >> still requires bits [2:0]. Except it's broken, because the >> function itself assumes it gets bits [2:1]. > > You've lost me. Patch 2 adds support for 2-bit ap, but > doesn't remove support for 3-bit. There are no callers > expecting it to support the simple model as 2 or 3 bits > at that time, except v6, but that was broken already, and > we fix it in patch 5 (and we add the ap shift there too). > IOW, how can preparing a function for new callers, while > still supporting the old callers, be 'broken'? (This is all somewhat academic at this point given we're splitting the two functions up, but:) Because it's not being consistent with itself -- if it wants to accept AP[2:1] only then the v6 callsite needs to change, otherwise it needs to handle AP[2:0] and look only at bits 2:1 in that. You eventually fixed this in patch 5 with an extra shift, but if you wanted to do it that way then that part should have been in patch 2... -- PMM
On Thu, Feb 12, 2015 at 04:05:06PM +0100, Andrew Jones wrote: > This patch makes the following changes to the determination of > whether an address is executable, when translating addresses > using LPAE. > > 1. No longer assumes that PL0 can't execute when it can't read. > It can in AArch64, a difference from AArch32. > 2. Use va_size == 64 to determine we're in AArch64, rather than > arm_feature(env, ARM_FEATURE_V8), which is insufficient. > 3. Add additional XN determinants > - NS && is_secure && (SCR & SCR_SIF) > - WXN && (prot & PAGE_WRITE) > - AArch64: (prot_PL0 & PAGE_WRITE) > - AArch32: UWXN && (prot_PL0 & PAGE_WRITE) > - XN determination should also work in secure mode (untested) > - XN may even work in EL2 (currently impossible to test) > 4. Cleans up the bloated PAGE_EXEC condition - by removing it. > > The helper get_S1prot is introduced, which also handles short- > descriptors and v6 XN determination. It may even work in EL2, > when support for that comes, but, as the function name implies, > it only works for stage 1 translations. 
> > Signed-off-by: Andrew Jones <drjones@redhat.com> > --- > target-arm/helper.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ > 1 file changed, 86 insertions(+), 25 deletions(-) > > diff --git a/target-arm/helper.c b/target-arm/helper.c > index df78f481e92f4..20e5753bd216d 100644 > --- a/target-arm/helper.c > +++ b/target-arm/helper.c > @@ -4695,8 +4695,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) > /* Translate section/page access permissions to page > * R/W protection flags > */ > -static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > - bool is_user, int ap, int domain_prot) > +static int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > + bool is_user, int ap, int domain_prot) > { > bool simple_ap = regime_using_lpae_format(env, mmu_idx) > || (regime_sctlr(env, mmu_idx) & SCTLR_AFE); > @@ -4762,6 +4762,84 @@ static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, > } > } > > +/* Translate section/page access permissions to protection flags */ > +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, > + int ap, int domain_prot, int ns, int xn, int pxn) > +{ > + bool domain_prot_valid = !regime_using_lpae_format(env, mmu_idx); > + bool is_user = regime_is_user(env, mmu_idx); > + bool have_wxn; > + int prot_rw, user_rw; > + int wxn = 0; > + > + assert(mmu_idx != ARMMMUIdx_S2NS); > + > + if (domain_prot_valid && domain_prot == 3) { > + return PAGE_READ | PAGE_WRITE | PAGE_EXEC; > + } > + > + user_rw = get_rw_prot(env, mmu_idx, true, ap, domain_prot); > + if (is_user) { > + prot_rw = user_rw; > + } else { > + prot_rw = get_rw_prot(env, mmu_idx, false, ap, domain_prot); > + } > + > + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { > + return prot_rw; > + } > + > + /* TODO have_wxn should be replaced with arm_feature(env, ARM_FEATURE_EL2), > + * when ARM_FEATURE_EL2 starts getting set. 
For now we assume all v7 > + * compatible processors have EL2, which is required for [U]WXN. > + */ > + have_wxn = arm_feature(env, ARM_FEATURE_V7); > + > + if (have_wxn) { > + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; > + } > + > + if (is_aa64) { > + assert(arm_feature(env, ARM_FEATURE_V8)); > + switch (regime_el(env, mmu_idx)) { > + case 1: > + if (is_user && !user_rw) { > + wxn = 0; > + } else if (!is_user) { > + xn = pxn || (user_rw & PAGE_WRITE); > + } > + break; > + case 2: > + case 3: > + break; > + } > + } else if (arm_feature(env, ARM_FEATURE_V6K)) { > + switch (regime_el(env, mmu_idx)) { > + case 1: > + case 3: > + if (is_user) { > + xn = xn || !user_rw; > + } else { > + int uwxn = 0; > + if (have_wxn) { > + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; > + } > + xn = xn || !prot_rw || pxn || (uwxn && (user_rw & PAGE_WRITE)); > + } > + break; > + case 2: > + break; > + } > + } else { > + xn = wxn = 0; > + } > + > + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { > + return prot_rw; > + } > + return prot_rw | PAGE_EXEC; > +} > + > static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, > uint32_t *table, uint32_t address) > { > @@ -5047,7 +5125,6 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > int32_t granule_sz = 9; > int32_t va_size = 32; > int32_t tbi = 0; > - bool is_user; > TCR *tcr = regime_tcr(env, mmu_idx); > > /* TODO: > @@ -5220,31 +5297,15 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, > /* Access flag */ > goto do_fault; > } I just realized that I somehow dropped hunk 2 of patch 2 of the initial post, http://lists.gnu.org/archive/html/qemu-devel/2015-01/msg01435.html - /* Since we're always in the Non-secure state, NSTable is ignored. */ + attrs |= extract32(tableattrs, 4, 1) << 3; /* NS */ Unfortunately that's also missing from the v2 of this series, that I posted yesterday. I'll wait for other comments before sending a v3 though. 
> + > + *prot = get_S1prot(env, mmu_idx, va_size == 64, extract32(attrs, 4, 2), 0, > + extract32(attrs, 3, 1), extract32(attrs, 12, 1), > + extract32(attrs, 11, 1)); > + > fault_type = permission_fault; > - is_user = regime_is_user(env, mmu_idx); > - if (is_user && !(attrs & (1 << 4))) { > - /* Unprivileged access not enabled */ > + if (!(*prot & (1 << access_type))) { > goto do_fault; > } > - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; > - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || > - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || > - (!is_user && (attrs & (1 << 11)))) { > - /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally > - * treat XN/UXN as UXN for v8. > - */ > - if (access_type == 2) { > - goto do_fault; > - } > - *prot &= ~PAGE_EXEC; > - } > - if (attrs & (1 << 5)) { > - /* Write access forbidden */ > - if (access_type == 1) { > - goto do_fault; > - } > - *prot &= ~PAGE_WRITE; > - } > > *phys_ptr = descaddr; > *page_size_ptr = page_size; > -- > 1.9.3 >
diff --git a/target-arm/helper.c b/target-arm/helper.c index df78f481e92f4..20e5753bd216d 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -4695,8 +4695,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) /* Translate section/page access permissions to page * R/W protection flags */ -static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, - bool is_user, int ap, int domain_prot) +static int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, + bool is_user, int ap, int domain_prot) { bool simple_ap = regime_using_lpae_format(env, mmu_idx) || (regime_sctlr(env, mmu_idx) & SCTLR_AFE); @@ -4762,6 +4762,84 @@ static inline int get_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, } } +/* Translate section/page access permissions to protection flags */ +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, + int ap, int domain_prot, int ns, int xn, int pxn) +{ + bool domain_prot_valid = !regime_using_lpae_format(env, mmu_idx); + bool is_user = regime_is_user(env, mmu_idx); + bool have_wxn; + int prot_rw, user_rw; + int wxn = 0; + + assert(mmu_idx != ARMMMUIdx_S2NS); + + if (domain_prot_valid && domain_prot == 3) { + return PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } + + user_rw = get_rw_prot(env, mmu_idx, true, ap, domain_prot); + if (is_user) { + prot_rw = user_rw; + } else { + prot_rw = get_rw_prot(env, mmu_idx, false, ap, domain_prot); + } + + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { + return prot_rw; + } + + /* TODO have_wxn should be replaced with arm_feature(env, ARM_FEATURE_EL2), + * when ARM_FEATURE_EL2 starts getting set. For now we assume all v7 + * compatible processors have EL2, which is required for [U]WXN. 
+ */ + have_wxn = arm_feature(env, ARM_FEATURE_V7); + + if (have_wxn) { + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; + } + + if (is_aa64) { + assert(arm_feature(env, ARM_FEATURE_V8)); + switch (regime_el(env, mmu_idx)) { + case 1: + if (is_user && !user_rw) { + wxn = 0; + } else if (!is_user) { + xn = pxn || (user_rw & PAGE_WRITE); + } + break; + case 2: + case 3: + break; + } + } else if (arm_feature(env, ARM_FEATURE_V6K)) { + switch (regime_el(env, mmu_idx)) { + case 1: + case 3: + if (is_user) { + xn = xn || !user_rw; + } else { + int uwxn = 0; + if (have_wxn) { + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; + } + xn = xn || !prot_rw || pxn || (uwxn && (user_rw & PAGE_WRITE)); + } + break; + case 2: + break; + } + } else { + xn = wxn = 0; + } + + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { + return prot_rw; + } + return prot_rw | PAGE_EXEC; +} + static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, uint32_t *table, uint32_t address) { @@ -5047,7 +5125,6 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, int32_t granule_sz = 9; int32_t va_size = 32; int32_t tbi = 0; - bool is_user; TCR *tcr = regime_tcr(env, mmu_idx); /* TODO: @@ -5220,31 +5297,15 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, /* Access flag */ goto do_fault; } + + *prot = get_S1prot(env, mmu_idx, va_size == 64, extract32(attrs, 4, 2), 0, + extract32(attrs, 3, 1), extract32(attrs, 12, 1), + extract32(attrs, 11, 1)); + fault_type = permission_fault; - is_user = regime_is_user(env, mmu_idx); - if (is_user && !(attrs & (1 << 4))) { - /* Unprivileged access not enabled */ + if (!(*prot & (1 << access_type))) { goto do_fault; } - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || - (!is_user && (attrs & (1 << 11)))) { - /* XN/UXN or PXN. 
Since we only implement EL0/EL1 we unconditionally - * treat XN/UXN as UXN for v8. - */ - if (access_type == 2) { - goto do_fault; - } - *prot &= ~PAGE_EXEC; - } - if (attrs & (1 << 5)) { - /* Write access forbidden */ - if (access_type == 1) { - goto do_fault; - } - *prot &= ~PAGE_WRITE; - } *phys_ptr = descaddr; *page_size_ptr = page_size;
This patch makes the following changes to the determination of whether an address is executable, when translating addresses using LPAE. 1. No longer assumes that PL0 can't execute when it can't read. It can in AArch64, a difference from AArch32. 2. Use va_size == 64 to determine we're in AArch64, rather than arm_feature(env, ARM_FEATURE_V8), which is insufficient. 3. Add additional XN determinants - NS && is_secure && (SCR & SCR_SIF) - WXN && (prot & PAGE_WRITE) - AArch64: (prot_PL0 & PAGE_WRITE) - AArch32: UWXN && (prot_PL0 & PAGE_WRITE) - XN determination should also work in secure mode (untested) - XN may even work in EL2 (currently impossible to test) 4. Cleans up the bloated PAGE_EXEC condition - by removing it. The helper get_S1prot is introduced, which also handles short-descriptors and v6 XN determination. It may even work in EL2, when support for that comes, but, as the function name implies, it only works for stage 1 translations. Signed-off-by: Andrew Jones <drjones@redhat.com> --- target-arm/helper.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 25 deletions(-)