diff mbox series

[v2,29/37] powerpc/nohash: Replace pte_user() by pte_read()

Message ID 72cbb5be595e9ef884140def73815ed0b0b37010.1695659959.git.christophe.leroy@csgroup.eu (mailing list archive)
State Accepted
Commit 8e9bd41e4ce1001f5b89e4c9a69f870f39d56c12
Headers show
Series Implement execute-only protection on powerpc | expand

Commit Message

Christophe Leroy Sept. 25, 2023, 6:31 p.m. UTC
pte_user() is now only used in pte_access_permitted() to check
access on vmas. User flag is cleared to make a page unreadable.

So rename it pte_read() and remove pte_user() which isn't used
anymore.

For the time being it checks _PAGE_USER but in the near future
all platforms will be converted to _PAGE_READ so let's support
both for now.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/nohash/32/pte-8xx.h |  7 -------
 arch/powerpc/include/asm/nohash/pgtable.h    | 13 +++++++------
 arch/powerpc/mm/ioremap.c                    |  4 ----
 3 files changed, 7 insertions(+), 17 deletions(-)

Comments

Aneesh Kumar K V Oct. 31, 2023, 10:15 a.m. UTC | #1
Christophe Leroy <christophe.leroy@csgroup.eu> writes:

> pte_user() is now only used in pte_access_permitted() to check
> access on vmas. User flag is cleared to make a page unreadable.
>
> So rename it pte_read() and remove pte_user() which isn't used
> anymore.
>
> For the time being it checks _PAGE_USER but in the near futur
> all plateforms will be converted to _PAGE_READ so lets support
> both for now.
>
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> ---
>  arch/powerpc/include/asm/nohash/32/pte-8xx.h |  7 -------
>  arch/powerpc/include/asm/nohash/pgtable.h    | 13 +++++++------
>  arch/powerpc/mm/ioremap.c                    |  4 ----
>  3 files changed, 7 insertions(+), 17 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> index 62c965a4511a..1ee38befd29a 100644
> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> @@ -112,13 +112,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
>  
>  #define pte_mkwrite_novma pte_mkwrite_novma
>  
> -static inline bool pte_user(pte_t pte)
> -{
> -	return !(pte_val(pte) & _PAGE_SH);
> -}
> -
> -#define pte_user pte_user
> -
>  static inline pte_t pte_mkhuge(pte_t pte)
>  {
>  	return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
> index ee677162f9e6..aba56fe3b1c6 100644
> --- a/arch/powerpc/include/asm/nohash/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
> @@ -160,9 +160,6 @@ static inline int pte_write(pte_t pte)
>  	return pte_val(pte) & _PAGE_WRITE;
>  }
>  #endif
> -#ifndef pte_read
> -static inline int pte_read(pte_t pte)		{ return 1; }
> -#endif
>  static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
>  static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
>  static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
> @@ -190,10 +187,14 @@ static inline int pte_young(pte_t pte)
>   * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
>   * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
>   */
> -#ifndef pte_user
> -static inline bool pte_user(pte_t pte)
> +#ifndef pte_read
> +static inline bool pte_read(pte_t pte)
>  {
> +#ifdef _PAGE_READ
> +	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
> +#else
>  	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
> +#endif
>  }
>  #endif
>  
> @@ -208,7 +209,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
>  	 * A read-only access is controlled by _PAGE_USER bit.
>  	 * We have _PAGE_READ set for WRITE and EXECUTE
>  	 */
> -	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
> +	if (!pte_present(pte) || !pte_read(pte))
>  		return false;
>  
>  	if (write && !pte_write(pte))
> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
> index 7823c38f09de..7b0afcabd89f 100644
> --- a/arch/powerpc/mm/ioremap.c
> +++ b/arch/powerpc/mm/ioremap.c
> @@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>  	if (pte_write(pte))
>  		pte = pte_mkdirty(pte);
>  
> -	/* we don't want to let _PAGE_USER leak out */
> -	if (WARN_ON(pte_user(pte)))
> -		return NULL;
>

This check is still valid right? I understand that we want to remove
_PAGE_USER. But then losing this check is ok? 

> -
>  	if (iowa_is_active())
>  		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
>  	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
> -- 
> 2.41.0
Christophe Leroy Nov. 6, 2023, 1:21 p.m. UTC | #2
Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> 
>> pte_user() is now only used in pte_access_permitted() to check
>> access on vmas. User flag is cleared to make a page unreadable.
>>
>> So rename it pte_read() and remove pte_user() which isn't used
>> anymore.
>>
>> For the time being it checks _PAGE_USER but in the near futur
>> all plateforms will be converted to _PAGE_READ so lets support
>> both for now.
>>
>> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
>> ---
>>   arch/powerpc/include/asm/nohash/32/pte-8xx.h |  7 -------
>>   arch/powerpc/include/asm/nohash/pgtable.h    | 13 +++++++------
>>   arch/powerpc/mm/ioremap.c                    |  4 ----
>>   3 files changed, 7 insertions(+), 17 deletions(-)
>>
>> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>> index 62c965a4511a..1ee38befd29a 100644
>> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>> @@ -112,13 +112,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
>>   
>>   #define pte_mkwrite_novma pte_mkwrite_novma
>>   
>> -static inline bool pte_user(pte_t pte)
>> -{
>> -	return !(pte_val(pte) & _PAGE_SH);
>> -}
>> -
>> -#define pte_user pte_user
>> -
>>   static inline pte_t pte_mkhuge(pte_t pte)
>>   {
>>   	return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
>> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
>> index ee677162f9e6..aba56fe3b1c6 100644
>> --- a/arch/powerpc/include/asm/nohash/pgtable.h
>> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
>> @@ -160,9 +160,6 @@ static inline int pte_write(pte_t pte)
>>   	return pte_val(pte) & _PAGE_WRITE;
>>   }
>>   #endif
>> -#ifndef pte_read
>> -static inline int pte_read(pte_t pte)		{ return 1; }
>> -#endif
>>   static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
>>   static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
>>   static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
>> @@ -190,10 +187,14 @@ static inline int pte_young(pte_t pte)
>>    * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
>>    * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
>>    */
>> -#ifndef pte_user
>> -static inline bool pte_user(pte_t pte)
>> +#ifndef pte_read
>> +static inline bool pte_read(pte_t pte)
>>   {
>> +#ifdef _PAGE_READ
>> +	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
>> +#else
>>   	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
>> +#endif
>>   }
>>   #endif
>>   
>> @@ -208,7 +209,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
>>   	 * A read-only access is controlled by _PAGE_USER bit.
>>   	 * We have _PAGE_READ set for WRITE and EXECUTE
>>   	 */
>> -	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
>> +	if (!pte_present(pte) || !pte_read(pte))
>>   		return false;
>>   
>>   	if (write && !pte_write(pte))
>> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
>> index 7823c38f09de..7b0afcabd89f 100644
>> --- a/arch/powerpc/mm/ioremap.c
>> +++ b/arch/powerpc/mm/ioremap.c
>> @@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>>   	if (pte_write(pte))
>>   		pte = pte_mkdirty(pte);
>>   
>> -	/* we don't want to let _PAGE_USER leak out */
>> -	if (WARN_ON(pte_user(pte)))
>> -		return NULL;
>>
> 
> This check is still valid right? I understand that we want to remove
> _PAGE_USER. But then loosing this check is ok?

Well, we may have to think about it for book3s/64. For all others 
_PAGE_USER is gone and replaced by a check of addresses versus TASK_SIZE.

As ioremap() will map into vmalloc space that address is necessarily 
correct.

> 
>> -
>>   	if (iowa_is_active())
>>   		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
>>   	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
>> -- 
>> 2.41.0
Aneesh Kumar K V Nov. 7, 2023, 1:34 p.m. UTC | #3
Christophe Leroy <christophe.leroy@csgroup.eu> writes:

> Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
>> 
>>> pte_user() is now only used in pte_access_permitted() to check
>>> access on vmas. User flag is cleared to make a page unreadable.
>>>
>>> So rename it pte_read() and remove pte_user() which isn't used
>>> anymore.
>>>
>>> For the time being it checks _PAGE_USER but in the near futur
>>> all plateforms will be converted to _PAGE_READ so lets support
>>> both for now.
>>>
>>> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
>>> ---
>>>   arch/powerpc/include/asm/nohash/32/pte-8xx.h |  7 -------
>>>   arch/powerpc/include/asm/nohash/pgtable.h    | 13 +++++++------
>>>   arch/powerpc/mm/ioremap.c                    |  4 ----
>>>   3 files changed, 7 insertions(+), 17 deletions(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> index 62c965a4511a..1ee38befd29a 100644
>>> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> @@ -112,13 +112,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
>>>   
>>>   #define pte_mkwrite_novma pte_mkwrite_novma
>>>   
>>> -static inline bool pte_user(pte_t pte)
>>> -{
>>> -	return !(pte_val(pte) & _PAGE_SH);
>>> -}
>>> -
>>> -#define pte_user pte_user
>>> -
>>>   static inline pte_t pte_mkhuge(pte_t pte)
>>>   {
>>>   	return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
>>> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
>>> index ee677162f9e6..aba56fe3b1c6 100644
>>> --- a/arch/powerpc/include/asm/nohash/pgtable.h
>>> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
>>> @@ -160,9 +160,6 @@ static inline int pte_write(pte_t pte)
>>>   	return pte_val(pte) & _PAGE_WRITE;
>>>   }
>>>   #endif
>>> -#ifndef pte_read
>>> -static inline int pte_read(pte_t pte)		{ return 1; }
>>> -#endif
>>>   static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
>>>   static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
>>>   static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
>>> @@ -190,10 +187,14 @@ static inline int pte_young(pte_t pte)
>>>    * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
>>>    * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
>>>    */
>>> -#ifndef pte_user
>>> -static inline bool pte_user(pte_t pte)
>>> +#ifndef pte_read
>>> +static inline bool pte_read(pte_t pte)
>>>   {
>>> +#ifdef _PAGE_READ
>>> +	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
>>> +#else
>>>   	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
>>> +#endif
>>>   }
>>>   #endif
>>>   
>>> @@ -208,7 +209,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
>>>   	 * A read-only access is controlled by _PAGE_USER bit.
>>>   	 * We have _PAGE_READ set for WRITE and EXECUTE
>>>   	 */
>>> -	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
>>> +	if (!pte_present(pte) || !pte_read(pte))
>>>   		return false;
>>>   
>>>   	if (write && !pte_write(pte))
>>> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
>>> index 7823c38f09de..7b0afcabd89f 100644
>>> --- a/arch/powerpc/mm/ioremap.c
>>> +++ b/arch/powerpc/mm/ioremap.c
>>> @@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>>>   	if (pte_write(pte))
>>>   		pte = pte_mkdirty(pte);
>>>   
>>> -	/* we don't want to let _PAGE_USER leak out */
>>> -	if (WARN_ON(pte_user(pte)))
>>> -		return NULL;
>>>
>> 
>> This check is still valid right? I understand that we want to remove
>> _PAGE_USER. But then loosing this check is ok?
>
> Well, we may have to think about it for book3s/64. For all others 
> _PAGE_USER is gone and replaced by a check of addresses versus TASK_SIZE.
>
> As ioremap() will map into vmalloc space that address is necesserally 
> correct.
> 
>

We are adding the pte flags check not the map addr check there. Something like this?

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7c7de7b56df0..b053b86e0069 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1462,5 +1462,11 @@ static inline bool pud_is_leaf(pud_t pud)
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
 }
 
+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 137dc3c84e45..7b23d2543528 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -223,5 +223,11 @@ static inline pte_t ptep_get(pte_t *ptep)
 
 #endif
 
+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & (_PAGE_SH));
+}
+
 #endif /* __KERNEL__ */
 #endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index f516f0b5b7a8..d31274178aa6 100644
--- a/arch/powerpc/include/asm/nohash/pte-e500.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -105,6 +105,13 @@ static inline pte_t pte_mkexec(pte_t pte)
 }
 #define pte_mkexec pte_mkexec
 
+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+	return !(pte_val(pte) & (_PAGE_BAP_UR | _PAGE_BAP_UW | _PAGE_BAP_UX));
+}
+
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2bfb7dd3b49e..417abe5dcbd8 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -231,6 +231,13 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
 
 #endif /* CONFIG_PPC64 */
 
+#ifndef arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+	return true;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 7b0afcabd89f..1a39e698c3d4 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -50,6 +50,9 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
 	if (pte_write(pte))
 		pte = pte_mkdirty(pte);
 
+	if (WARN_ON(!arch_ioremap_valid_pte(pte)))
+		return NULL;
+
 	if (iowa_is_active())
 		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
 	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
Christophe Leroy Nov. 9, 2023, 6:20 p.m. UTC | #4
Le 07/11/2023 à 14:34, Aneesh Kumar K.V a écrit :
> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> 
>> Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
>>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
>>>
>>>> pte_user() is now only used in pte_access_permitted() to check
>>>> access on vmas. User flag is cleared to make a page unreadable.
>>>>
>>>> So rename it pte_read() and remove pte_user() which isn't used
>>>> anymore.
>>>>
>>>> For the time being it checks _PAGE_USER but in the near futur
>>>> all plateforms will be converted to _PAGE_READ so lets support
>>>> both for now.
>>>>
>>>> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
>>>> ---
>>>>    arch/powerpc/include/asm/nohash/32/pte-8xx.h |  7 -------
>>>>    arch/powerpc/include/asm/nohash/pgtable.h    | 13 +++++++------
>>>>    arch/powerpc/mm/ioremap.c                    |  4 ----
>>>>    3 files changed, 7 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>>> index 62c965a4511a..1ee38befd29a 100644
>>>> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>>> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>>> @@ -112,13 +112,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
>>>>    
>>>>    #define pte_mkwrite_novma pte_mkwrite_novma
>>>>    
>>>> -static inline bool pte_user(pte_t pte)
>>>> -{
>>>> -	return !(pte_val(pte) & _PAGE_SH);
>>>> -}
>>>> -
>>>> -#define pte_user pte_user
>>>> -
>>>>    static inline pte_t pte_mkhuge(pte_t pte)
>>>>    {
>>>>    	return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
>>>> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
>>>> index ee677162f9e6..aba56fe3b1c6 100644
>>>> --- a/arch/powerpc/include/asm/nohash/pgtable.h
>>>> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
>>>> @@ -160,9 +160,6 @@ static inline int pte_write(pte_t pte)
>>>>    	return pte_val(pte) & _PAGE_WRITE;
>>>>    }
>>>>    #endif
>>>> -#ifndef pte_read
>>>> -static inline int pte_read(pte_t pte)		{ return 1; }
>>>> -#endif
>>>>    static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
>>>>    static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
>>>>    static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
>>>> @@ -190,10 +187,14 @@ static inline int pte_young(pte_t pte)
>>>>     * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
>>>>     * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
>>>>     */
>>>> -#ifndef pte_user
>>>> -static inline bool pte_user(pte_t pte)
>>>> +#ifndef pte_read
>>>> +static inline bool pte_read(pte_t pte)
>>>>    {
>>>> +#ifdef _PAGE_READ
>>>> +	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
>>>> +#else
>>>>    	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
>>>> +#endif
>>>>    }
>>>>    #endif
>>>>    
>>>> @@ -208,7 +209,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
>>>>    	 * A read-only access is controlled by _PAGE_USER bit.
>>>>    	 * We have _PAGE_READ set for WRITE and EXECUTE
>>>>    	 */
>>>> -	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
>>>> +	if (!pte_present(pte) || !pte_read(pte))
>>>>    		return false;
>>>>    
>>>>    	if (write && !pte_write(pte))
>>>> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
>>>> index 7823c38f09de..7b0afcabd89f 100644
>>>> --- a/arch/powerpc/mm/ioremap.c
>>>> +++ b/arch/powerpc/mm/ioremap.c
>>>> @@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>>>>    	if (pte_write(pte))
>>>>    		pte = pte_mkdirty(pte);
>>>>    
>>>> -	/* we don't want to let _PAGE_USER leak out */
>>>> -	if (WARN_ON(pte_user(pte)))
>>>> -		return NULL;
>>>>
>>>
>>> This check is still valid right? I understand that we want to remove
>>> _PAGE_USER. But then loosing this check is ok?
>>
>> Well, we may have to think about it for book3s/64. For all others
>> _PAGE_USER is gone and replaced by a check of addresses versus TASK_SIZE.
>>
>> As ioremap() will map into vmalloc space that address is necesserally
>> correct.
>>
>>
> 
> We are adding the pte flags check not the map addr check there. Something like this?

Well, ok, but then why do we want to do that check for ioremap() and not 
for everything else ? vmap() for instance will not perform any such 
check. All it does is to clear the EXEC bit.

As far as I can see, no other architecture does such a check, so why is 
it needed on powerpc at all ?

Regardless, comments below.

> 
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 7c7de7b56df0..b053b86e0069 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1462,5 +1462,11 @@ static inline bool pud_is_leaf(pud_t pud)
>   	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
>   }
>   
> +#define arch_ioremap_valid_pte arch_ioremap_valid_pte
> +static inline bool arch_ioremap_valid_pte(pte_t pte)
> +{
> +	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
> +}
> +
>   #endif /* __ASSEMBLY__ */
>   #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> index 137dc3c84e45..7b23d2543528 100644
> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> @@ -223,5 +223,11 @@ static inline pte_t ptep_get(pte_t *ptep)
>   
>   #endif
>   
> +#define arch_ioremap_valid_pte arch_ioremap_valid_pte
> +static inline bool arch_ioremap_valid_pte(pte_t pte)
> +{
> +	return !!(pte_val(pte) & (_PAGE_SH))
> +}
> +

_PAGE_SH bit means the page is shared hence is valid for all PIDs. It 
was used as a trick for pte_user() because all kernel pages have SH bit 
set while user pages have SH bit unset, but clearing SH bit doesn't make 
the page become a user page. User pages are defined by _PMD_USER flag at 
PMD level.

So I think this can be dropped.

If we wanted to be really strict with the verification, we should check 
that the PPP bits won't contain something odd, because some unused 
combinations may provide user access to pages located in PMDs not having 
_PMD_USER. But is that really worth it ?

In case we want that check:
- PAGE_NA is 0x200
- PAGE_RO is 0x600
- PAGE_RW is 0x000
Last possible combination is 0x400, that one will set it RW for 
supervisor + RO for user.


>   #endif /* __KERNEL__ */
>   #endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
> diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
> index f516f0b5b7a8..d31274178aa6 100644
> --- a/arch/powerpc/include/asm/nohash/pte-e500.h
> +++ b/arch/powerpc/include/asm/nohash/pte-e500.h
> @@ -105,6 +105,13 @@ static inline pte_t pte_mkexec(pte_t pte)
>   }
>   #define pte_mkexec pte_mkexec
>   
> +#define arch_ioremap_valid_pte arch_ioremap_valid_pte
> +static inline bool arch_ioremap_valid_pte(pte_t pte)
> +{
> +	return !(pte_val(pte) & (_PAGE_BAP_UR | _PAGE_BAP_UW | _PAGE_BAP_UX))
> +}
> +
> +
>   #endif /* __ASSEMBLY__ */
>   
>   #endif /* __KERNEL__ */
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 2bfb7dd3b49e..417abe5dcbd8 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -231,6 +231,13 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
>   
>   #endif /* CONFIG_PPC64 */
>   
> +#ifndef arch_ioremap_valid_pte
> +static inline book arch_ioremap_valid_pte(pte_t pte)
> +{
> +	return true;
> +}
> +#endif
> +
>   #endif /* __ASSEMBLY__ */
>   
>   #endif /* _ASM_POWERPC_PGTABLE_H */
> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
> index 7b0afcabd89f..1a39e698c3d4 100644
> --- a/arch/powerpc/mm/ioremap.c
> +++ b/arch/powerpc/mm/ioremap.c
> @@ -50,6 +50,9 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>   	if (pte_write(pte))
>   		pte = pte_mkdirty(pte);
>   
> +	if (WARN_ON(!arch_ioremap_valid_pte(pte)))
> +		return NULL;
> +
>   	if (iowa_is_active())
>   		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
>   	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
>
Aneesh Kumar K V Nov. 13, 2023, 10:23 a.m. UTC | #5
Christophe Leroy <christophe.leroy@csgroup.eu> writes:

> Le 07/11/2023 à 14:34, Aneesh Kumar K.V a écrit :
>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
>> 
>>> Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
>>>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:

....

>>
>> 
>> We are adding the pte flags check not the map addr check there. Something like this?
>
> Well, ok, but then why do we want to do that check for ioremap() and not 
> for everything else ? vmap() for instance will not perform any such 
> check. All it does is to clear the EXEC bit.
>
> As far as I can see, no other architecture does such a check, so why is 
> it needed on powerpc at all ?
>
> Regardless, comments below.
>

Looking at ioremap_prot() I am not clear whether we can really use the
flag value argument as is. For ex: x86 does 

pgprot2cachemode(__pgprot(prot_val))

I see that we use ioremap_prot() for generic_access_phys() and with
/dev/mem and __access_remote_vm() we can get called with a user pte
mapping prot flags? 

If such an prot value can be observed then the original change to clear
EXEC and mark it privileged is required?

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);


We already handle exec in pgprot_nx() and we need add back
pte_mkprivileged()? 


-aneesh
Christophe Leroy Nov. 23, 2023, 3:55 p.m. UTC | #6
Le 13/11/2023 à 11:23, Aneesh Kumar K.V a écrit :
> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> 
>> Le 07/11/2023 à 14:34, Aneesh Kumar K.V a écrit :
>>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
>>>
>>>> Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
>>>>> Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> 
> ....
> 
>>>
>>>
>>> We are adding the pte flags check not the map addr check there. Something like this?
>>
>> Well, ok, but then why do we want to do that check for ioremap() and not
>> for everything else ? vmap() for instance will not perform any such
>> check. All it does is to clear the EXEC bit.
>>
>> As far as I can see, no other architecture does such a check, so why is
>> it needed on powerpc at all ?
>>
>> Regardless, comments below.
>>
> 
> Looking at ioremap_prot() I am not clear whether we can really use the
> flag value argument as is. For ex: x86 does
> 
> pgprot2cachemode(__pgprot(prot_val))
> 
> I see that we use ioremap_prot() for generic_access_phys() and with
> /dev/mem and __access_remote_vm() we can get called with a user pte
> mapping prot flags?

Do you think so ?

If I understand correctly, in those cases ioremap_prot() is called with 
prot flags as returned by the call to phys_mem_access_prot().

> 
> If such an prot value can be observed then the original change to clear
> EXEC and mark it privileged is required?
> 
> 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
> 	pte = pte_exprotect(pte);
> 	pte = pte_mkprivileged(pte);
> 
> 
> We already handle exec in pgprot_nx() and we need add back
> pte_mkprivileged()?

If this is the case for powerpc that's likely the case for most 
architectures. Should we change pgprot_nx() to pgprot_nxu() or have a 
pgprot_nu() in addition ?

Christophe
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 62c965a4511a..1ee38befd29a 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -112,13 +112,6 @@  static inline pte_t pte_mkwrite_novma(pte_t pte)
 
 #define pte_mkwrite_novma pte_mkwrite_novma
 
-static inline bool pte_user(pte_t pte)
-{
-	return !(pte_val(pte) & _PAGE_SH);
-}
-
-#define pte_user pte_user
-
 static inline pte_t pte_mkhuge(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index ee677162f9e6..aba56fe3b1c6 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -160,9 +160,6 @@  static inline int pte_write(pte_t pte)
 	return pte_val(pte) & _PAGE_WRITE;
 }
 #endif
-#ifndef pte_read
-static inline int pte_read(pte_t pte)		{ return 1; }
-#endif
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
@@ -190,10 +187,14 @@  static inline int pte_young(pte_t pte)
  * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
  * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
  */
-#ifndef pte_user
-static inline bool pte_user(pte_t pte)
+#ifndef pte_read
+static inline bool pte_read(pte_t pte)
 {
+#ifdef _PAGE_READ
+	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
+#else
 	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+#endif
 }
 #endif
 
@@ -208,7 +209,7 @@  static inline bool pte_access_permitted(pte_t pte, bool write)
 	 * A read-only access is controlled by _PAGE_USER bit.
 	 * We have _PAGE_READ set for WRITE and EXECUTE
 	 */
-	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+	if (!pte_present(pte) || !pte_read(pte))
 		return false;
 
 	if (write && !pte_write(pte))
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 7823c38f09de..7b0afcabd89f 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -50,10 +50,6 @@  void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
 	if (pte_write(pte))
 		pte = pte_mkdirty(pte);
 
-	/* we don't want to let _PAGE_USER leak out */
-	if (WARN_ON(pte_user(pte)))
-		return NULL;
-
 	if (iowa_is_active())
 		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
 	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);