Message ID | 20240313042118.230397-12-rmclure@linux.ibm.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | Support page table check PowerPC | expand |
Le 13/03/2024 à 05:21, Rohan McLure a écrit : > In the new set_ptes() API, set_pte_at() (a special case of set_ptes()) > is intended to be instrumented by the page table check facility. There > are however several other routines that constitute the API for setting > page table entries, including set_pmd_at() among others. Such routines > are themselves implemented in terms of set_ptes_at(). > > A future patch providing support for page table checking on powerpc > must take care to avoid duplicate calls to > page_table_check_p{te,md,ud}_set(). Allow for assignment of pte entries > without instrumentation through the set_pte_at_unchecked() routine > introduced in this patch. > > Cause API-facing routines that call set_pte_at() to instead call > set_pte_at_unchecked(), which will remain uninstrumented by page > table check. set_ptes() is itself implemented by calls to > __set_pte_at(), so this eliminates redundant code. > > Also prefer set_pte_at_unchecked() in early-boot usages which should not be > instrumented. > > Signed-off-by: Rohan McLure <rmclure@linux.ibm.com> > --- > v9: New patch > v10: don't reuse __set_pte_at(), as that will not apply filters. Instead > use new set_pte_at_unchecked(). Are filters needed at all in those usecases ? 
> --- > arch/powerpc/include/asm/pgtable.h | 2 ++ > arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- > arch/powerpc/mm/book3s64/pgtable.c | 6 +++--- > arch/powerpc/mm/book3s64/radix_pgtable.c | 8 ++++---- > arch/powerpc/mm/nohash/book3e_pgtable.c | 2 +- > arch/powerpc/mm/pgtable.c | 7 +++++++ > arch/powerpc/mm/pgtable_32.c | 2 +- > 7 files changed, 19 insertions(+), 10 deletions(-) > > diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 3741a63fb82e..6ff1d8cfa216 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -44,6 +44,8 @@ struct mm_struct; > void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, > pte_t pte, unsigned int nr); > #define set_ptes set_ptes > +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr, > + pte_t *ptep, pte_t pte); > #define update_mmu_cache(vma, addr, ptep) \ > update_mmu_cache_range(NULL, vma, addr, ptep, 1) > > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 988948d69bc1..871472f99a01 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -165,7 +165,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > ptep = pte_alloc_kernel(pmdp, ea); > if (!ptep) > return -ENOMEM; > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); > + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); > } else { > /* > * If the mm subsystem is not fully up, we cannot create a > diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c > index 3438ab72c346..25082ab6018b 100644 > --- a/arch/powerpc/mm/book3s64/pgtable.c > +++ b/arch/powerpc/mm/book3s64/pgtable.c > @@ -116,7 +116,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, > WARN_ON(!(pmd_large(pmd))); > #endif > trace_hugepage_set_pmd(addr, pmd_val(pmd)); > - return set_pte_at(mm, 
addr, pmdp_ptep(pmdp), pmd_pte(pmd)); > + return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); > } > > void set_pud_at(struct mm_struct *mm, unsigned long addr, > @@ -133,7 +133,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr, > WARN_ON(!(pud_large(pud))); > #endif > trace_hugepage_set_pud(addr, pud_val(pud)); > - return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud)); > + return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud)); > } > > static void do_serialize(void *arg) > @@ -539,7 +539,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, > if (radix_enabled()) > return radix__ptep_modify_prot_commit(vma, addr, > ptep, old_pte, pte); > - set_pte_at(vma->vm_mm, addr, ptep, pte); > + set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte); > } > > #ifdef CONFIG_TRANSPARENT_HUGEPAGE > diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c > index 46fa46ce6526..c661e42bb2f1 100644 > --- a/arch/powerpc/mm/book3s64/radix_pgtable.c > +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c > @@ -109,7 +109,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, > ptep = pte_offset_kernel(pmdp, ea); > > set_the_pte: > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); > + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pfn, flags)); > asm volatile("ptesync": : :"memory"); > return 0; > } > @@ -1522,7 +1522,7 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, > (atomic_read(&mm->context.copros) > 0)) > radix__flush_tlb_page(vma, addr); > > - set_pte_at(mm, addr, ptep, pte); > + set_pte_at_unchecked(mm, addr, ptep, pte); > } > > int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) > @@ -1533,7 +1533,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) > if (!radix_enabled()) > return 0; > > - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud); > + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, 
ptep, new_pud); > > return 1; > } > @@ -1580,7 +1580,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) > if (!radix_enabled()) > return 0; > > - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd); > + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pmd); > > return 1; > } > diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c > index 1c5e4ecbebeb..10d487b2b991 100644 > --- a/arch/powerpc/mm/nohash/book3e_pgtable.c > +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c > @@ -111,7 +111,7 @@ int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot) > } > ptep = pte_offset_kernel(pmdp, ea); > } > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); > + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); > > smp_wmb(); > return 0; > diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c > index e8e0289d7ab0..352679cf2684 100644 > --- a/arch/powerpc/mm/pgtable.c > +++ b/arch/powerpc/mm/pgtable.c > @@ -227,6 +227,13 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, > } > } > > +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr, > + pte_t *ptep, pte_t pte) > +{ No need of the VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); which is in set_ptes() ? 
> + pte = set_pte_filter(pte, addr); > + __set_pte_at(mm, addr, ptep, pte, 0); > +} > + > void unmap_kernel_page(unsigned long va) > { > pmd_t *pmdp = pmd_off_k(va); > diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c > index 4be97b4a44f9..a5a26faf91ec 100644 > --- a/arch/powerpc/mm/pgtable_32.c > +++ b/arch/powerpc/mm/pgtable_32.c > @@ -89,7 +89,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) > * hash table > */ > BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); > - set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); > + set_pte_at_unchecked(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); > } > smp_wmb(); > return err;
On Wed, 2024-03-13 at 11:30 +0000, Christophe Leroy wrote: > > > Le 13/03/2024 à 05:21, Rohan McLure a écrit : > > In the new set_ptes() API, set_pte_at() (a special case of > > set_ptes()) > > is intended to be instrumented by the page table check facility. > > There > > are however several other routines that constitute the API for > > setting > > page table entries, including set_pmd_at() among others. Such > > routines > > are themselves implemented in terms of set_ptes_at(). > > > > A future patch providing support for page table checking on powerpc > > must take care to avoid duplicate calls to > > page_table_check_p{te,md,ud}_set(). Allow for assignment of pte > > entries > > without instrumentation through the set_pte_at_unchecked() routine > > introduced in this patch. > > > > Cause API-facing routines that call set_pte_at() to instead call > > set_pte_at_unchecked(), which will remain uninstrumented by page > > table check. set_ptes() is itself implemented by calls to > > __set_pte_at(), so this eliminates redundant code. > > > > Also prefer set_pte_at_unchecked() in early-boot usages which > > should not be > > instrumented. > > > > Signed-off-by: Rohan McLure <rmclure@linux.ibm.com> > > --- > > v9: New patch > > v10: don't reuse __set_pte_at(), as that will not apply filters. > > Instead > > use new set_pte_at_unchecked(). > > Are filters needed at all in those usecases ? I'm just retaining the original semantics of these calls. I think another patch can replace this call with __set_pte_at() if filters are deemed unnecessary. 
> > > --- > > arch/powerpc/include/asm/pgtable.h | 2 ++ > > arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- > > arch/powerpc/mm/book3s64/pgtable.c | 6 +++--- > > arch/powerpc/mm/book3s64/radix_pgtable.c | 8 ++++---- > > arch/powerpc/mm/nohash/book3e_pgtable.c | 2 +- > > arch/powerpc/mm/pgtable.c | 7 +++++++ > > arch/powerpc/mm/pgtable_32.c | 2 +- > > 7 files changed, 19 insertions(+), 10 deletions(-) > > > > diff --git a/arch/powerpc/include/asm/pgtable.h > > b/arch/powerpc/include/asm/pgtable.h > > index 3741a63fb82e..6ff1d8cfa216 100644 > > --- a/arch/powerpc/include/asm/pgtable.h > > +++ b/arch/powerpc/include/asm/pgtable.h > > @@ -44,6 +44,8 @@ struct mm_struct; > > void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t > > *ptep, > > pte_t pte, unsigned int nr); > > #define set_ptes set_ptes > > +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long > > addr, > > + pte_t *ptep, pte_t pte); > > #define update_mmu_cache(vma, addr, ptep) \ > > update_mmu_cache_range(NULL, vma, addr, ptep, 1) > > > > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c > > b/arch/powerpc/mm/book3s64/hash_pgtable.c > > index 988948d69bc1..871472f99a01 100644 > > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > > @@ -165,7 +165,7 @@ int hash__map_kernel_page(unsigned long ea, > > unsigned long pa, pgprot_t prot) > > ptep = pte_alloc_kernel(pmdp, ea); > > if (!ptep) > > return -ENOMEM; > > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> > > PAGE_SHIFT, prot)); > > + set_pte_at_unchecked(&init_mm, ea, ptep, > > pfn_pte(pa >> PAGE_SHIFT, prot)); > > } else { > > /* > > * If the mm subsystem is not fully up, we cannot > > create a > > diff --git a/arch/powerpc/mm/book3s64/pgtable.c > > b/arch/powerpc/mm/book3s64/pgtable.c > > index 3438ab72c346..25082ab6018b 100644 > > --- a/arch/powerpc/mm/book3s64/pgtable.c > > +++ b/arch/powerpc/mm/book3s64/pgtable.c > > @@ -116,7 +116,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned > 
> long addr, > > WARN_ON(!(pmd_large(pmd))); > > #endif > > trace_hugepage_set_pmd(addr, pmd_val(pmd)); > > - return set_pte_at(mm, addr, pmdp_ptep(pmdp), > > pmd_pte(pmd)); > > + return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), > > pmd_pte(pmd)); > > } > > > > void set_pud_at(struct mm_struct *mm, unsigned long addr, > > @@ -133,7 +133,7 @@ void set_pud_at(struct mm_struct *mm, unsigned > > long addr, > > WARN_ON(!(pud_large(pud))); > > #endif > > trace_hugepage_set_pud(addr, pud_val(pud)); > > - return set_pte_at(mm, addr, pudp_ptep(pudp), > > pud_pte(pud)); > > + return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), > > pud_pte(pud)); > > } > > > > static void do_serialize(void *arg) > > @@ -539,7 +539,7 @@ void ptep_modify_prot_commit(struct > > vm_area_struct *vma, unsigned long addr, > > if (radix_enabled()) > > return radix__ptep_modify_prot_commit(vma, addr, > > ptep, > > old_pte, pte); > > - set_pte_at(vma->vm_mm, addr, ptep, pte); > > + set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte); > > } > > > > #ifdef CONFIG_TRANSPARENT_HUGEPAGE > > diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c > > b/arch/powerpc/mm/book3s64/radix_pgtable.c > > index 46fa46ce6526..c661e42bb2f1 100644 > > --- a/arch/powerpc/mm/book3s64/radix_pgtable.c > > +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c > > @@ -109,7 +109,7 @@ static int early_map_kernel_page(unsigned long > > ea, unsigned long pa, > > ptep = pte_offset_kernel(pmdp, ea); > > > > set_the_pte: > > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); > > + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pfn, > > flags)); > > asm volatile("ptesync": : :"memory"); > > return 0; > > } > > @@ -1522,7 +1522,7 @@ void radix__ptep_modify_prot_commit(struct > > vm_area_struct *vma, > > (atomic_read(&mm->context.copros) > 0)) > > radix__flush_tlb_page(vma, addr); > > > > - set_pte_at(mm, addr, ptep, pte); > > + set_pte_at_unchecked(mm, addr, ptep, pte); > > } > > > > int pud_set_huge(pud_t *pud, phys_addr_t addr, 
pgprot_t prot) > > @@ -1533,7 +1533,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t > > addr, pgprot_t prot) > > if (!radix_enabled()) > > return 0; > > > > - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud); > > + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, > > new_pud); > > > > return 1; > > } > > @@ -1580,7 +1580,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t > > addr, pgprot_t prot) > > if (!radix_enabled()) > > return 0; > > > > - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd); > > + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, > > new_pmd); > > > > return 1; > > } > > diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c > > b/arch/powerpc/mm/nohash/book3e_pgtable.c > > index 1c5e4ecbebeb..10d487b2b991 100644 > > --- a/arch/powerpc/mm/nohash/book3e_pgtable.c > > +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c > > @@ -111,7 +111,7 @@ int __ref map_kernel_page(unsigned long ea, > > phys_addr_t pa, pgprot_t prot) > > } > > ptep = pte_offset_kernel(pmdp, ea); > > } > > - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, > > prot)); > > + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> > > PAGE_SHIFT, prot)); > > > > smp_wmb(); > > return 0; > > diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c > > index e8e0289d7ab0..352679cf2684 100644 > > --- a/arch/powerpc/mm/pgtable.c > > +++ b/arch/powerpc/mm/pgtable.c > > @@ -227,6 +227,13 @@ void set_ptes(struct mm_struct *mm, unsigned > > long addr, pte_t *ptep, > > } > > } > > > > +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long > > addr, > > + pte_t *ptep, pte_t pte) > > +{ > > No need of the > > VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); > > which is in set_ptes() ? Good spot, I'll include that check in this routine. 
> > > + pte = set_pte_filter(pte, addr); > > + __set_pte_at(mm, addr, ptep, pte, 0); > > +} > > + > > void unmap_kernel_page(unsigned long va) > > { > > pmd_t *pmdp = pmd_off_k(va); > > diff --git a/arch/powerpc/mm/pgtable_32.c > > b/arch/powerpc/mm/pgtable_32.c > > index 4be97b4a44f9..a5a26faf91ec 100644 > > --- a/arch/powerpc/mm/pgtable_32.c > > +++ b/arch/powerpc/mm/pgtable_32.c > > @@ -89,7 +89,7 @@ int __ref map_kernel_page(unsigned long va, > > phys_addr_t pa, pgprot_t prot) > > * hash table > > */ > > BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && > > pgprot_val(prot)); > > - set_pte_at(&init_mm, va, pg, pfn_pte(pa >> > > PAGE_SHIFT, prot)); > > + set_pte_at_unchecked(&init_mm, va, pg, pfn_pte(pa > > >> PAGE_SHIFT, prot)); > > } > > smp_wmb(); > > return err;
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 3741a63fb82e..6ff1d8cfa216 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -44,6 +44,8 @@ struct mm_struct; void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr); #define set_ptes set_ptes +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 988948d69bc1..871472f99a01 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -165,7 +165,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); } else { /* * If the mm subsystem is not fully up, we cannot create a diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 3438ab72c346..25082ab6018b 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -116,7 +116,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, WARN_ON(!(pmd_large(pmd))); #endif trace_hugepage_set_pmd(addr, pmd_val(pmd)); - return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); + return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } void set_pud_at(struct mm_struct *mm, unsigned long addr, @@ -133,7 +133,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr, WARN_ON(!(pud_large(pud))); #endif trace_hugepage_set_pud(addr, pud_val(pud)); - return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud)); + return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), 
pud_pte(pud)); } static void do_serialize(void *arg) @@ -539,7 +539,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, if (radix_enabled()) return radix__ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); - set_pte_at(vma->vm_mm, addr, ptep, pte); + set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 46fa46ce6526..c661e42bb2f1 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -109,7 +109,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, ptep = pte_offset_kernel(pmdp, ea); set_the_pte: - set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pfn, flags)); asm volatile("ptesync": : :"memory"); return 0; } @@ -1522,7 +1522,7 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, (atomic_read(&mm->context.copros) > 0)) radix__flush_tlb_page(vma, addr); - set_pte_at(mm, addr, ptep, pte); + set_pte_at_unchecked(mm, addr, ptep, pte); } int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) @@ -1533,7 +1533,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) if (!radix_enabled()) return 0; - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud); + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pud); return 1; } @@ -1580,7 +1580,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) if (!radix_enabled()) return 0; - set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd); + set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pmd); return 1; } diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c index 1c5e4ecbebeb..10d487b2b991 100644 --- a/arch/powerpc/mm/nohash/book3e_pgtable.c +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c @@ -111,7 +111,7 @@ int __ref 
map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot) } ptep = pte_offset_kernel(pmdp, ea); } - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); + set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); smp_wmb(); return 0; diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index e8e0289d7ab0..352679cf2684 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -227,6 +227,13 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, } } +void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + pte = set_pte_filter(pte, addr); + __set_pte_at(mm, addr, ptep, pte, 0); +} + void unmap_kernel_page(unsigned long va) { pmd_t *pmdp = pmd_off_k(va); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 4be97b4a44f9..a5a26faf91ec 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -89,7 +89,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) * hash table */ BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); - set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); + set_pte_at_unchecked(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); } smp_wmb(); return err;
In the new set_ptes() API, set_pte_at() (a special case of set_ptes()) is intended to be instrumented by the page table check facility. There are however several other routines that constitute the API for setting page table entries, including set_pmd_at() among others. Such routines are themselves implemented in terms of set_ptes(). A future patch providing support for page table checking on powerpc must take care to avoid duplicate calls to page_table_check_p{te,md,ud}_set(). Allow for assignment of pte entries without instrumentation through the set_pte_at_unchecked() routine introduced in this patch. Cause API-facing routines that call set_pte_at() to instead call set_pte_at_unchecked(), which will remain uninstrumented by page table check. set_ptes() is itself implemented by calls to __set_pte_at(), so this eliminates redundant code. Also prefer set_pte_at_unchecked() in early-boot usages which should not be instrumented. Signed-off-by: Rohan McLure <rmclure@linux.ibm.com> --- v9: New patch v10: don't reuse __set_pte_at(), as that will not apply filters. Instead use new set_pte_at_unchecked(). --- arch/powerpc/include/asm/pgtable.h | 2 ++ arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- arch/powerpc/mm/book3s64/pgtable.c | 6 +++--- arch/powerpc/mm/book3s64/radix_pgtable.c | 8 ++++---- arch/powerpc/mm/nohash/book3e_pgtable.c | 2 +- arch/powerpc/mm/pgtable.c | 7 +++++++ arch/powerpc/mm/pgtable_32.c | 2 +- 7 files changed, 19 insertions(+), 10 deletions(-)