Message ID | 1467779659.13965.101.camel@kernel.crashing.org (mailing list archive) |
---|---|
State | RFC |
Headers | show |
On 06/07/16 14:34, Benjamin Herrenschmidt wrote: > Just using the hash ops won't work anymore since radix will have > NULL in there. Instead create an mmu_cleanup_all() function which > will do the right thing based on the MMU mode. > > For Radix, for now I clear UPRT and the PTCR, effectively switching > back to Radix with no partition table setup. > > Currently set it to NULL on BookE thought it might be a good idea > to wipe the TLB there (Scott ?) > > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> <snip> > + > +/* For use by kexec */ > +void mmu_cleanup_all(void) > +{ > + if (radix_enabled()) > + radix__mmu_cleanup_all(); Should this be more than just radix -- cpu_has_feature(CPU_FTR_ARCH_300)? > + else if (mmu_hash_ops.hpte_clear_all) > + mmu_hash_ops.hpte_clear_all(); > +} > diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c > index 3ababda..f127baa 100644 > --- a/arch/powerpc/mm/pgtable-radix.c > +++ b/arch/powerpc/mm/pgtable-radix.c > @@ -366,6 +366,16 @@ void radix__early_init_mmu_secondary(void) > } > } > > +void radix__mmu_cleanup_all(void) > +{ > + unsigned long lpcr; > + if (!firmware_has_feature(FW_FEATURE_LPAR)) { > + lpcr = mfspr(SPRN_LPCR); > + mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT); > + mtspr(SPRN_PTCR, 0); Free up the patb data structure here? We also need tlbie's to invalidate any caches pointing to the patb entries Balbir Singh.
On Fri, 2016-07-08 at 12:55 +1000, Balbir Singh wrote: > + > > +/* For use by kexec */ > > +void mmu_cleanup_all(void) > > +{ > > + if (radix_enabled()) > > + radix__mmu_cleanup_all(); > > > Should this be more than just radix -- cpu_has_feature(CPU_FTR_ARCH_300)? I don't understand... We need to cleanup the radix if we use a radix, otherwise we cleanup the hash ... Basically this will switch back to hash mode before kexec. > > + else if (mmu_hash_ops.hpte_clear_all) > > + mmu_hash_ops.hpte_clear_all(); > > +} > > diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c > > index 3ababda..f127baa 100644 > > --- a/arch/powerpc/mm/pgtable-radix.c > > +++ b/arch/powerpc/mm/pgtable-radix.c > > @@ -366,6 +366,16 @@ void radix__early_init_mmu_secondary(void) > > } > > } > > > > +void radix__mmu_cleanup_all(void) > > +{ > > + unsigned long lpcr; > > + if (!firmware_has_feature(FW_FEATURE_LPAR)) { > > + lpcr = mfspr(SPRN_LPCR); > > + mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT); > > + mtspr(SPRN_PTCR, 0); > > Free up the patb data structure here? Why do we care freeing it ? We're about to kexec. > We also need tlbie's to invalidate any caches pointing to the patb entries Yes, that would be definitely a useful addition, I should have mentioned this is a stub so we don't crash due to a NULL pointer, but more work is needed to properly support kexec, which I'll let Aneesh do. On the other hand for safety, the new kernel should also flush its TLB on entry I reckon. Especially since I don't think we do the cleanup on crashdump. Cheers, Ben.
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 6a90efe..b5c97c1 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -135,5 +135,8 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, return hash__setup_initial_memory_limit(first_memblock_base, first_memblock_size); } + +extern void radix__mmu_cleanup_all(void); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */ diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index cd4f04a..b62a8d4 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -313,6 +313,9 @@ extern int book3e_htw_mode; * return 1, indicating that the tlb requires preloading. */ #define HUGETLB_NEED_PRELOAD + +#define mmu_cleanup_all NULL + #endif #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index e53ebeb..50f9aac 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -148,6 +148,9 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; * make it match the size our of bolted TLB area */ extern u64 ppc64_rma_size; + +/* Cleanup function used by kexec */ +extern void mmu_cleanup_all(void); #endif /* CONFIG_PPC64 */ struct mm_struct; diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 833a79d..0595ede 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -54,9 +54,6 @@ int default_machine_kexec_prepare(struct kimage *image) const unsigned long *basep; const unsigned int *sizep; - if (!mmu_hash_ops.hpte_clear_all) - return -ENOENT; - /* * Since we use the kernel fault handlers and paging code to * handle the virtual mode, we must make sure no destination @@ -379,12 +376,7 @@ void default_machine_kexec(struct kimage *image) */ 
kexec_sequence(&kexec_stack, image->start, image, page_address(image->control_code_page), -#ifdef CONFIG_PPC_BOOK3E - NULL -#else - mmu_hash_ops.hpte_clear_all -#endif - ); + mmu_cleanup_all); /* NOTREACHED */ } diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 85c6070..b654e3c 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -115,3 +115,12 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, return; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +/* For use by kexec */ +void mmu_cleanup_all(void) +{ + if (radix_enabled()) + radix__mmu_cleanup_all(); + else if (mmu_hash_ops.hpte_clear_all) + mmu_hash_ops.hpte_clear_all(); +} diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 3ababda..f127baa 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -366,6 +366,16 @@ void radix__early_init_mmu_secondary(void) } } +void radix__mmu_cleanup_all(void) +{ + unsigned long lpcr; + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT); + mtspr(SPRN_PTCR, 0); + } +} + void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) {
Just using the hash ops won't work anymore since radix will have NULL in there. Instead create an mmu_cleanup_all() function which will do the right thing based on the MMU mode. For Radix, for now I clear UPRT and the PTCR, effectively switching back to Radix with no partition table setup. Currently set it to NULL on BookE though it might be a good idea to wipe the TLB there (Scott ?) Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> --- Currently only compile tested arch/powerpc/include/asm/book3s/64/mmu.h | 3 +++ arch/powerpc/include/asm/mmu-book3e.h | 3 +++ arch/powerpc/include/asm/mmu.h | 3 +++ arch/powerpc/kernel/machine_kexec_64.c | 10 +--------- arch/powerpc/mm/pgtable-book3s64.c | 9 +++++++++ arch/powerpc/mm/pgtable-radix.c | 10 ++++++++++ 6 files changed, 29 insertions(+), 9 deletions(-)