[v2,13/16] powerpc/64s: Move hash MMU code under a new Kconfig name

Message ID 20211021035417.2157804-14-npiggin@gmail.com (mailing list archive)
State Superseded
Series powerpc: Make hash MMU code build configurable

Commit Message

Nicholas Piggin Oct. 21, 2021, 3:54 a.m. UTC
Introduce a new option CONFIG_PPC_64S_HASH_MMU, and make 64s hash
code depend on it.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/Kconfig                          |  2 +-
 arch/powerpc/include/asm/book3s/64/mmu.h      | 19 +++++++++--
 .../include/asm/book3s/64/tlbflush-hash.h     |  7 ++++
 arch/powerpc/include/asm/book3s/pgtable.h     |  4 +++
 arch/powerpc/include/asm/mmu.h                | 16 +++++++--
 arch/powerpc/include/asm/mmu_context.h        |  2 ++
 arch/powerpc/include/asm/paca.h               |  8 +++++
 arch/powerpc/kernel/asm-offsets.c             |  2 ++
 arch/powerpc/kernel/dt_cpu_ftrs.c             | 14 +++++---
 arch/powerpc/kernel/entry_64.S                |  4 +--
 arch/powerpc/kernel/exceptions-64s.S          | 16 +++++++++
 arch/powerpc/kernel/mce.c                     |  2 +-
 arch/powerpc/kernel/mce_power.c               | 10 ++++--
 arch/powerpc/kernel/paca.c                    | 18 ++++------
 arch/powerpc/kernel/process.c                 | 13 +++----
 arch/powerpc/kernel/prom.c                    |  2 ++
 arch/powerpc/kernel/setup_64.c                |  5 +++
 arch/powerpc/kexec/core_64.c                  |  4 +--
 arch/powerpc/kexec/ranges.c                   |  4 +++
 arch/powerpc/mm/book3s64/Makefile             | 15 ++++----
 arch/powerpc/mm/book3s64/hugetlbpage.c        |  2 ++
 arch/powerpc/mm/book3s64/mmu_context.c        | 34 +++++++++++++++----
 arch/powerpc/mm/book3s64/radix_pgtable.c      |  4 +++
 arch/powerpc/mm/copro_fault.c                 |  2 ++
 arch/powerpc/mm/ioremap.c                     | 13 ++++---
 arch/powerpc/mm/pgtable.c                     | 10 ++++--
 arch/powerpc/mm/ptdump/Makefile               |  2 +-
 arch/powerpc/platforms/Kconfig.cputype        |  4 +++
 arch/powerpc/platforms/powernv/idle.c         |  2 ++
 arch/powerpc/platforms/powernv/setup.c        |  2 ++
 arch/powerpc/platforms/pseries/lpar.c         | 11 ++++--
 arch/powerpc/platforms/pseries/lparcfg.c      |  2 +-
 arch/powerpc/platforms/pseries/mobility.c     |  6 ++++
 arch/powerpc/platforms/pseries/ras.c          |  2 ++
 arch/powerpc/platforms/pseries/reconfig.c     |  2 ++
 arch/powerpc/platforms/pseries/setup.c        |  6 ++--
 arch/powerpc/xmon/xmon.c                      |  8 +++--
 drivers/misc/lkdtm/Makefile                   |  2 +-
 drivers/misc/lkdtm/core.c                     |  2 +-
 39 files changed, 219 insertions(+), 64 deletions(-)

Comments

Christophe Leroy Oct. 21, 2021, 5:43 a.m. UTC | #1
On 21/10/2021 at 05:54, Nicholas Piggin wrote:
> Introduce a new option CONFIG_PPC_64S_HASH_MMU, and make 64s hash
> code depend on it.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   [... diffstat trimmed ...]
>   39 files changed, 219 insertions(+), 64 deletions(-)

I'm still uncomfortable with the number of files impacted in that commit.

> 

> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index c02f42d1031e..d94ebae386b6 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h

> @@ -193,8 +198,15 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
>   extern int mmu_linear_psize;
>   extern int mmu_virtual_psize;
>   extern int mmu_vmalloc_psize;
> -extern int mmu_vmemmap_psize;
>   extern int mmu_io_psize;
> +#else /* CONFIG_PPC_64S_HASH_MMU */
> +#ifdef CONFIG_PPC_64K_PAGES

Avoid nested #ifdefs and do

#elif defined(CONFIG_PPC_64K_PAGES)
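
i.e. the whole block collapses into one conditional chain, something like
(untested sketch, with the hash declarations kept as in the patch):

#ifdef CONFIG_PPC_64S_HASH_MMU
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
#elif defined(CONFIG_PPC_64K_PAGES)
#define mmu_virtual_psize MMU_PAGE_64K
#else
#define mmu_virtual_psize MMU_PAGE_4K
#endif
extern int mmu_vmemmap_psize;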

> +#define mmu_virtual_psize MMU_PAGE_64K
> +#else
> +#define mmu_virtual_psize MMU_PAGE_4K
> +#endif
> +#endif
> +extern int mmu_vmemmap_psize;
>   
>   /* MMU initialization */
>   void mmu_early_init_devtree(void);
> @@ -233,7 +245,8 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
>   	 * know which translations we will pick. Hence go with hash
>   	 * restrictions.
>   	 */
> -	return hash__setup_initial_memory_limit(first_memblock_base,
> +	if (!radix_enabled())
> +		return hash__setup_initial_memory_limit(first_memblock_base,
>   					   first_memblock_size);
>   }
>   
> @@ -255,6 +268,7 @@ static inline void radix_init_pseries(void) { }
>   void cleanup_cpu_mmu_context(void);
>   #endif
>   
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
>   {
>   	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
> @@ -274,6 +288,7 @@ static inline unsigned long get_user_vsid(mm_context_t *ctx,
>   
>   	return get_vsid(context, ea, ssize);
>   }
> +#endif
>   
>   #endif /* __ASSEMBLY__ */
>   #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
> diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
> index 3b95769739c7..06f4bd09eecf 100644
> --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
> @@ -112,8 +112,15 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
>   
>   struct mmu_gather;
>   extern void hash__tlb_flush(struct mmu_gather *tlb);
> +extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,

'extern' is superfluous.

> +				unsigned long addr);
> +
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   /* Private function for use by PCI IO mapping code */
>   extern void __flush_hash_table_range(unsigned long start, unsigned long end);
>   extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
>   				unsigned long addr);
> +#else
> +static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
> +#endif
>   #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */

> diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
> index 8abe8e42e045..226417f26145 100644
> --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -157,7 +157,7 @@ DECLARE_PER_CPU(int, next_tlbcam_idx);
>   
>   enum {
>   	MMU_FTRS_POSSIBLE =
> -#if defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_BOOK3S_604)
> +#if defined(CONFIG_PPC_BOOK3S_604)
>   		MMU_FTR_HPTE_TABLE |
>   #endif
>   #ifdef CONFIG_PPC_8xx
> @@ -184,15 +184,18 @@ enum {
>   		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
>   #endif
>   #ifdef CONFIG_PPC_BOOK3S_64
> +		MMU_FTR_KERNEL_RO |
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
>   		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
>   		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
> -		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
> +		MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
>   #endif
>   #ifdef CONFIG_PPC_RADIX_MMU
>   		MMU_FTR_TYPE_RADIX |
>   		MMU_FTR_GTSE |
>   #endif /* CONFIG_PPC_RADIX_MMU */
> +#endif
>   #ifdef CONFIG_PPC_KUAP
>   	MMU_FTR_BOOK3S_KUAP |
>   #endif /* CONFIG_PPC_KUAP */
> @@ -223,6 +226,13 @@ enum {
>   #ifdef CONFIG_E500
>   #define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_FSL_E
>   #endif
> +#ifdef CONFIG_PPC_BOOK3S_64

No need of this CONFIG_PPC_BOOK3S_64 ifdef, it is necessarily defined if 
either CONFIG_PPC_RADIX_MMU or CONFIG_PPC_64S_HASH_MMU is defined
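
i.e. just (sketch):

#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
#define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_RADIX
#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
#define MMU_FTRS_ALWAYS		MMU_FTR_HPTE_TABLE
#endif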


> +#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
> +#define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_RADIX
> +#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
> +#define MMU_FTRS_ALWAYS		MMU_FTR_HPTE_TABLE
> +#endif
> +#endif
>   
>   #ifndef MMU_FTRS_ALWAYS
>   #define MMU_FTRS_ALWAYS		0

> diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
> index 046c99e31d01..65b695e9401e 100644
> --- a/arch/powerpc/kernel/exceptions-64s.S
> +++ b/arch/powerpc/kernel/exceptions-64s.S
> @@ -1369,11 +1369,15 @@ EXC_COMMON_BEGIN(data_access_common)
>   	addi	r3,r1,STACK_FRAME_OVERHEAD
>   	andis.	r0,r4,DSISR_DABRMATCH@h
>   	bne-	1f
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   BEGIN_MMU_FTR_SECTION
>   	bl	do_hash_fault
>   MMU_FTR_SECTION_ELSE
>   	bl	do_page_fault
>   ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
> +#else
> +	bl	do_page_fault
> +#endif

Maybe we could always branch to do_page_fault() and get redirected to 
do_hash_fault() from do_page_fault() if BOOK3S64 && !radix_enabled() ?

Another solution is to make it a GAS macro?
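
Something like this, perhaps (untested sketch, macro name made up):

.macro BL_BOOK3S_DO_PAGE_FAULT
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
.endm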

>   	b	interrupt_return_srr
>   
>   1:	bl	do_break
> @@ -1416,6 +1420,7 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
>   EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
>   EXC_COMMON_BEGIN(data_access_slb_common)
>   	GEN_COMMON data_access_slb
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   BEGIN_MMU_FTR_SECTION
>   	/* HPT case, do SLB fault */
>   	addi	r3,r1,STACK_FRAME_OVERHEAD
> @@ -1428,6 +1433,9 @@ MMU_FTR_SECTION_ELSE
>   	/* Radix case, access is outside page table range */
>   	li	r3,-EFAULT
>   ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
> +#else
> +	li	r3,-EFAULT
> +#endif
>   	std	r3,RESULT(r1)
>   	addi	r3,r1,STACK_FRAME_OVERHEAD
>   	bl	do_bad_segment_interrupt
> @@ -1462,11 +1470,15 @@ EXC_VIRT_END(instruction_access, 0x4400, 0x80)
>   EXC_COMMON_BEGIN(instruction_access_common)
>   	GEN_COMMON instruction_access
>   	addi	r3,r1,STACK_FRAME_OVERHEAD
> +#ifdef CONFIG_PPC_64S_HASH_MMU
>   BEGIN_MMU_FTR_SECTION
>   	bl	do_hash_fault
>   MMU_FTR_SECTION_ELSE
>   	bl	do_page_fault
>   ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
> +#else
> +	bl	do_page_fault
> +#endif

Same as above.

>   	b	interrupt_return_srr
>   
>   

> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
> index 7d4bcbc3124e..59b1a1833143 100644
> --- a/arch/powerpc/kernel/setup_64.c
> +++ b/arch/powerpc/kernel/setup_64.c
> @@ -887,6 +887,8 @@ void __init setup_per_cpu_areas(void)
>   	} else if (radix_enabled()) {
>   		atom_size = PAGE_SIZE;
>   	} else {
> +#ifdef CONFIG_PPC_64S_HASH_MMU

Use IS_ENABLED()


> +
>   		/*
>   		 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
>   		 * to group units.  For larger mappings, use 1M atom which
> @@ -896,6 +898,9 @@ void __init setup_per_cpu_areas(void)
>   			atom_size = PAGE_SIZE;
>   		else
>   			atom_size = 1 << 20;
> +#else
> +		BUILD_BUG(); // radix_enabled() should be constant true
> +#endif
>   	}
>   
>   	if (pcpu_chosen_fc != PCPU_FC_PAGE) {

> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
> index cd16b407f47e..ab105d33e0b0 100644
> --- a/arch/powerpc/mm/pgtable.c
> +++ b/arch/powerpc/mm/pgtable.c
> @@ -81,9 +81,6 @@ static struct page *maybe_pte_to_page(pte_t pte)
>   
>   static pte_t set_pte_filter_hash(pte_t pte)
>   {
> -	if (radix_enabled())
> -		return pte;
> -
>   	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
>   	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
>   				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
> @@ -112,6 +109,9 @@ static inline pte_t set_pte_filter(pte_t pte)
>   {
>   	struct page *pg;
>   
> +	if (radix_enabled())
> +		return pte;
> +
>   	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
>   		return set_pte_filter_hash(pte);
>   
> @@ -144,6 +144,10 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
>   {
>   	struct page *pg;
>   
> +#ifdef CONFIG_PPC_BOOK3S_64
> +	return pte;
> +#endif

Aren't you changing the behaviour here for RADIX ?

Anyway, can use IS_ENABLED()
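
i.e. (sketch):

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return pte;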

> +
>   	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
>   		return pte;
>   
> diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile
> index 4050cbb55acf..b533caaf0910 100644
> --- a/arch/powerpc/mm/ptdump/Makefile
> +++ b/arch/powerpc/mm/ptdump/Makefile
> @@ -10,5 +10,5 @@ obj-$(CONFIG_PPC_BOOK3S_64)	+= book3s64.o
>   
>   ifdef CONFIG_PTDUMP_DEBUGFS
>   obj-$(CONFIG_PPC_BOOK3S_32)	+= bats.o segment_regs.o
> -obj-$(CONFIG_PPC_BOOK3S_64)	+= hashpagetable.o
> +obj-$(CONFIG_PPC_64S_HASH_MMU)	+= hashpagetable.o
>   endif
> diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
> index a208997ade88..01726e7f2c7f 100644
> --- a/arch/powerpc/platforms/Kconfig.cputype
> +++ b/arch/powerpc/platforms/Kconfig.cputype
> @@ -105,6 +105,7 @@ config PPC_BOOK3S_64
>   	select HAVE_MOVE_PMD
>   	select HAVE_MOVE_PUD
>   	select IRQ_WORK
> +	select PPC_64S_HASH_MMU

Not needed

>   	select PPC_MM_SLICES
>   	select PPC_HAVE_KUEP
>   	select PPC_HAVE_KUAP
> @@ -364,6 +365,9 @@ config SPE
>   
>   	  If in doubt, say Y here.
>   
> +config PPC_64S_HASH_MMU
> +	bool

Add
	depends on PPC_BOOK3S_64
	default y
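
so the entry becomes (sketch):

config PPC_64S_HASH_MMU
	bool
	depends on PPC_BOOK3S_64
	default y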

> +
>   config PPC_RADIX_MMU
>   	bool "Radix MMU Support"
>   	depends on PPC_BOOK3S_64

> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
> index dd8241c009e5..33de8d798c95 100644
> --- a/arch/powerpc/xmon/xmon.c
> +++ b/arch/powerpc/xmon/xmon.c
> @@ -1160,9 +1160,11 @@ cmds(struct pt_regs *excp)
>   			show_tasks();
>   			break;
>   #ifdef CONFIG_PPC_BOOK3S
> +#ifdef CONFIG_PPC_64S_HASH_MMU

And BOOK3S/32 ???
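
e.g. keep the 'u' command for 32-bit book3s as well, something like
(untested sketch):

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU)
		case 'u':
			dump_segments();
			break;
#endif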

>   		case 'u':
>   			dump_segments();
>   			break;
> +#endif
>   #elif defined(CONFIG_44x)
>   		case 'u':
>   			dump_tlb_44x();
Nicholas Piggin Oct. 21, 2021, 7:33 a.m. UTC | #2
Excerpts from Christophe Leroy's message of October 21, 2021 3:43 pm:
> 
> 
> On 21/10/2021 at 05:54, Nicholas Piggin wrote:
>> Introduce a new option CONFIG_PPC_64S_HASH_MMU, and make 64s hash
>> code depend on it.
>> 
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>>   [... diffstat trimmed ...]
>>   39 files changed, 219 insertions(+), 64 deletions(-)
> 
> I'm still uncomfortable with the number of files impacted in that commit.

Hmm. Splitting it into N partial patches that have the same result
doesn't seem better to me. There are a few other little things that would
be better split out, but the size of the patch will not shrink much.

>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>> index c02f42d1031e..d94ebae386b6 100644
>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> 
>> @@ -193,8 +198,15 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
>>   extern int mmu_linear_psize;
>>   extern int mmu_virtual_psize;
>>   extern int mmu_vmalloc_psize;
>> -extern int mmu_vmemmap_psize;
>>   extern int mmu_io_psize;
>> +#else /* CONFIG_PPC_64S_HASH_MMU */
>> +#ifdef CONFIG_PPC_64K_PAGES
> 
> Avoid nested #ifdefs and do
> 
> #elif defined(CONFIG_PPC_64K_PAGES)

I sort of like the nesting because this is the !HASH block. But I don't
care much, so I can do it your way if you prefer.

>> @@ -223,6 +226,13 @@ enum {
>>   #ifdef CONFIG_E500
>>   #define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_FSL_E
>>   #endif
>> +#ifdef CONFIG_PPC_BOOK3S_64
> 
> No need of this CONFIG_PPC_BOOK3S_64 ifdef, it is necessarily defined if 
> either CONFIG_PPC_RADIX_MMU or CONFIG_PPC_64S_HASH_MMU is defined

Good point.

>> +#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
>> +#define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_RADIX
>> +#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
>> +#define MMU_FTRS_ALWAYS		MMU_FTR_HPTE_TABLE
>> +#endif
>> +#endif
>>   
>>   #ifndef MMU_FTRS_ALWAYS
>>   #define MMU_FTRS_ALWAYS		0
> 
>> diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
>> index 046c99e31d01..65b695e9401e 100644
>> --- a/arch/powerpc/kernel/exceptions-64s.S
>> +++ b/arch/powerpc/kernel/exceptions-64s.S
>> @@ -1369,11 +1369,15 @@ EXC_COMMON_BEGIN(data_access_common)
>>   	addi	r3,r1,STACK_FRAME_OVERHEAD
>>   	andis.	r0,r4,DSISR_DABRMATCH@h
>>   	bne-	1f
>> +#ifdef CONFIG_PPC_64S_HASH_MMU
>>   BEGIN_MMU_FTR_SECTION
>>   	bl	do_hash_fault
>>   MMU_FTR_SECTION_ELSE
>>   	bl	do_page_fault
>>   ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
>> +#else
>> +	bl	do_page_fault
>> +#endif
> 
> Maybe we could always branch to do_page_fault() and get redirected to 
> do_hash_fault() from do_page_fault() if BOOK3S64 && !radix_enabled() ?

Not trivial because of do_hash_fault().

A book3s_64_do_page_fault() raw handler which calls do_hash_fault() or
do_page_fault() as necessary would work, but that's less optimal, and we
already have this gunk so we might as well keep it.
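
Roughly (sketch only, ignoring the interrupt handler wrapper details):

void book3s_64_do_page_fault(struct pt_regs *regs)
{
	if (radix_enabled())
		do_page_fault(regs);
	else
		do_hash_fault(regs);
}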

> 
> Another solution is to make it a GAS macro ?

For this I don't find it too bad. Another macro means another thing to
look up. Maybe if we had more callers, but for now this isn't too
terrible.

>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>> index 7d4bcbc3124e..59b1a1833143 100644
>> --- a/arch/powerpc/kernel/setup_64.c
>> +++ b/arch/powerpc/kernel/setup_64.c
>> @@ -887,6 +887,8 @@ void __init setup_per_cpu_areas(void)
>>   	} else if (radix_enabled()) {
>>   		atom_size = PAGE_SIZE;
>>   	} else {
>> +#ifdef CONFIG_PPC_64S_HASH_MMU
> 
> Use IS_ENABLED()

Can't because of mmu_linear_psize, which is only declared in the hash build.

>> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
>> index cd16b407f47e..ab105d33e0b0 100644
>> --- a/arch/powerpc/mm/pgtable.c
>> +++ b/arch/powerpc/mm/pgtable.c
>> @@ -81,9 +81,6 @@ static struct page *maybe_pte_to_page(pte_t pte)
>>   
>>   static pte_t set_pte_filter_hash(pte_t pte)
>>   {
>> -	if (radix_enabled())
>> -		return pte;
>> -
>>   	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
>>   	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
>>   				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
>> @@ -112,6 +109,9 @@ static inline pte_t set_pte_filter(pte_t pte)
>>   {
>>   	struct page *pg;
>>   
>> +	if (radix_enabled())
>> +		return pte;
>> +
>>   	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
>>   		return set_pte_filter_hash(pte);
>>   
>> @@ -144,6 +144,10 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
>>   {
>>   	struct page *pg;
>>   
>> +#ifdef CONFIG_PPC_BOOK3S_64
>> +	return pte;
>> +#endif
> 
> Aren't you changing the behaviour here for RADIX ?

No, 64s has always had MMU_FTR_HPTE_TABLE set until now. I should
actually split this change out.

> Anyway, can use IS_ENABLED()

Good point.

> 
>> +
>>   	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
>>   		return pte;
>>   
>> diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile
>> index 4050cbb55acf..b533caaf0910 100644
>> --- a/arch/powerpc/mm/ptdump/Makefile
>> +++ b/arch/powerpc/mm/ptdump/Makefile
>> @@ -10,5 +10,5 @@ obj-$(CONFIG_PPC_BOOK3S_64)	+= book3s64.o
>>   
>>   ifdef CONFIG_PTDUMP_DEBUGFS
>>   obj-$(CONFIG_PPC_BOOK3S_32)	+= bats.o segment_regs.o
>> -obj-$(CONFIG_PPC_BOOK3S_64)	+= hashpagetable.o
>> +obj-$(CONFIG_PPC_64S_HASH_MMU)	+= hashpagetable.o
>>   endif
>> diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
>> index a208997ade88..01726e7f2c7f 100644
>> --- a/arch/powerpc/platforms/Kconfig.cputype
>> +++ b/arch/powerpc/platforms/Kconfig.cputype
>> @@ -105,6 +105,7 @@ config PPC_BOOK3S_64
>>   	select HAVE_MOVE_PMD
>>   	select HAVE_MOVE_PUD
>>   	select IRQ_WORK
>> +	select PPC_64S_HASH_MMU
> 
> Not needed
> 
>>   	select PPC_MM_SLICES
>>   	select PPC_HAVE_KUEP
>>   	select PPC_HAVE_KUAP
>> @@ -364,6 +365,9 @@ config SPE
>>   
>>   	  If in doubt, say Y here.
>>   
>> +config PPC_64S_HASH_MMU
>> +	bool
> 
> Add
> 	depends on PPC_BOOK3S_64
> 	default y
> 
>> +
>>   config PPC_RADIX_MMU
>>   	bool "Radix MMU Support"
>>   	depends on PPC_BOOK3S_64
> 
>> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
>> index dd8241c009e5..33de8d798c95 100644
>> --- a/arch/powerpc/xmon/xmon.c
>> +++ b/arch/powerpc/xmon/xmon.c
>> @@ -1160,9 +1160,11 @@ cmds(struct pt_regs *excp)
>>   			show_tasks();
>>   			break;
>>   #ifdef CONFIG_PPC_BOOK3S
>> +#ifdef CONFIG_PPC_64S_HASH_MMU
> 
> And BOOK3S/32 ???

Oops, good catch.

Thanks,
Nick

> 
>>   		case 'u':
>>   			dump_segments();
>>   			break;
>> +#endif
>>   #elif defined(CONFIG_44x)
>>   		case 'u':
>>   			dump_tlb_44x();

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ba5b66189358..4b34e408a3e6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -838,7 +838,7 @@  config FORCE_MAX_ZONEORDER
 config PPC_SUBPAGE_PROT
 	bool "Support setting protections for 4k subpages (subpage_prot syscall)"
 	default n
-	depends on PPC_BOOK3S_64 && PPC_64K_PAGES
+	depends on PPC_64S_HASH_MMU && PPC_64K_PAGES
 	help
 	  This option adds support for system call to allow user programs
 	  to set access permissions (read/write, readonly, or no access)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c02f42d1031e..d94ebae386b6 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -98,7 +98,9 @@  typedef struct {
 		 * from EA and new context ids to build the new VAs.
 		 */
 		mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
 		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
 	};
 
 	/* Number of bits in the mm_cpumask */
@@ -110,7 +112,9 @@  typedef struct {
 	/* Number of user space windows opened in process mm_context */
 	atomic_t vas_windows;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	struct hash_mm_context *hash_context;
+#endif
 
 	void __user *vdso;
 	/*
@@ -133,6 +137,7 @@  typedef struct {
 #endif
 } mm_context_t;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
 {
 	return ctx->hash_context->user_psize;
@@ -193,8 +198,15 @@  static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_vmemmap_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
@@ -233,7 +245,8 @@  static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	 * know which translations we will pick. Hence go with hash
 	 * restrictions.
 	 */
-	return hash__setup_initial_memory_limit(first_memblock_base,
+	if (!radix_enabled())
+		return hash__setup_initial_memory_limit(first_memblock_base,
 					   first_memblock_size);
 }
 
@@ -255,6 +268,7 @@  static inline void radix_init_pseries(void) { }
 void cleanup_cpu_mmu_context(void);
 #endif
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
 	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@@ -274,6 +288,7 @@  static inline unsigned long get_user_vsid(mm_context_t *ctx,
 
 	return get_vsid(context, ea, ssize);
 }
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 3b95769739c7..06f4bd09eecf 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -112,8 +112,15 @@  static inline void hash__flush_tlb_kernel_range(unsigned long start,
 
 struct mmu_gather;
 extern void hash__tlb_flush(struct mmu_gather *tlb);
+extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long addr);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /* Private function for use by PCI IO mapping code */
 extern void __flush_hash_table_range(unsigned long start, unsigned long end);
 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
 				unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index ad130e15a126..818d7c851d36 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -25,6 +25,7 @@  extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+#if defined(CONFIG_PPC_HASH_MMU_NATIVE) || defined(CONFIG_PPC_64S_HASH_MMU)
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -35,6 +36,9 @@  extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  * waiting for the inevitable extra hash-table miss exception.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 8abe8e42e045..226417f26145 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -157,7 +157,7 @@  DECLARE_PER_CPU(int, next_tlbcam_idx);
 
 enum {
 	MMU_FTRS_POSSIBLE =
-#if defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_BOOK3S_604)
+#if defined(CONFIG_PPC_BOOK3S_604)
 		MMU_FTR_HPTE_TABLE |
 #endif
 #ifdef CONFIG_PPC_8xx
@@ -184,15 +184,18 @@  enum {
 		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
+		MMU_FTR_KERNEL_RO |
+#ifdef CONFIG_PPC_64S_HASH_MMU
 		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
 		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
 		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
-		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
+		MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
 #endif
 #ifdef CONFIG_PPC_RADIX_MMU
 		MMU_FTR_TYPE_RADIX |
 		MMU_FTR_GTSE |
 #endif /* CONFIG_PPC_RADIX_MMU */
+#endif
 #ifdef CONFIG_PPC_KUAP
 	MMU_FTR_BOOK3S_KUAP |
 #endif /* CONFIG_PPC_KUAP */
@@ -223,6 +226,13 @@  enum {
 #ifdef CONFIG_E500
 #define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_FSL_E
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS		MMU_FTR_TYPE_RADIX
+#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS		MMU_FTR_HPTE_TABLE
+#endif
+#endif
 
 #ifndef MMU_FTRS_ALWAYS
 #define MMU_FTRS_ALWAYS		0
@@ -329,7 +339,7 @@  static __always_inline bool radix_enabled(void)
 	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
 }
 
-static inline bool early_radix_enabled(void)
+static __always_inline bool early_radix_enabled(void)
 {
 	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
 }
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 9ba6b585337f..e46394d27785 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -75,6 +75,7 @@  extern void hash__reserve_context_id(int id);
 extern void __destroy_context(int context_id);
 static inline void mmu_context_init(void) { }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline int alloc_extended_context(struct mm_struct *mm,
 					 unsigned long ea)
 {
@@ -100,6 +101,7 @@  static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
 		return true;
 	return false;
 }
+#endif
 
 #else
 extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index dc05a862e72a..295573a82c66 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -97,7 +97,9 @@  struct paca_struct {
 					/* this becomes non-zero. */
 	u8 kexec_state;		/* set when kexec down has irqs off */
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	struct slb_shadow *slb_shadow_ptr;
+#endif
 	struct dtl_entry *dispatch_log;
 	struct dtl_entry *dispatch_log_end;
 #endif
@@ -110,6 +112,7 @@  struct paca_struct {
 	/* used for most interrupts/exceptions */
 	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/* SLB related definitions */
 	u16 vmalloc_sllp;
 	u8 slb_cache_ptr;
@@ -120,6 +123,7 @@  struct paca_struct {
 	u32 slb_used_bitmap;		/* Bitmaps for first 32 SLB entries. */
 	u32 slb_kern_bitmap;
 	u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #ifdef CONFIG_PPC_BOOK3E
@@ -149,6 +153,7 @@  struct paca_struct {
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_PPC_64S_HASH_MMU
 #ifdef CONFIG_PPC_MM_SLICES
 	unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
 	unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
@@ -156,6 +161,7 @@  struct paca_struct {
 	u16 mm_ctx_user_psize;
 	u16 mm_ctx_sllp;
 #endif
+#endif
 #endif
 
 	/*
@@ -268,9 +274,11 @@  struct paca_struct {
 #endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/* Capture SLB related old contents in MCE handler. */
 	struct slb_entry *mce_faulty_slbs;
 	u16 slb_save_cache_ptr;
+#endif
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #ifdef CONFIG_STACKPROTECTOR
 	unsigned long canary;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e563d3222d69..c54fdfcfd72b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -220,10 +220,12 @@  int main(void)
 	OFFSET(PACA_EXGEN, paca_struct, exgen);
 	OFFSET(PACA_EXMC, paca_struct, exmc);
 	OFFSET(PACA_EXNMI, paca_struct, exnmi);
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
 	OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
 	OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
 	OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
+#endif
 	OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 358aee7c2d79..f1b86d687095 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -271,6 +271,9 @@  static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
 {
 	u64 lpcr;
 
+	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+		return 0;
+
 	lpcr = mfspr(SPRN_LPCR);
 	lpcr &= ~LPCR_ISL;
 
@@ -290,6 +293,9 @@  static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
 {
 	u64 lpcr;
 
+	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+		return 0;
+
 	lpcr = mfspr(SPRN_LPCR);
 	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
 	mtspr(SPRN_LPCR, lpcr);
@@ -303,15 +309,15 @@  static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
 
 static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
 {
-#ifdef CONFIG_PPC_RADIX_MMU
+	if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
+		return 0;
+
+	cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
 	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
-	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
 	cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
 	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
 
 	return 1;
-#endif
-	return 0;
 }
 
 static int __init feat_enable_dscr(struct dt_cpu_feature *f)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 70cff7b49e17..9581906b5ee9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -180,7 +180,7 @@  _GLOBAL(_switch)
 #endif
 
 	ld	r8,KSP(r4)	/* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
 	b	2f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
@@ -232,7 +232,7 @@  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	slbmte	r7,r0
 	isync
 2:
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 046c99e31d01..65b695e9401e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1369,11 +1369,15 @@  EXC_COMMON_BEGIN(data_access_common)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	andis.	r0,r4,DSISR_DABRMATCH@h
 	bne-	1f
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
 	bl	do_hash_fault
 MMU_FTR_SECTION_ELSE
 	bl	do_page_fault
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+	bl	do_page_fault
+#endif
 	b	interrupt_return_srr
 
 1:	bl	do_break
@@ -1416,6 +1420,7 @@  EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
 EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
 EXC_COMMON_BEGIN(data_access_slb_common)
 	GEN_COMMON data_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
 	/* HPT case, do SLB fault */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -1428,6 +1433,9 @@  MMU_FTR_SECTION_ELSE
 	/* Radix case, access is outside page table range */
 	li	r3,-EFAULT
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+	li	r3,-EFAULT
+#endif
 	std	r3,RESULT(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_bad_segment_interrupt
@@ -1462,11 +1470,15 @@  EXC_VIRT_END(instruction_access, 0x4400, 0x80)
 EXC_COMMON_BEGIN(instruction_access_common)
 	GEN_COMMON instruction_access
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
 	bl	do_hash_fault
 MMU_FTR_SECTION_ELSE
 	bl	do_page_fault
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+	bl	do_page_fault
+#endif
 	b	interrupt_return_srr
 
 
@@ -1496,6 +1508,7 @@  EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
 EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
 EXC_COMMON_BEGIN(instruction_access_slb_common)
 	GEN_COMMON instruction_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
 	/* HPT case, do SLB fault */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -1508,6 +1521,9 @@  MMU_FTR_SECTION_ELSE
 	/* Radix case, access is outside page table range */
 	li	r3,-EFAULT
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+	li	r3,-EFAULT
+#endif
 	std	r3,RESULT(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_bad_segment_interrupt
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index fd829f7f25a4..2503dd4713b9 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -586,7 +586,7 @@  void machine_check_print_event_info(struct machine_check_event *evt,
 		mc_error_class[evt->error_class] : "Unknown";
 	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/* Display faulty slb contents for SLB errors. */
 	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
 		slb_dump_contents(local_paca->mce_faulty_slbs);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index cf5263b648fc..a48ff18d6d65 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -77,7 +77,7 @@  static bool mce_in_guest(void)
 }
 
 /* flush SLBs and reload */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void flush_and_reload_slb(void)
 {
 	if (early_radix_enabled())
@@ -99,7 +99,7 @@  void flush_and_reload_slb(void)
 
 void flush_erat(void)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
 		flush_and_reload_slb();
 		return;
@@ -114,7 +114,7 @@  void flush_erat(void)
 
 static int mce_flush(int what)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (what == MCE_FLUSH_SLB) {
 		flush_and_reload_slb();
 		return 1;
@@ -499,8 +499,10 @@  static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
 			/* attempt to correct the error */
 			switch (table[i].error_type) {
 			case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
 				if (local_paca->in_mce == 1)
 					slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
 				handled = mce_flush(MCE_FLUSH_SLB);
 				break;
 			case MCE_ERROR_TYPE_ERAT:
@@ -588,8 +590,10 @@  static int mce_handle_derror(struct pt_regs *regs,
 			/* attempt to correct the error */
 			switch (table[i].error_type) {
 			case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
 				if (local_paca->in_mce == 1)
 					slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
 				if (mce_flush(MCE_FLUSH_SLB))
 					handled = 1;
 				break;
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 9bd30cac852b..813930374d24 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -139,8 +139,7 @@  static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
 }
 #endif /* CONFIG_PPC_PSERIES */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /*
  * 3 persistent SLBs are allocated here.  The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
@@ -169,8 +168,7 @@  static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
 
 	return s;
 }
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 #ifdef CONFIG_PPC_PSERIES
 /**
@@ -226,7 +224,7 @@  void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	new_paca->kexec_state = KEXEC_STATE_NONE;
 	new_paca->__current = &init_task;
 	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	new_paca->slb_shadow_ptr = NULL;
 #endif
 
@@ -307,7 +305,7 @@  void __init allocate_paca(int cpu)
 #ifdef CONFIG_PPC_PSERIES
 	paca->lppaca_ptr = new_lppaca(cpu, limit);
 #endif
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
 #endif
 #ifdef CONFIG_PPC_PSERIES
@@ -328,7 +326,7 @@  void __init free_unused_pacas(void)
 	paca_nr_cpu_ids = nr_cpu_ids;
 	paca_ptrs_size = new_ptrs_size;
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (early_radix_enabled()) {
 		/* Ugly fixup, see new_slb_shadow() */
 		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
@@ -341,9 +339,9 @@  void __init free_unused_pacas(void)
 			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void copy_mm_to_paca(struct mm_struct *mm)
 {
-#ifdef CONFIG_PPC_BOOK3S
 	mm_context_t *context = &mm->context;
 
 #ifdef CONFIG_PPC_MM_SLICES
@@ -356,7 +354,5 @@  void copy_mm_to_paca(struct mm_struct *mm)
 	get_paca()->mm_ctx_user_psize = context->user_psize;
 	get_paca()->mm_ctx_sllp = context->sllp;
 #endif
-#else /* !CONFIG_PPC_BOOK3S */
-	return;
-#endif
 }
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 50436b52c213..48482aaa9388 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1206,7 +1206,7 @@  struct task_struct *__switch_to(struct task_struct *prev,
 {
 	struct thread_struct *new_thread, *old_thread;
 	struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	struct ppc64_tlb_batch *batch;
 #endif
 
@@ -1215,7 +1215,7 @@  struct task_struct *__switch_to(struct task_struct *prev,
 
 	WARN_ON(!irqs_disabled());
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1294,6 +1294,7 @@  struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/*
 	 * This applies to a process that was context switched while inside
 	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
@@ -1305,6 +1306,7 @@  struct task_struct *__switch_to(struct task_struct *prev,
 		batch = this_cpu_ptr(&ppc64_tlb_batch);
 		batch->active = 1;
 	}
+#endif
 
 	/*
 	 * Math facilities are masked out of the child MSR in copy_thread.
@@ -1655,7 +1657,7 @@  int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 {
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	unsigned long sp_vsid;
 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
@@ -2302,10 +2304,9 @@  unsigned long arch_randomize_brk(struct mm_struct *mm)
 	 * the heap, we can put it above 1TB so it is backed by a 1TB
 	 * segment. Otherwise the heap will be in the bottom 1TB
 	 * which always uses 256MB segments and this may result in a
-	 * performance penalty. We don't need to worry about radix. For
-	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
+	 * performance penalty.
 	 */
-	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+	if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
 #endif
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 2e67588f6f6e..2197404cdcc4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -234,6 +234,7 @@  static void __init check_cpu_pa_features(unsigned long node)
 #ifdef CONFIG_PPC_BOOK3S_64
 static void __init init_mmu_slb_size(unsigned long node)
 {
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	const __be32 *slb_size_ptr;
 
 	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
@@ -241,6 +242,7 @@  static void __init init_mmu_slb_size(unsigned long node)
 
 	if (slb_size_ptr)
 		mmu_slb_size = be32_to_cpup(slb_size_ptr);
+#endif
 }
 #else
 #define init_mmu_slb_size(node) do { } while(0)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7d4bcbc3124e..59b1a1833143 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -887,6 +887,8 @@  void __init setup_per_cpu_areas(void)
 	} else if (radix_enabled()) {
 		atom_size = PAGE_SIZE;
 	} else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+
 		/*
 		 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
 		 * to group units.  For larger mappings, use 1M atom which
@@ -896,6 +898,9 @@  void __init setup_per_cpu_areas(void)
 			atom_size = PAGE_SIZE;
 		else
 			atom_size = 1 << 20;
+#else
+		BUILD_BUG(); // radix_enabled() should be constant true
+#endif
 	}
 
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 89c069d664a5..90b45613b194 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -378,7 +378,7 @@  void default_machine_kexec(struct kimage *image)
 	/* NOTREACHED */
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 static unsigned long htab_size;
@@ -420,4 +420,4 @@  static int __init export_htab_values(void)
 	return 0;
 }
 late_initcall(export_htab_values);
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index 6b81c852feab..92d831621fa0 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -306,10 +306,14 @@  int add_initrd_mem_range(struct crash_mem **mem_ranges)
  */
 int add_htab_mem_range(struct crash_mem **mem_ranges)
 {
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!htab_address)
 		return 0;
 
 	return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
+#else
+	return 0;
+#endif
 }
 #endif
 
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 501efadb287f..2d50cac499c5 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -2,20 +2,23 @@ 
 
 ccflags-y	:= $(NO_MINIMAL_TOC)
 
+obj-y				+= mmu_context.o pgtable.o trace.o
+ifdef CONFIG_PPC_64S_HASH_MMU
 CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
-obj-y				+= hash_pgtable.o hash_utils.o slb.o \
-				   mmu_context.o pgtable.o hash_tlb.o trace.o
+obj-y				+= hash_pgtable.o hash_utils.o hash_tlb.o slb.o
 obj-$(CONFIG_PPC_HASH_MMU_NATIVE)	+= hash_native.o
-obj-$(CONFIG_PPC_RADIX_MMU)	+= radix_pgtable.o radix_tlb.o
 obj-$(CONFIG_PPC_4K_PAGES)	+= hash_4k.o
 obj-$(CONFIG_PPC_64K_PAGES)	+= hash_64k.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage_prot.o
+endif
+
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+
+obj-$(CONFIG_PPC_RADIX_MMU)	+= radix_pgtable.o radix_tlb.o
 ifdef CONFIG_HUGETLB_PAGE
 obj-$(CONFIG_PPC_RADIX_MMU)	+= radix_hugetlbpage.o
 endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage_prot.o
 obj-$(CONFIG_SPAPR_TCE_IOMMU)	+= iommu_api.o
 obj-$(CONFIG_PPC_PKEY)	+= pkeys.o
 
diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index a688e1324ae5..95b2a283fd6e 100644
--- a/arch/powerpc/mm/book3s64/hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -16,6 +16,7 @@ 
 unsigned int hpage_shift;
 EXPORT_SYMBOL(hpage_shift);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, unsigned long flags,
 		     int ssize, unsigned int shift, unsigned int mmu_psize)
@@ -122,6 +123,7 @@  int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
 }
+#endif
 
 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
 				  unsigned long addr, pte_t *ptep)
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index c10fc8a72fb3..ae8d6c91dcd9 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -31,6 +31,7 @@  static int alloc_context_id(int min_id, int max_id)
 	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void hash__reserve_context_id(int id)
 {
 	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
@@ -50,7 +51,9 @@  int hash__alloc_context_id(void)
 	return alloc_context_id(MIN_USER_CONTEXT, max);
 }
 EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+#endif
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static int realloc_context_ids(mm_context_t *ctx)
 {
 	int i, id;
@@ -144,12 +147,21 @@  static int hash__init_new_context(struct mm_struct *mm)
 	return index;
 }
 
+void slb_setup_new_exec(void);
+
 void hash__setup_new_exec(void)
 {
 	slice_setup_new_exec();
 
 	slb_setup_new_exec();
 }
+#else
+static inline int hash__init_new_context(struct mm_struct *mm)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif
 
 static int radix__init_new_context(struct mm_struct *mm)
 {
@@ -175,7 +187,9 @@  static int radix__init_new_context(struct mm_struct *mm)
 	 */
 	asm volatile("ptesync;isync" : : : "memory");
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	mm->context.hash_context = NULL;
+#endif
 
 	return index;
 }
@@ -213,14 +227,22 @@  EXPORT_SYMBOL_GPL(__destroy_context);
 
 static void destroy_contexts(mm_context_t *ctx)
 {
-	int index, context_id;
+	if (radix_enabled()) {
+		ida_free(&mmu_context_ida, ctx->id);
+	} else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+		int index, context_id;
 
-	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
-		context_id = ctx->extended_id[index];
-		if (context_id)
-			ida_free(&mmu_context_ida, context_id);
+		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
+			context_id = ctx->extended_id[index];
+			if (context_id)
+				ida_free(&mmu_context_ida, context_id);
+		}
+		kfree(ctx->hash_context);
+#else
+		BUILD_BUG(); // radix_enabled() should be constant true
+#endif
 	}
-	kfree(ctx->hash_context);
 }
 
 static void pmd_frag_destroy(void *pmd_frag)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index ae20add7954a..123146d1500f 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -334,8 +334,10 @@  static void __init radix_init_pgtable(void)
 	phys_addr_t start, end;
 	u64 i;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/* We don't support slb for radix */
 	mmu_slb_size = 0;
+#endif
 
 	/*
 	 * Create the linear mapping
@@ -588,6 +590,7 @@  void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 #ifdef CONFIG_PPC_64K_PAGES
 	/* PAGE_SIZE mappings */
 	mmu_virtual_psize = MMU_PAGE_64K;
@@ -604,6 +607,7 @@  void __init radix__early_init_mmu(void)
 		mmu_vmemmap_psize = MMU_PAGE_2M;
 	} else
 		mmu_vmemmap_psize = mmu_virtual_psize;
+#endif
 #endif
 	/*
 	 * initialize page table size
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 8acd00178956..c1cb21a00884 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -82,6 +82,7 @@  int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 {
 	u64 vsid, vsidkey;
@@ -146,3 +147,4 @@  void copro_flush_all_slbs(struct mm_struct *mm)
 	cxl_slbia(mm);
 }
 EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
+#endif
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 730c3bbe4759..eafa6a2d33a2 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -113,11 +113,16 @@  unsigned long memremap_compat_align(void)
 	// 1GB maximum possible size of the linear mapping.
 	return max(SUBSECTION_SIZE, 1UL << 30);
 #else
-	unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
-
-	if (radix_enabled())
+	if (radix_enabled()) {
 		return SUBSECTION_SIZE;
-	return max(SUBSECTION_SIZE, 1UL << shift);
+	} else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
+		return max(SUBSECTION_SIZE, 1UL << shift);
+#else
+		BUILD_BUG(); // radix_enabled() should be constant true
+#endif
+	}
 #endif
 }
 EXPORT_SYMBOL_GPL(memremap_compat_align);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cd16b407f47e..ab105d33e0b0 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -81,9 +81,6 @@  static struct page *maybe_pte_to_page(pte_t pte)
 
 static pte_t set_pte_filter_hash(pte_t pte)
 {
-	if (radix_enabled())
-		return pte;
-
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
 				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
@@ -112,6 +109,9 @@  static inline pte_t set_pte_filter(pte_t pte)
 {
 	struct page *pg;
 
+	if (radix_enabled())
+		return pte;
+
 	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return set_pte_filter_hash(pte);
 
@@ -144,6 +144,10 @@  static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 {
 	struct page *pg;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	return pte;
+#endif
+
 	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return pte;
 
diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile
index 4050cbb55acf..b533caaf0910 100644
--- a/arch/powerpc/mm/ptdump/Makefile
+++ b/arch/powerpc/mm/ptdump/Makefile
@@ -10,5 +10,5 @@  obj-$(CONFIG_PPC_BOOK3S_64)	+= book3s64.o
 
 ifdef CONFIG_PTDUMP_DEBUGFS
 obj-$(CONFIG_PPC_BOOK3S_32)	+= bats.o segment_regs.o
-obj-$(CONFIG_PPC_BOOK3S_64)	+= hashpagetable.o
+obj-$(CONFIG_PPC_64S_HASH_MMU)	+= hashpagetable.o
 endif
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a208997ade88..01726e7f2c7f 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -105,6 +105,7 @@  config PPC_BOOK3S_64
 	select HAVE_MOVE_PMD
 	select HAVE_MOVE_PUD
 	select IRQ_WORK
+	select PPC_64S_HASH_MMU
 	select PPC_MM_SLICES
 	select PPC_HAVE_KUEP
 	select PPC_HAVE_KUAP
@@ -364,6 +365,9 @@  config SPE
 
 	  If in doubt, say Y here.
 
+config PPC_64S_HASH_MMU
+	bool
+
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
 	depends on PPC_BOOK3S_64
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index e3ffdc8e8567..fa1915d29462 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -492,12 +492,14 @@  static unsigned long power7_idle_insn(unsigned long type)
 
 	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/*
 	 * The SLB has to be restored here, but it sometimes still
 	 * contains entries, so the __ variant must be used to prevent
 	 * multi-hits.
 	 */
 	__slb_restore_bolted_realmode();
+#endif
 
 	return srr1;
 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index a8db3f153063..c6dbfa2e075a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -207,6 +207,7 @@  static void __init pnv_init(void)
 #endif
 		add_preferred_console("hvc", 0, NULL);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!radix_enabled()) {
 		size_t size = sizeof(struct slb_entry) * mmu_slb_size;
 		int i;
@@ -219,6 +220,7 @@  static void __init pnv_init(void)
 						cpu_to_node(i));
 		}
 	}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 }
 
 static void __init pnv_init_IRQ(void)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 06d6a824c0dc..fac5d86777db 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -58,6 +58,7 @@  EXPORT_SYMBOL(plpar_hcall);
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /*
  * H_BLOCK_REMOVE supported block size for this page size in segment whose base
  * page size is that page size.
@@ -66,6 +67,7 @@  EXPORT_SYMBOL(plpar_hcall_norets);
  * page size.
  */
 static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
+#endif
 
 /*
  * Due to the involved complexity, and that the current hypervisor is only
@@ -689,7 +691,7 @@  void vpa_init(int cpu)
 		return;
 	}
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	/*
 	 * PAPR says this feature is SLB-Buffer but firmware never
 	 * reports that.  All SPLPARs support the SLB shadow buffer.
@@ -702,7 +704,7 @@  void vpa_init(int cpu)
 			       "cpu %d (hw %d) of area %lx failed with %ld\n",
 			       cpu, hwcpu, addr, ret);
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 	/*
 	 * Register dispatch trace log, if one has been allocated.
@@ -740,6 +742,8 @@  static int pseries_lpar_register_process_table(unsigned long base,
 	return rc;
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
+
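+/* Hash page table (HPT) hcall paths, down to hpte_init_pseries() */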
 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long vpn, unsigned long pa,
 				     unsigned long rflags, unsigned long vflags,
@@ -1730,6 +1734,7 @@  void __init hpte_init_pseries(void)
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
 		pseries_lpar_register_process_table(0, 0, 0);
 }
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 #ifdef CONFIG_PPC_RADIX_MMU
 void radix_init_pseries(void)
@@ -1932,6 +1937,7 @@  int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
 	return rc;
 }
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
 {
 	unsigned long protovsid;
@@ -1992,6 +1998,7 @@  static int __init reserve_vrma_context_id(void)
 	return 0;
 }
 machine_device_initcall(pseries, reserve_vrma_context_id);
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 #ifdef CONFIG_DEBUG_FS
 /* debugfs file interface for vpa data */
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 3354c00914fa..c7940fcfc911 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -531,7 +531,7 @@  static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	seq_printf(m, "shared_processor_mode=%d\n",
 		   lppaca_shared_proc(get_lppaca()));
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!radix_enabled())
 		seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 #endif
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index e83e0891272d..aec1971e16a1 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -417,11 +417,15 @@  static void prod_others(void)
 
 static u16 clamp_slb_size(void)
 {
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	u16 prev = mmu_slb_size;
 
 	slb_set_size(SLB_MIN_SIZE);
 
 	return prev;
+#else
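+	/* No hash MMU, no SLB: nothing to clamp */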
+	return 0;
+#endif
 }
 
 static int do_suspend(void)
@@ -446,7 +450,9 @@  static int do_suspend(void)
 	ret = rtas_ibm_suspend_me(&status);
 	if (ret != 0) {
 		pr_err("ibm,suspend-me error: %d\n", status);
+#ifdef CONFIG_PPC_64S_HASH_MMU
 		slb_set_size(saved_slb_size);
+#endif
 	}
 
 	return ret;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 56092dccfdb8..74c9b1b5bc66 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -526,6 +526,7 @@  static int mce_handle_err_realmode(int disposition, u8 error_type)
 			disposition = RTAS_DISP_FULLY_RECOVERED;
 			break;
 		case	MC_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
 			/*
 			 * Store the old slb content in paca before flushing.
 			 * Print this when we go to virtual mode.
@@ -538,6 +539,7 @@  static int mce_handle_err_realmode(int disposition, u8 error_type)
 				slb_save_contents(local_paca->mce_faulty_slbs);
 			flush_and_reload_slb();
 			disposition = RTAS_DISP_FULLY_RECOVERED;
+#endif
 			break;
 		default:
 			break;
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7f7369fec46b..80dae18d6621 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -337,8 +337,10 @@  static int do_update_property(char *buf, size_t bufsize)
 	if (!newprop)
 		return -ENOMEM;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
 		slb_set_size(*(int *)value);
+#endif
 
 	return of_update_property(np, newprop);
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f79126f16258..a7f3c0d50fc9 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -113,7 +113,7 @@  static void __init fwnmi_init(void)
 	u8 *mce_data_buf;
 	unsigned int i;
 	int nr_cpus = num_possible_cpus();
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	struct slb_entry *slb_ptr;
 	size_t size;
 #endif
@@ -153,7 +153,7 @@  static void __init fwnmi_init(void)
 						(RTAS_ERROR_LOG_MAX * i);
 	}
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!radix_enabled()) {
 		/* Allocate per cpu area to save old slb contents during MCE */
 		size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
@@ -802,7 +802,9 @@  static void __init pSeries_setup_arch(void)
 	fwnmi_init();
 
 	pseries_setup_security_mitigations();
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	pseries_lpar_read_hblkrm_characteristics();
+#endif
 
 	/* By default, only probe PCI (can be overridden by rtas_pci) */
 	pci_add_flags(PCI_PROBE_ONLY);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index dd8241c009e5..33de8d798c95 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1160,9 +1160,11 @@  cmds(struct pt_regs *excp)
 			show_tasks();
 			break;
 #ifdef CONFIG_PPC_BOOK3S
+/* dump_segments() also exists on book3s/32, keep 'u' available there */
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU)
 		case 'u':
 			dump_segments();
 			break;
+#endif
 #elif defined(CONFIG_44x)
 		case 'u':
 			dump_tlb_44x();
@@ -2608,7 +2610,7 @@  static void dump_tracing(void)
 static void dump_one_paca(int cpu)
 {
 	struct paca_struct *p;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	int i = 0;
 #endif
 
@@ -2650,6 +2652,7 @@  static void dump_one_paca(int cpu)
 	DUMP(p, cpu_start, "%#-*x");
 	DUMP(p, kexec_state, "%#-*x");
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	if (!early_radix_enabled()) {
 		for (i = 0; i < SLB_NUM_BOLTED; i++) {
 			u64 esid, vsid;
@@ -2677,6 +2680,7 @@  static void dump_one_paca(int cpu)
 				       22, "slb_cache", i, p->slb_cache[i]);
 		}
 	}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
 
 	DUMP(p, rfi_flush_fallback_area, "%-*px");
 #endif
@@ -3741,7 +3745,7 @@  static void xmon_print_symbol(unsigned long address, const char *mid,
 	printf("%s", after);
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 void dump_segments(void)
 {
 	int i;
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index aa12097668d3..3ba36ff4e94a 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -11,7 +11,7 @@  lkdtm-$(CONFIG_LKDTM)		+= usercopy.o
 lkdtm-$(CONFIG_LKDTM)		+= stackleak.o
 lkdtm-$(CONFIG_LKDTM)		+= cfi.o
 lkdtm-$(CONFIG_LKDTM)		+= fortify.o
-lkdtm-$(CONFIG_PPC_BOOK3S_64)	+= powerpc.o
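+# PPC_SLB_MULTIHIT exercises the hash MMU's SLB, so powerpc.o needs hash support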
+lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
 
 KASAN_SANITIZE_rodata.o		:= n
 KASAN_SANITIZE_stackleak.o	:= n
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index fe6fd34b8caf..46d483b918a5 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -181,7 +181,7 @@  static const struct crashtype crashtypes[] = {
 	CRASHTYPE(FORTIFIED_SUBOBJECT),
 	CRASHTYPE(FORTIFIED_STRSCPY),
 	CRASHTYPE(DOUBLE_FAULT),
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	CRASHTYPE(PPC_SLB_MULTIHIT),
 #endif
 };