
[1/3] fixup mmu_features immediately after getting cpu pa features.

Message ID 20211004151142.256251-2-sourabhjain@linux.ibm.com (mailing list archive)
State Superseded
Series Update crashkernel offset to allow kernel to boot on large config LPARs

Commit Message

Sourabh Jain Oct. 4, 2021, 3:11 p.m. UTC
From: Mahesh Salgaonkar <mahesh@linux.ibm.com>

On systems with radix support available, early_radix_enabled() starts
returning true for a small window (until mmu_early_init_devtree() is
called) even when radix mode is disabled on the kernel command line. This
causes ppc64_bolted_size() to return ULONG_MAX in HPT mode instead of the
supported segment size during boot CPU paca allocation.

With kernel command line = "... disable_radix":

early_init_devtree:			  <- early_radix_enabled() = false
  early_init_dt_scan_cpus:		  <- early_radix_enabled() = false
      ...
      check_cpu_pa_features:		  <- early_radix_enabled() = false
      ...				^ <- early_radix_enabled() = TRUE
      allocate_paca:			| <- early_radix_enabled() = TRUE
          ...                           |
          ppc64_bolted_size:		| <- early_radix_enabled() = TRUE
              if (early_radix_enabled())| <- early_radix_enabled() = TRUE
                  return ULONG_MAX;     |
      ...                               |
  ...					| <- early_radix_enabled() = TRUE
  ...					| <- early_radix_enabled() = TRUE
  mmu_early_init_devtree()              V
  ...					  <- early_radix_enabled() = false

So far we have not seen any issue because allocate_paca() takes the
minimum of ppc64_bolted_size and rma_size while allocating the paca.
However, it is better to close this window by fixing up the MMU features
as early as possible. This makes early_radix_enabled() and
ppc64_bolted_size() return valid values when radix is disabled. It also
allows a subsequent patch to rely on the early_radix_enabled() check while
detecting the supported segment size in HPT mode.
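
For reference, a simplified sketch of the check in question, modelled on
the upstream Book3S-64 ppc64_bolted_size() (illustrative only, not part of
this patch; exact code varies by kernel version):

	static unsigned long __init ppc64_bolted_size_sketch(void)
	{
		/* Radix does not take faults on the linear mapping. */
		if (early_radix_enabled())
			return ULONG_MAX;

		/* HPT: only the first segment is bolted. */
		if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
			return 1UL << SID_SHIFT_1T;	/* 1T segment */
		return 1UL << SID_SHIFT;		/* 256M segment */
	}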

Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Sourabh Jain <sourabhjain@linux.ibm.com>
Reported-and-tested-by: Abdul haleem <abdhalee@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
 arch/powerpc/include/asm/mmu.h           | 1 +
 arch/powerpc/kernel/prom.c               | 1 +
 arch/powerpc/mm/init_64.c                | 5 ++++-
 4 files changed, 7 insertions(+), 1 deletion(-)

Comments

Aneesh Kumar K V Oct. 4, 2021, 3:32 p.m. UTC | #1
On 10/4/21 20:41, Sourabh Jain wrote:
> From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
> 
> On systems with radix support available, early_radix_enabled() starts
> returning true for a small window (until mmu_early_init_devtree() is
> called) even when radix mode is disabled on the kernel command line. This
> causes ppc64_bolted_size() to return ULONG_MAX in HPT mode instead of the
> supported segment size during boot CPU paca allocation.
> 
> With kernel command line = "... disable_radix":
> 
> early_init_devtree:			  <- early_radix_enabled() = false
>    early_init_dt_scan_cpus:		  <- early_radix_enabled() = false
>        ...
>        check_cpu_pa_features:		  <- early_radix_enabled() = false
>        ...				^ <- early_radix_enabled() = TRUE
>        allocate_paca:			| <- early_radix_enabled() = TRUE
>            ...                           |
>            ppc64_bolted_size:		| <- early_radix_enabled() = TRUE
>                if (early_radix_enabled())| <- early_radix_enabled() = TRUE
>                    return ULONG_MAX;     |
>        ...                               |
>    ...					| <- early_radix_enabled() = TRUE
>    ...					| <- early_radix_enabled() = TRUE
>    mmu_early_init_devtree()              V
>    ...					  <- early_radix_enabled() = false
> 
> So far we have not seen any issue because allocate_paca() takes the
> minimum of ppc64_bolted_size and rma_size while allocating the paca.
> However, it is better to close this window by fixing up the MMU features
> as early as possible. This makes early_radix_enabled() and
> ppc64_bolted_size() return valid values when radix is disabled. It also
> allows a subsequent patch to rely on the early_radix_enabled() check while
> detecting the supported segment size in HPT mode.
> 
> Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
> Signed-off-by: Sourabh Jain <sourabhjain@linux.ibm.com>
> Reported-and-tested-by: Abdul haleem <abdhalee@linux.vnet.ibm.com>
> ---
>   arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
>   arch/powerpc/include/asm/mmu.h           | 1 +
>   arch/powerpc/kernel/prom.c               | 1 +
>   arch/powerpc/mm/init_64.c                | 5 ++++-
>   4 files changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index c02f42d1031e..69a89fa1330d 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -197,6 +197,7 @@ extern int mmu_vmemmap_psize;
>   extern int mmu_io_psize;
>   
>   /* MMU initialization */
> +void mmu_cpu_feature_fixup(void);
>   void mmu_early_init_devtree(void);
>   void hash__early_init_devtree(void);
>   void radix__early_init_devtree(void);
> diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
> index 8abe8e42e045..c8eafd401fe9 100644
> --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -401,6 +401,7 @@ extern void early_init_mmu(void);
>   extern void early_init_mmu_secondary(void);
>   extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
>   				       phys_addr_t first_memblock_size);
> +static inline void mmu_cpu_feature_fixup(void) { }
>   static inline void mmu_early_init_devtree(void) { }
>   
>   static inline void pkey_early_init_devtree(void) {}
> diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
> index 2e67588f6f6e..1727a3abe6c1 100644
> --- a/arch/powerpc/kernel/prom.c
> +++ b/arch/powerpc/kernel/prom.c
> @@ -380,6 +380,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
>   		check_cpu_pa_features(node);
>   	}
>   
> +	mmu_cpu_feature_fixup();

Can you do that call inside check_cpu_pa_features()? Or is it because we
have the same issue with baremetal platforms?

Can we also rename this to indicate we are sanitizing the feature flag
based on the kernel command line? Something like:

/* Update cpu features based on kernel command line */
update_cpu_features();
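
A hypothetical sketch of the renamed helper, reusing the body that this
patch gives mmu_cpu_feature_fixup() (only the name follows the suggestion
above; the new name is not in the posted patch):

	/* Sanitize MMU feature flags based on the kernel command line. */
	void __init update_cpu_features(void)
	{
		if (disable_radix)
			cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}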

>   	identical_pvr_fixup(node);
>   	init_mmu_slb_size(node);
>   
> diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
> index 386be136026e..9ed452605a2c 100644
> --- a/arch/powerpc/mm/init_64.c
> +++ b/arch/powerpc/mm/init_64.c
> @@ -437,12 +437,15 @@ static void __init early_check_vec5(void)
>   	}
>   }
>   
> -void __init mmu_early_init_devtree(void)
> +void __init mmu_cpu_feature_fixup(void)
>   {
>   	/* Disable radix mode based on kernel command line. */
>   	if (disable_radix)
>   		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
> +}
>   
> +void __init mmu_early_init_devtree(void)
> +{
>   	/*
>   	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
>   	 * When running bare-metal, we can use radix if we like
>
Mahesh J Salgaonkar Oct. 5, 2021, 5:54 a.m. UTC | #2
On 2021-10-04 21:02:21 Mon, Aneesh Kumar K.V wrote:
> On 10/4/21 20:41, Sourabh Jain wrote:
> > From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
> > 
> > On systems with radix support available, early_radix_enabled() starts
> > returning true for a small window (until mmu_early_init_devtree() is
> > called) even when radix mode is disabled on the kernel command line. This
> > causes ppc64_bolted_size() to return ULONG_MAX in HPT mode instead of the
> > supported segment size during boot CPU paca allocation.
> > 
> > With kernel command line = "... disable_radix":
> > 
> > early_init_devtree:			  <- early_radix_enabled() = false
> >    early_init_dt_scan_cpus:		  <- early_radix_enabled() = false
> >        ...
> >        check_cpu_pa_features:		  <- early_radix_enabled() = false
> >        ...				^ <- early_radix_enabled() = TRUE
> >        allocate_paca:			| <- early_radix_enabled() = TRUE
> >            ...                           |
> >            ppc64_bolted_size:		| <- early_radix_enabled() = TRUE
> >                if (early_radix_enabled())| <- early_radix_enabled() = TRUE
> >                    return ULONG_MAX;     |
> >        ...                               |
> >    ...					| <- early_radix_enabled() = TRUE
> >    ...					| <- early_radix_enabled() = TRUE
> >    mmu_early_init_devtree()              V
> >    ...					  <- early_radix_enabled() = false
> > 
> > So far we have not seen any issue because allocate_paca() takes the
> > minimum of ppc64_bolted_size and rma_size while allocating the paca.
> > However, it is better to close this window by fixing up the MMU features
> > as early as possible. This makes early_radix_enabled() and
> > ppc64_bolted_size() return valid values when radix is disabled. It also
> > allows a subsequent patch to rely on the early_radix_enabled() check while
> > detecting the supported segment size in HPT mode.
> > 
> > Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
> > Signed-off-by: Sourabh Jain <sourabhjain@linux.ibm.com>
> > Reported-and-tested-by: Abdul haleem <abdhalee@linux.vnet.ibm.com>
> > ---
> >   arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
> >   arch/powerpc/include/asm/mmu.h           | 1 +
> >   arch/powerpc/kernel/prom.c               | 1 +
> >   arch/powerpc/mm/init_64.c                | 5 ++++-
> >   4 files changed, 7 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> > index c02f42d1031e..69a89fa1330d 100644
> > --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> > +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> > @@ -197,6 +197,7 @@ extern int mmu_vmemmap_psize;
> >   extern int mmu_io_psize;
> >   /* MMU initialization */
> > +void mmu_cpu_feature_fixup(void);
> >   void mmu_early_init_devtree(void);
> >   void hash__early_init_devtree(void);
> >   void radix__early_init_devtree(void);
> > diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
> > index 8abe8e42e045..c8eafd401fe9 100644
> > --- a/arch/powerpc/include/asm/mmu.h
> > +++ b/arch/powerpc/include/asm/mmu.h
> > @@ -401,6 +401,7 @@ extern void early_init_mmu(void);
> >   extern void early_init_mmu_secondary(void);
> >   extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
> >   				       phys_addr_t first_memblock_size);
> > +static inline void mmu_cpu_feature_fixup(void) { }
> >   static inline void mmu_early_init_devtree(void) { }
> >   static inline void pkey_early_init_devtree(void) {}
> > diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
> > index 2e67588f6f6e..1727a3abe6c1 100644
> > --- a/arch/powerpc/kernel/prom.c
> > +++ b/arch/powerpc/kernel/prom.c
> > @@ -380,6 +380,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
> >   		check_cpu_pa_features(node);
> >   	}
> > +	mmu_cpu_feature_fixup();
> 
> Can you do that call inside check_cpu_pa_features()? Or is it because we
> have the same issue with baremetal platforms?

Yup, the same issue exists on baremetal as well when dt_cpu_ftrs_in_use
is true. Hence calling it after the if (!dt_cpu_ftrs_in_use) code block
takes care of both pseries and baremetal platforms.
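
In other words, the resulting call site in early_init_dt_scan_cpus() looks
roughly like this (sketch assembled from the hunk below, unrelated lines
elided):

	if (!dt_cpu_ftrs_in_use) {
		...
		check_cpu_pa_features(node);
	}

	mmu_cpu_feature_fixup();	/* covers both pseries and baremetal */
	identical_pvr_fixup(node);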

> 
> Can we also rename this to indicate we are sanitizing the feature flag
> based on the kernel command line? Something like:
> 
> /* Update cpu features based on kernel command line */
> update_cpu_features();

Sure, will do.

Thanks for your review.
-Mahesh.

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c02f42d1031e..69a89fa1330d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -197,6 +197,7 @@  extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 
 /* MMU initialization */
+void mmu_cpu_feature_fixup(void);
 void mmu_early_init_devtree(void);
 void hash__early_init_devtree(void);
 void radix__early_init_devtree(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 8abe8e42e045..c8eafd401fe9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -401,6 +401,7 @@  extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size);
+static inline void mmu_cpu_feature_fixup(void) { }
 static inline void mmu_early_init_devtree(void) { }
 
 static inline void pkey_early_init_devtree(void) {}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 2e67588f6f6e..1727a3abe6c1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -380,6 +380,7 @@  static int __init early_init_dt_scan_cpus(unsigned long node,
 		check_cpu_pa_features(node);
 	}
 
+	mmu_cpu_feature_fixup();
 	identical_pvr_fixup(node);
 	init_mmu_slb_size(node);
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 386be136026e..9ed452605a2c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -437,12 +437,15 @@  static void __init early_check_vec5(void)
 	}
 }
 
-void __init mmu_early_init_devtree(void)
+void __init mmu_cpu_feature_fixup(void)
 {
 	/* Disable radix mode based on kernel command line. */
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+}
 
+void __init mmu_early_init_devtree(void)
+{
 	/*
 	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
 	 * When running bare-metal, we can use radix if we like