[2/3] powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe

Message ID 20221013230710.1987253-2-npiggin@gmail.com (mailing list archive)
State Accepted
Series [1/3] powerpc/64s: Add lockdep for HPTE lock

Commit Message

Nicholas Piggin Oct. 13, 2022, 11:07 p.m. UTC
With kfence enabled, there are several cases where the HPTE and TLBIE locks
are taken from softirq context, for example:

  WARNING: inconsistent lock state
  6.0.0-11845-g0cbbc95b12ac #1 Tainted: G                 N
  --------------------------------
  inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
  swapper/0/1 [HC0[0]:SC0[0]:HE1:SE1] takes:
  c000000002734de8 (native_tlbie_lock){+.?.}-{2:2}, at: .native_hpte_updateboltedpp+0x1a4/0x600
  {IN-SOFTIRQ-W} state was registered at:
    .lock_acquire+0x20c/0x520
    ._raw_spin_lock+0x4c/0x70
    .native_hpte_invalidate+0x62c/0x840
    .hash__kernel_map_pages+0x450/0x640
    .kfence_protect+0x58/0xc0
    .kfence_guarded_free+0x374/0x5a0
    .__slab_free+0x3d0/0x630
    .put_cred_rcu+0xcc/0x120
    .rcu_core+0x3c4/0x14e0
    .__do_softirq+0x1dc/0x7dc
    .do_softirq_own_stack+0x40/0x60

Fix this by consistently disabling irqs while taking either of these locks.
Irqs are disabled, rather than just bh, because several of the more common
paths already run with irqs disabled, so this simply makes the locks always
irq-safe.

Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_native.c | 27 ++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)
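
The fix follows the standard pattern for a lock that can be taken from interrupt
context: every acquisition must run with interrupts disabled, otherwise an
interrupt arriving on the same CPU while the lock is held can try to take it
again and deadlock. That is the {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} inconsistency
lockdep reports above. The sketch below illustrates the hazard and the fix
pattern; the lock and function names are illustrative stand-ins, not taken from
the kernel source.

  /* Illustrative stand-in for native_tlbie_lock or the HPTE lock. */
  static DEFINE_RAW_SPINLOCK(example_lock);

  /*
   * Unsafe: if a softirq (e.g. the RCU -> kfence_protect path in the splat
   * above) runs on this CPU while the lock is held and takes it again, the
   * CPU deadlocks. This is the {SOFTIRQ-ON-W} usage lockdep complains about.
   */
  static void unsafe_update(void)
  {
          raw_spin_lock(&example_lock);
          /* ... update HPTE / issue tlbie ... */
          raw_spin_unlock(&example_lock);
  }

  /*
   * The pattern applied by the patch: disable irqs across the critical
   * section, which also prevents softirqs from running on this CPU.
   */
  static void safe_update(void)
  {
          unsigned long flags;

          local_irq_save(flags);
          raw_spin_lock(&example_lock);
          /* ... update HPTE / issue tlbie ... */
          raw_spin_unlock(&example_lock);
          local_irq_restore(flags);
  }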

Comments

Guenter Roeck Oct. 14, 2022, 12:18 a.m. UTC | #1
On Fri, Oct 14, 2022 at 09:07:09AM +1000, Nicholas Piggin wrote:
> [...]

Tested-by: Guenter Roeck <linux@roeck-us.net>

Patch

diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 02d0c210a1ce..065f9c542a63 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -268,8 +268,11 @@  static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
+	unsigned long flags;
 	int i;
 
+	local_irq_save(flags);
+
 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
 			" rflags=%lx, vflags=%lx, psize=%d)\n",
@@ -288,8 +291,10 @@  static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hptep++;
 	}
 
-	if (i == HPTES_PER_GROUP)
+	if (i == HPTES_PER_GROUP) {
+		local_irq_restore(flags);
 		return -1;
+	}
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
@@ -304,7 +309,6 @@  static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hpte_v = hpte_old_to_new_v(hpte_v);
 	}
 
-	release_hpte_lock();
 	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
@@ -312,10 +316,13 @@  static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
+	release_hpte_lock();
 	hptep->v = cpu_to_be64(hpte_v);
 
 	__asm__ __volatile__ ("ptesync" : : : "memory");
 
+	local_irq_restore(flags);
+
 	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
 }
 
@@ -366,6 +373,9 @@  static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0, local = 0;
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 
@@ -409,6 +419,8 @@  static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	if (!(flags & HPTE_NOHPTE_UPDATE))
 		tlbie(vpn, bpsize, apsize, ssize, local);
 
+	local_irq_restore(irqflags);
+
 	return ret;
 }
 
@@ -472,6 +484,9 @@  static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -490,6 +505,8 @@  static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	 * actual page size will be same.
 	 */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -503,6 +520,9 @@  static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -520,6 +540,9 @@  static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
+
 	return 0;
 }
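
One note on the shape of the change: the HPTE lock in hash_native.c is a bit
lock embedded in the HPTE's first doubleword (the diff's "this also unlocks
the hpte" comment refers to it) rather than a spinlock_t, and the irq-disabled
region has to span both that lock and native_tlbie_lock. So the patch disables
interrupts explicitly at the top of each function instead of using
spin_lock_irqsave(), and every early-return path restores them, as the
HPTES_PER_GROUP exit in native_hpte_insert() shows. The fragment below is only
a sketch of that structure; the helper names are hypothetical.

  /* Sketch only: the helpers are hypothetical stand-ins, not kernel source. */
  static long do_locked_hpte_update(void); /* takes/drops the embedded HPTE bit lock */
  static void issue_tlbie(void);           /* may take native_tlbie_lock */

  static long hypothetical_hpte_op(void)
  {
          unsigned long flags;
          long ret;

          local_irq_save(flags);          /* covers HPTE bit lock and tlbie lock */

          ret = do_locked_hpte_update();
          if (ret < 0) {
                  /* Every early return must restore irqs. */
                  local_irq_restore(flags);
                  return ret;
          }

          issue_tlbie();

          local_irq_restore(flags);
          return ret;
  }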