
[Xenial,2/3] UBUNTU: SAUCE: s390/mm: fix local TLB flushing vs. detach of an mm address space

Message ID 1505209542-17445-3-git-send-email-stefan.bader@canonical.com
State New
Series None

Commit Message

Stefan Bader Sept. 12, 2017, 9:45 a.m. UTC
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

BugLink: http://bugs.launchpad.net/bugs/1708399

The local TLB flushing code keeps an additional mask in the mm.context,
the cpu_attach_mask. At the time a global flush of an address space is
done, the cpu_attach_mask is copied to the mm_cpumask in order to avoid
future global flushes in case the mm is used by only a single CPU after
the flush.

The trouble is that the reset of the mm_cpumask is racy against the detach
of an mm address space by switch_mm. The current order is first the
global TLB flush and then the copy of the cpu_attach_mask to the
mm_cpumask. The order needs to be the other way around.

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
(backported from b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659 linux-next)
[merged with "s390/mm,kvm: flush gmap address space with IDTE"]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
---
 arch/s390/include/asm/tlbflush.h | 56 ++++++++++++----------------------------
 1 file changed, 16 insertions(+), 40 deletions(-)
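
As a minimal illustration of the reordering described in the commit message above (a simplified sketch, not the actual patched code; the two function names are made up for illustration, while cpumask_copy(), mm_cpumask(), __tlb_flush_global(), barrier() and MACHINE_HAS_TLB_LC are the helpers that appear in the diff at the bottom of this page), the fix moves the copy of cpu_attach_mask into mm_cpumask in front of the global flush:

/*
 * Old order: global flush first, then reset of the flush mask.
 * The mask reset races with a concurrent detach in switch_mm().
 */
static inline void tlb_flush_mm_old_order(struct mm_struct *mm)
{
	__tlb_flush_global();
	if (MACHINE_HAS_TLB_LC)
		cpumask_copy(mm_cpumask(mm),
			     &mm->context.cpu_attach_mask);
}

/*
 * New order: reset the flush mask first, then issue the flush,
 * as the patch below does in __tlb_flush_mm().
 */
static inline void tlb_flush_mm_new_order(struct mm_struct *mm)
{
	if (MACHINE_HAS_TLB_LC)
		cpumask_copy(mm_cpumask(mm),
			     &mm->context.cpu_attach_mask);
	barrier();	/* compiler barrier: keep the copy before the flush */
	__tlb_flush_global();
}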

Comments

Colin Ian King Sept. 12, 2017, 11:58 a.m. UTC | #1
On 12/09/17 10:45, Stefan Bader wrote:
> [...]
> (backported from b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659 linux-next)
> [merged with "s390/mm,kvm: flush gmap address space with IDTE"]
> [...]

Maybe I'm being a bit slow here, but I'm finding it hard to see how
this is related to linux-next commit
b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659

Do you mind explaining this backport?

Colin
Stefan Bader Sept. 12, 2017, 12:18 p.m. UTC | #2
On 12.09.2017 13:58, Colin Ian King wrote:
> On 12/09/17 10:45, Stefan Bader wrote:
>> [...]
> 
> Maybe I'm being a bit slow here, but I'm finding it hard to see how
> this is related to linux-next commit
> b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659
> 
> Do you mind explaining this backport?

It is a merge with the other commit mentioned above. Not the most obvious one,
but since it comes from the s390x maintainers I crossed my eyes and hoped.

-Stefan

> 
> Colin
>
Colin Ian King Sept. 12, 2017, 12:28 p.m. UTC | #3
On 12/09/17 13:18, Stefan Bader wrote:
> On 12.09.2017 13:58, Colin Ian King wrote:
>> On 12/09/17 10:45, Stefan Bader wrote:
>>> [...]
>>
>> Maybe I'm being a bit slow here, but I'm finding it hard to see how
>> this is related to linux-next commit
>> b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659
>>
>> Do you mind explaining this backport?
> 
> It is a merge with the other commit mentioned above. Not the most obvious one,
> but since it comes from the s390x maintainers I crossed my eyes and hoped.

OK, got it. Thanks for the explanation.

Colin

> 
> -Stefan
> 
>>
>> Colin
>>
Colin Ian King Sept. 12, 2017, 12:29 p.m. UTC | #4
On 12/09/17 10:45, Stefan Bader wrote:
> [...]

Acked-by: Colin Ian King <colin.king@canonical.com>

Patch

diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 80868c84..d54cc83 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -47,47 +47,31 @@  static inline void __tlb_flush_global(void)
 }
 
 /*
- * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
- * this implicates multiple ASCEs!).
+ * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
+static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	preempt_disable();
 	atomic_add(0x10000, &mm->context.attach_count);
-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		/* Local TLB flush */
-		__tlb_flush_local();
+	/* Reset TLB flush mask */
+	if (MACHINE_HAS_TLB_LC)
+		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
+	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) {
+		__tlb_flush_idte(mm->context.asce);
 	} else {
 		/* Global TLB flush */
 		__tlb_flush_global();
-		/* Reset TLB flush mask */
-		if (MACHINE_HAS_TLB_LC)
-			cpumask_copy(mm_cpumask(mm),
-				     &mm->context.cpu_attach_mask);
 	}
 	atomic_sub(0x10000, &mm->context.attach_count);
 	preempt_enable();
 }
 
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
-{
-	preempt_disable();
-	atomic_add(0x10000, &mm->context.attach_count);
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte(asce);
-	else
-		__tlb_flush_global();
-	/* Reset TLB flush mask */
-	if (MACHINE_HAS_TLB_LC)
-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-	atomic_sub(0x10000, &mm->context.attach_count);
-	preempt_enable();
-}
-
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_IDTE)
@@ -97,7 +81,6 @@  static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global()	__tlb_flush_local()
-#define __tlb_flush_full(mm)	__tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
@@ -111,21 +94,14 @@  static inline void __tlb_flush_kernel(void)
 {
 	__tlb_flush_local();
 }
-#endif
 
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
-	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, mm->context.asce);
-	else
-		__tlb_flush_full(mm);
+	__tlb_flush_local();
 }
 
+#endif
+
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {