
[2/3] powerpc/mm: handle VM_FAULT_RETRY earlier

Message ID 42011273e3edc4176dc8045b3956dfd218259143.1487090656.git.ldufour@linux.vnet.ibm.com (mailing list archive)
State Accepted

Commit Message

Laurent Dufour Feb. 14, 2017, 4:45 p.m. UTC
In do_page_fault(), if handle_mm_fault() returns VM_FAULT_RETRY, retry
the page fault handling before anything else.

This simplifies the handling of the mmap_sem lock in this part of the
code.
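
For orientation, the patched flow condenses to the following sketch
(simplified; the error checks and accounting details are elided, see the
diff below for the real code):

retry:
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & VM_FAULT_RETRY)) {
		/*
		 * handle_mm_fault() has already released mmap_sem when it
		 * returns VM_FAULT_RETRY, so handle the retry before
		 * anything that assumes the lock is still held.
		 */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* Retry only once, to avoid any risk of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}
		/* otherwise fall through to mm_fault_error() below */
	}

	/* ... error checks, major/minor accounting, up_read(&mm->mmap_sem) ... */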

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 arch/powerpc/mm/fault.c | 67 ++++++++++++++++++++++++++++---------------------
 1 file changed, 38 insertions(+), 29 deletions(-)

Comments

Aneesh Kumar K.V March 21, 2017, 9:12 a.m. UTC | #1
Laurent Dufour <ldufour@linux.vnet.ibm.com> writes:

> In do_page_fault(), if handle_mm_fault() returns VM_FAULT_RETRY, retry
> the page fault handling before anything else.
>
> This simplifies the handling of the mmap_sem lock in this part of the
> code.
>
> Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
> ---
>  arch/powerpc/mm/fault.c | 67 ++++++++++++++++++++++++++++---------------------
>  1 file changed, 38 insertions(+), 29 deletions(-)
>
> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
> index ee09604bbe12..2a6bc7e6e69a 100644
> --- a/arch/powerpc/mm/fault.c
> +++ b/arch/powerpc/mm/fault.c
> @@ -434,6 +434,26 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
>  	 * the fault.
>  	 */
>  	fault = handle_mm_fault(vma, address, flags);
> +
> +	/*
> +	 * Handle the retry right now, the mmap_sem has been released in that
> +	 * case.
> +	 */
> +	if (unlikely(fault & VM_FAULT_RETRY)) {
> +		/* We retry only once */
> +		if (flags & FAULT_FLAG_ALLOW_RETRY) {
> +			/*
> +			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
> +			 * of starvation.
> +			 */
> +			flags &= ~FAULT_FLAG_ALLOW_RETRY;
> +			flags |= FAULT_FLAG_TRIED;
> +			if (!fatal_signal_pending(current))
> +				goto retry;
> +		}
> +		/* We will enter mm_fault_error() below */
> +	}
> +
>  	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
>  		if (fault & VM_FAULT_SIGSEGV)
>  			goto bad_area;
> @@ -445,38 +465,27 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
>  	}

We could make it even simpler by handling VM_FAULT_RETRY without
FAULT_FLAG_ALLOW_RETRY set earlier. But I guess that can be done later?
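
A rough sketch of that idea, for illustration only (hypothetical, not
what this patch does; it assumes mm_fault_error() could be taught to
run with mmap_sem already dropped):

	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* mmap_sem was already released by handle_mm_fault() */
		if (!(flags & FAULT_FLAG_ALLOW_RETRY))
			/* hypothetical: would require mm_fault_error()
			 * to cope with the lock being gone already */
			return mm_fault_error(regs, address, fault);

		/* Retry only once, to avoid any risk of starvation. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		if (!fatal_signal_pending(current))
			goto retry;
		return 0;	/* fatal signal pending: give up */
	}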

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>


>
>  	/*
> -	 * Major/minor page fault accounting is only done on the
> -	 * initial attempt. If we go through a retry, it is extremely
> -	 * likely that the page will be found in page cache at that point.
> +	 * Major/minor page fault accounting.
>  	 */
> -	if (flags & FAULT_FLAG_ALLOW_RETRY) {
> -		if (fault & VM_FAULT_MAJOR) {
> -			current->maj_flt++;
> -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
> -				      regs, address);
> +	if (fault & VM_FAULT_MAJOR) {
> +		current->maj_flt++;
> +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
> +			      regs, address);
>  #ifdef CONFIG_PPC_SMLPAR
> -			if (firmware_has_feature(FW_FEATURE_CMO)) {
> -				u32 page_ins;
> -
> -				preempt_disable();
> -				page_ins = be32_to_cpu(get_lppaca()->page_ins);
> -				page_ins += 1 << PAGE_FACTOR;
> -				get_lppaca()->page_ins = cpu_to_be32(page_ins);
> -				preempt_enable();
> -			}
> -#endif /* CONFIG_PPC_SMLPAR */
> -		} else {
> -			current->min_flt++;
> -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
> -				      regs, address);
> -		}
> -		if (fault & VM_FAULT_RETRY) {
> -			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
> -			 * of starvation. */
> -			flags &= ~FAULT_FLAG_ALLOW_RETRY;
> -			flags |= FAULT_FLAG_TRIED;
> -			goto retry;
> +		if (firmware_has_feature(FW_FEATURE_CMO)) {
> +			u32 page_ins;
> +
> +			preempt_disable();
> +			page_ins = be32_to_cpu(get_lppaca()->page_ins);
> +			page_ins += 1 << PAGE_FACTOR;
> +			get_lppaca()->page_ins = cpu_to_be32(page_ins);
> +			preempt_enable();
>  		}
> +#endif /* CONFIG_PPC_SMLPAR */
> +	} else {
> +		current->min_flt++;
> +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
> +			      regs, address);
>  	}
>
>  	up_read(&mm->mmap_sem);
> -- 
> 2.7.4
Laurent Dufour March 21, 2017, 9:57 a.m. UTC | #2
On 21/03/2017 10:12, Aneesh Kumar K.V wrote:
> Laurent Dufour <ldufour@linux.vnet.ibm.com> writes:
> 
>> In do_page_fault(), if handle_mm_fault() returns VM_FAULT_RETRY, retry
>> the page fault handling before anything else.
>>
>> This simplifies the handling of the mmap_sem lock in this part of the
>> code.
>>
>> Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
>> ---
>>  arch/powerpc/mm/fault.c | 67 ++++++++++++++++++++++++++++---------------------
>>  1 file changed, 38 insertions(+), 29 deletions(-)
>>
>> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
>> index ee09604bbe12..2a6bc7e6e69a 100644
>> --- a/arch/powerpc/mm/fault.c
>> +++ b/arch/powerpc/mm/fault.c
>> @@ -434,6 +434,26 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
>>  	 * the fault.
>>  	 */
>>  	fault = handle_mm_fault(vma, address, flags);
>> +
>> +	/*
>> +	 * Handle the retry right now, the mmap_sem has been released in that
>> +	 * case.
>> +	 */
>> +	if (unlikely(fault & VM_FAULT_RETRY)) {
>> +		/* We retry only once */
>> +		if (flags & FAULT_FLAG_ALLOW_RETRY) {
>> +			/*
>> +			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
>> +			 * of starvation.
>> +			 */
>> +			flags &= ~FAULT_FLAG_ALLOW_RETRY;
>> +			flags |= FAULT_FLAG_TRIED;
>> +			if (!fatal_signal_pending(current))
>> +				goto retry;
>> +		}
>> +		/* We will enter mm_fault_error() below */
>> +	}
>> +
>>  	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
>>  		if (fault & VM_FAULT_SIGSEGV)
>>  			goto bad_area;
>> @@ -445,38 +465,27 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
>>  	}
> 
> We could make it even simpler by handling VM_FAULT_RETRY without
> FAULT_FLAG_ALLOW_RETRY set earlier. But I guess that can be done later?


Thanks for the review.

I agree that testing VM_FAULT_RETRY twice is confusing here.

But handling the whole retry path in the first if() statement means we
would have to duplicate part of the mm_fault_error() and SEGV handling
there, unless we can identify what is really relevant in the retry path.

It would take time to review that tricky part, but I agree it should be
simplified later.
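
To illustrate, this is the shape that remains after this patch, with the
double test annotated (a sketch, not the literal code):

	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* retry once if allowed, otherwise fall through */
	}

	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_SIGSEGV)
			/* the SEGV machinery we would have to pull into
			 * the first block to merge the two checks */
			goto bad_area;
		/* mm_fault_error() deals with the remaining error bits */
	}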

> 
> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> 
> 
>>
>>  	/*
>> -	 * Major/minor page fault accounting is only done on the
>> -	 * initial attempt. If we go through a retry, it is extremely
>> -	 * likely that the page will be found in page cache at that point.
>> +	 * Major/minor page fault accounting.
>>  	 */
>> -	if (flags & FAULT_FLAG_ALLOW_RETRY) {
>> -		if (fault & VM_FAULT_MAJOR) {
>> -			current->maj_flt++;
>> -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
>> -				      regs, address);
>> +	if (fault & VM_FAULT_MAJOR) {
>> +		current->maj_flt++;
>> +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
>> +			      regs, address);
>>  #ifdef CONFIG_PPC_SMLPAR
>> -			if (firmware_has_feature(FW_FEATURE_CMO)) {
>> -				u32 page_ins;
>> -
>> -				preempt_disable();
>> -				page_ins = be32_to_cpu(get_lppaca()->page_ins);
>> -				page_ins += 1 << PAGE_FACTOR;
>> -				get_lppaca()->page_ins = cpu_to_be32(page_ins);
>> -				preempt_enable();
>> -			}
>> -#endif /* CONFIG_PPC_SMLPAR */
>> -		} else {
>> -			current->min_flt++;
>> -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
>> -				      regs, address);
>> -		}
>> -		if (fault & VM_FAULT_RETRY) {
>> -			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
>> -			 * of starvation. */
>> -			flags &= ~FAULT_FLAG_ALLOW_RETRY;
>> -			flags |= FAULT_FLAG_TRIED;
>> -			goto retry;
>> +		if (firmware_has_feature(FW_FEATURE_CMO)) {
>> +			u32 page_ins;
>> +
>> +			preempt_disable();
>> +			page_ins = be32_to_cpu(get_lppaca()->page_ins);
>> +			page_ins += 1 << PAGE_FACTOR;
>> +			get_lppaca()->page_ins = cpu_to_be32(page_ins);
>> +			preempt_enable();
>>  		}
>> +#endif /* CONFIG_PPC_SMLPAR */
>> +	} else {
>> +		current->min_flt++;
>> +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
>> +			      regs, address);
>>  	}
>>
>>  	up_read(&mm->mmap_sem);
>> -- 
>> 2.7.4

Patch

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ee09604bbe12..2a6bc7e6e69a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -434,6 +434,26 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * the fault.
 	 */
 	fault = handle_mm_fault(vma, address, flags);
+
+	/*
+	 * Handle the retry right now, the mmap_sem has been released in that
+	 * case.
+	 */
+	if (unlikely(fault & VM_FAULT_RETRY)) {
+		/* We retry only once */
+		if (flags & FAULT_FLAG_ALLOW_RETRY) {
+			/*
+			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation.
+			 */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+			if (!fatal_signal_pending(current))
+				goto retry;
+		}
+		/* We will enter mm_fault_error() below */
+	}
+
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
@@ -445,38 +465,27 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	/*
-	 * Major/minor page fault accounting is only done on the
-	 * initial attempt. If we go through a retry, it is extremely
-	 * likely that the page will be found in page cache at that point.
+	 * Major/minor page fault accounting.
 	 */
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			current->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
+	if (fault & VM_FAULT_MAJOR) {
+		current->maj_flt++;
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+			      regs, address);
 #ifdef CONFIG_PPC_SMLPAR
-			if (firmware_has_feature(FW_FEATURE_CMO)) {
-				u32 page_ins;
-
-				preempt_disable();
-				page_ins = be32_to_cpu(get_lppaca()->page_ins);
-				page_ins += 1 << PAGE_FACTOR;
-				get_lppaca()->page_ins = cpu_to_be32(page_ins);
-				preempt_enable();
-			}
-#endif /* CONFIG_PPC_SMLPAR */
-		} else {
-			current->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
-		if (fault & VM_FAULT_RETRY) {
-			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
-			 * of starvation. */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
-			goto retry;
+		if (firmware_has_feature(FW_FEATURE_CMO)) {
+			u32 page_ins;
+
+			preempt_disable();
+			page_ins = be32_to_cpu(get_lppaca()->page_ins);
+			page_ins += 1 << PAGE_FACTOR;
+			get_lppaca()->page_ins = cpu_to_be32(page_ins);
+			preempt_enable();
 		}
+#endif /* CONFIG_PPC_SMLPAR */
+	} else {
+		current->min_flt++;
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+			      regs, address);
 	}
 
 	up_read(&mm->mmap_sem);