Message ID | 6515aebdbd7adad576f1c97753809547b273504a.1487090656.git.ldufour@linux.vnet.ibm.com (mailing list archive) |
---|---|
State | Accepted |
Headers | show |
Laurent Dufour <ldufour@linux.vnet.ibm.com> writes: > Since the fault retry is now handled earlier, we can release the > mmap_sem lock earlier too and remove later unlocking previously done in > mm_fault_error(). > Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> > Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com> > --- > arch/powerpc/mm/fault.c | 19 ++++--------------- > 1 file changed, 4 insertions(+), 15 deletions(-) > > diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c > index 2a6bc7e6e69a..21e06cce8984 100644 > --- a/arch/powerpc/mm/fault.c > +++ b/arch/powerpc/mm/fault.c > @@ -151,13 +151,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) > * continue the pagefault. > */ > if (fatal_signal_pending(current)) { > - /* > - * If we have retry set, the mmap semaphore will have > - * alrady been released in __lock_page_or_retry(). Else > - * we release it now. > - */ > - if (!(fault & VM_FAULT_RETRY)) > - up_read(&current->mm->mmap_sem); > /* Coming from kernel, we need to deal with uaccess fixups */ > if (user_mode(regs)) > return MM_FAULT_RETURN; > @@ -170,8 +163,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) > > /* Out of memory */ > if (fault & VM_FAULT_OOM) { > - up_read(&current->mm->mmap_sem); > - > /* > * We ran out of memory, or some other thing happened to us that > * made us unable to handle the page fault gracefully. 
> @@ -182,10 +173,8 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) > return MM_FAULT_RETURN; > } > > - if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { > - up_read(&current->mm->mmap_sem); > + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) > return do_sigbus(regs, addr, fault); > - } > > /* We don't understand the fault code, this is fatal */ > BUG(); > @@ -452,11 +441,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, > goto retry; > } > /* We will enter mm_fault_error() below */ > - } > + } else > + up_read(&current->mm->mmap_sem); > > if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { > if (fault & VM_FAULT_SIGSEGV) > - goto bad_area; > + goto bad_area_nosemaphore; > rc = mm_fault_error(regs, address, fault); > if (rc >= MM_FAULT_RETURN) > goto bail; > @@ -488,7 +478,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, > regs, address); > } > > - up_read(&mm->mmap_sem); > goto bail; > > bad_area: > -- > 2.7.4
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2a6bc7e6e69a..21e06cce8984 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -151,13 +151,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) * continue the pagefault. */ if (fatal_signal_pending(current)) { - /* - * If we have retry set, the mmap semaphore will have - * alrady been released in __lock_page_or_retry(). Else - * we release it now. - */ - if (!(fault & VM_FAULT_RETRY)) - up_read(&current->mm->mmap_sem); /* Coming from kernel, we need to deal with uaccess fixups */ if (user_mode(regs)) return MM_FAULT_RETURN; @@ -170,8 +163,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) /* Out of memory */ if (fault & VM_FAULT_OOM) { - up_read(&current->mm->mmap_sem); - /* * We ran out of memory, or some other thing happened to us that * made us unable to handle the page fault gracefully. @@ -182,10 +173,8 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) return MM_FAULT_RETURN; } - if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { - up_read(&current->mm->mmap_sem); + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) return do_sigbus(regs, addr, fault); - } /* We don't understand the fault code, this is fatal */ BUG(); @@ -452,11 +441,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, goto retry; } /* We will enter mm_fault_error() below */ - } + } else + up_read(&current->mm->mmap_sem); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { if (fault & VM_FAULT_SIGSEGV) - goto bad_area; + goto bad_area_nosemaphore; rc = mm_fault_error(regs, address, fault); if (rc >= MM_FAULT_RETURN) goto bail; @@ -488,7 +478,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, regs, address); } - up_read(&mm->mmap_sem); goto bail; bad_area:
Since the fault retry is now handled earlier, we can release the mmap_sem lock earlier too and remove later unlocking previously done in mm_fault_error(). Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com> --- arch/powerpc/mm/fault.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-)