From patchwork Tue Mar 8 02:03:32 2011
X-Patchwork-Submitter: Jeremy Kerr
X-Patchwork-Id: 85879
Subject: [PATCH lucid/fsl-imx51] ARM: 5746/1: Handle possible translation errors in ARMv6/v7 coherent_user_range
Message-Id: <1299549812.50179.618988780375.1.gpush@pororo>
From: Jeremy Kerr
To: kernel-team@lists.ubuntu.com
Cc: 605042@bugs.launchpad.net
Date: Tue, 08 Mar 2011 10:03:32 +0800

From: Catalin Marinas

BugLink: http://launchpad.net/bugs/605042

This is needed because applications using the sys_cacheflush system call
can pass a memory range which isn't mapped yet even though the
corresponding vma is valid. The patch also adds unwinding annotations for
correct backtraces from the coherent_user_range() functions.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King

cherry-picked from upstream commit 32cfb1b16f2b68d2296536811cadfffe26a06c1b

Signed-off-by: Jeremy Kerr
---
 arch/arm/mm/cache-v6.S |   20 ++++++++++++++++++--
 arch/arm/mm/cache-v7.S |   19 +++++++++++++++++--
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 8f5c13f..295e25d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
  *	- the Icache does not read data from the write buffer
  */
 ENTRY(v6_coherent_user_range)
-
+ UNWIND(.fnstart		)
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
+1:
+ USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
 	add	r0, r0, #CACHE_LINE_SIZE
+2:
 	cmp	r0, r1
 	blo	1b
 #endif
@@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range)
 	mov	pc, lr
 
 /*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
+ENDPROC(v6_coherent_user_range)
+ENDPROC(v6_coherent_kern_range)
+
+/*
  * v6_flush_kern_dcache_page(kaddr)
  *
  *	Ensure that the data held in the page kaddr is written back
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff0..3290dac 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -147,13 +148,16 @@ ENTRY(v7_coherent_kern_range)
  *	- the Icache does not read data from the write buffer
  */
 ENTRY(v7_coherent_user_range)
+ UNWIND(.fnstart		)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
-1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+1:
+ USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
 	dsb
-	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+ USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
 	add	r0, r0, r2
+2:
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
@@ -161,6 +165,17 @@ ENTRY(v7_coherent_user_range)
 	dsb
 	isb
 	mov	pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
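
A note on how the fixup works, plus a user-space illustration. The USER()
macro records the marked mcr instructions in the exception table with 9001
as the fixup target, so a translation fault on a not-yet-mapped user page
lands at 9001:, which rounds r0 down to a 4 KB page boundary (lsr #12 /
lsl #12), advances it by one page, and branches back to the loop at 2:.

The sketch below is illustrative only and is not part of the patch; it
assumes GCC's __builtin___clear_cache(), which on ARM Linux is implemented
via the cacheflush system call, and it simply creates a valid vma whose
later pages have never been faulted in before requesting a flush over the
whole range:

/*
 * Hypothetical user-space trigger for the case fixed above: flush a range
 * that covers a valid but not-yet-populated mapping.
 * Build (on an ARM Linux target): gcc -O2 -o flushdemo flushdemo.c
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;			/* four pages */

	/* Valid vma, but no pages are faulted in yet. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Touch only the first page; the remaining pages have no PTEs. */
	memset(buf, 0, 64);

	/*
	 * On ARM Linux this expands to the cacheflush system call, which
	 * ends up in v6/v7_coherent_user_range().  The range includes
	 * pages that have never been faulted in, i.e. the case the 9001:
	 * fixup above skips over one page at a time.
	 */
	__builtin___clear_cache(buf, buf + len);

	printf("flushed %zu bytes\n", len);
	return 0;
}

With the patch applied, the cache maintenance loop steps past the
untouched pages instead of stopping at the first translation fault.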