From patchwork Fri Dec 16 18:35:35 2016 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mike Kravetz X-Patchwork-Id: 706523 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3tgJv15LD9z9t3K for ; Sat, 17 Dec 2016 05:37:01 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1759315AbcLPSg4 (ORCPT ); Fri, 16 Dec 2016 13:36:56 -0500 Received: from aserp1040.oracle.com ([141.146.126.69]:39051 "EHLO aserp1040.oracle.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1758855AbcLPSgT (ORCPT ); Fri, 16 Dec 2016 13:36:19 -0500 Received: from aserv0021.oracle.com (aserv0021.oracle.com [141.146.126.233]) by aserp1040.oracle.com (Sentrion-MTA-4.3.2/Sentrion-MTA-4.3.2) with ESMTP id uBGIa5bw025253 (version=TLSv1 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Fri, 16 Dec 2016 18:36:05 GMT Received: from userv0122.oracle.com (userv0122.oracle.com [156.151.31.75]) by aserv0021.oracle.com (8.13.8/8.14.4) with ESMTP id uBGIa4hq022524 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Fri, 16 Dec 2016 18:36:05 GMT Received: from abhmp0002.oracle.com (abhmp0002.oracle.com [141.146.116.8]) by userv0122.oracle.com (8.14.4/8.14.4) with ESMTP id uBGIa4DX020219; Fri, 16 Dec 2016 18:36:04 GMT Received: from monkey.oracle.com (/50.188.161.229) by default (Oracle Beehive Gateway v4.0) with ESMTP ; Fri, 16 Dec 2016 10:36:04 -0800 From: Mike Kravetz To: sparclinux@vger.kernel.org, linux-mm@kvack.org, linux-kernel@vger.kernel.org Cc: "David S . Miller" , Bob Picco , Nitin Gupta , Vijay Kumar , Julian Calaby , Adam Buchbinder , "Kirill A . 
Shutemov" , Michal Hocko , Andrew Morton , Mike Kravetz Subject: [RFC PATCH 12/14] mm: add mmap and shmat arch hooks for shared context Date: Fri, 16 Dec 2016 10:35:35 -0800 Message-Id: <1481913337-9331-13-git-send-email-mike.kravetz@oracle.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1481913337-9331-1-git-send-email-mike.kravetz@oracle.com> References: <1481913337-9331-1-git-send-email-mike.kravetz@oracle.com> X-Source-IP: aserv0021.oracle.com [141.146.126.233] Sender: sparclinux-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: sparclinux@vger.kernel.org Shared context will require some additional checking and processing when mappings are created. To faciliate this, add new mmap hooks arch_pre_mmap_flags and arch_post_mmap to generic mm_hooks. For shmat, a new hook arch_shmat_check is added. Signed-off-by: Mike Kravetz --- arch/powerpc/include/asm/mmu_context.h | 12 ++++++++++++ arch/s390/include/asm/mmu_context.h | 12 ++++++++++++ arch/unicore32/include/asm/mmu_context.h | 12 ++++++++++++ arch/x86/include/asm/mmu_context.h | 12 ++++++++++++ include/asm-generic/mm_hooks.h | 18 +++++++++++++++--- ipc/shm.c | 13 +++++++++++++ mm/mmap.c | 10 ++++++++++ 7 files changed, 86 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 5c45114..d5ce33a 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -133,6 +133,18 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } +static inline unsigned long arch_pre_mmap_flags(struct file *file, + unsigned long flags, + vm_flags_t *vm_flags) +{ + return 0; /* no errors */ +} + +static inline void arch_post_mmap(struct mm_struct *mm, unsigned long addr, + vm_flags_t vm_flags) +{ +} + static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 515fea5..0a2322d 100644 --- 
a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -129,6 +129,18 @@ static inline void activate_mm(struct mm_struct *prev, set_user_asce(next); } +static inline unsigned long arch_pre_mmap_flags(struct file *file, + unsigned long flags, + vm_flags_t *vm_flags) +{ + return 0; /* no errors */ +} + +static inline void arch_post_mmap(struct mm_struct *mm, unsigned long addr, + vm_flags_t vm_flags) +{ +} + static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 62dfc64..8b57b9d 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -81,6 +81,18 @@ do { \ } \ } while (0) +static inline unsigned long arch_pre_mmap_flags(struct file *file, + unsigned long flags, + vm_flags_t *vm_flags) +{ + return 0; /* no errors */ +} + +static inline void arch_post_mmap(struct mm_struct *mm, unsigned long addr, + vm_flags_t vm_flags) +{ +} + static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 8e0a9fe..fe60309 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -151,6 +151,18 @@ do { \ } while (0) #endif +static inline unsigned long arch_pre_mmap_flags(struct file *file, + unsigned long flags, + vm_flags_t *vm_flags) +{ + return 0; /* no errors */ +} + +static inline void arch_post_mmap(struct mm_struct *mm, unsigned long addr, + vm_flags_t vm_flags) +{ +} + static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index cc5d9a1..c742e52 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h @@ -1,11 +1,23 @@ /* - * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap - * and arch_unmap to 
be included in asm-FOO/mmu_context.h for any - * arch FOO which doesn't need to hook these. + * Define generic no-op hooks for mmap and protection related routines + * to be included in asm-FOO/mmu_context.h for any arch FOO which doesn't + * need to hook these. */ #ifndef _ASM_GENERIC_MM_HOOKS_H #define _ASM_GENERIC_MM_HOOKS_H +static inline unsigned long arch_pre_mmap_flags(struct file *file, + unsigned long flags, + vm_flags_t *vm_flags) +{ + return 0; /* no errors */ +} + +static inline void arch_post_mmap(struct mm_struct *mm, unsigned long addr, + vm_flags_t vm_flags) +{ +} + static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { diff --git a/ipc/shm.c b/ipc/shm.c index dbac886..dab6cd1 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -72,6 +72,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif +#ifndef arch_shmat_check +#define arch_shmat_check(file, shmflg, flags) (0) +#endif + void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; @@ -1149,6 +1153,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, goto out_unlock; } + /* arch specific check and possible flag modification */ + err = arch_shmat_check(shp->shm_file, shmflg, &flags); + if (err) + goto out_unlock; + err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; diff --git a/mm/mmap.c b/mm/mmap.c index 1af87c1..7fc946b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1307,6 +1307,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long pgoff, unsigned long *populate) { struct mm_struct *mm = current->mm; + unsigned long ret; int pkey = 0; *populate = 0; @@ -1314,6 +1315,11 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!len) return -EINVAL; + /* arch specific check and possible modification of 
vm_flags */ + ret = arch_pre_mmap_flags(file, flags, &vm_flags); + if (ret) + return ret; + /* * Does the application expect PROT_READ to imply PROT_EXEC? * @@ -1452,6 +1458,10 @@ unsigned long do_mmap(struct file *file, unsigned long addr, ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; + + if (!IS_ERR_VALUE(addr)) + arch_post_mmap(mm, addr, vm_flags); + return addr; }