From patchwork Wed Mar 30 17:50:54 2016
X-Patchwork-Submitter: "Andre Vieira (lists)"
X-Patchwork-Id: 603553
To: GCC Patches
From: "Andre Vieira (lists)"
Subject: [PATCH 7/8, GCC, V8M][arm-embedded] ARMv8-M Security Extension's cmse_nonsecure_call: use __gnu_cmse_nonsecure_call
Message-ID: <56FC11FE.1020202@arm.com>
Date: Wed, 30 Mar 2016 18:50:54 +0100

Hi,

Applied https://gcc.gnu.org/ml/gcc-patches/2016-01/msg02334.html on
embedded-5-branch using the included patch, at revision r234587.

*** gcc/ ***
2016-03-30  Andre Vieira
	    Thomas Preud'homme

	* config/arm/arm.c (detect_cmse_nonsecure_call): New.
	(cmse_nonsecure_call_clear_caller_saved): New.
	* config/arm/arm-protos.h (detect_cmse_nonsecure_call): New.
	* config/arm/arm.md (call): Handle cmse_nonsecure_entry.
	(call_value): Likewise.
	(nonsecure_call_internal): New.
(nonsecure_call_value_internal): New. * config/arm/thumb1.md (*nonsecure_call_reg_thumb1_v5): New. (*nonsecure_call_value_reg_thumb1_v5): New. * config/arm/thumb2.md (*nonsecure_call_reg_thumb2): New. (*nonsecure_call_value_reg_thumb2): New. * config/arm/unspecs.md (UNSPEC_NONSECURE_MEM): New. *** libgcc/ *** 2016-03-30 Andre Vieira Thomas Preud'homme * config/arm/cmse_nonsecure_call.S: New. * config/arm/t-arm: Compile cmse_nonsecure_call.S *** gcc/testsuite/ *** 2016-03-30 Andre Vieira Thomas Preud'homme * gcc.target/arm/cmse/baseline/cmse-11.c: New. * gcc.target/arm/cmse/baseline/cmse-13.c: New. * gcc.target/arm/cmse/baseline/cmse-6.c: New. * gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c: New. * gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c: New. * gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c: New. * gcc.target/arm/cmse/mainline/hard/cmse-13.c: New. * gcc.target/arm/cmse/mainline/hard/cmse-7.c: New. * gcc.target/arm/cmse/mainline/hard/cmse-8.c: New. * gcc.target/arm/cmse/mainline/soft/cmse-13.c: New. * gcc.target/arm/cmse/mainline/soft/cmse-7.c: New. * gcc.target/arm/cmse/mainline/soft/cmse-8.c: New. * gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c: New. * gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c: New. * gcc.target/arm/cmse/mainline/softfp/cmse-13.c: New. * gcc.target/arm/cmse/mainline/softfp/cmse-7.c: New. * gcc.target/arm/cmse/mainline/softfp/cmse-8.c: New. diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h index 066e2318967e11f0eeba79ef80d990c149992426..27173fea25df0b56bef68656d5f0224c5b817fde 100644 --- a/gcc/config/arm/arm-protos.h +++ b/gcc/config/arm/arm-protos.h @@ -135,6 +135,7 @@ extern int arm_const_double_inline_cost (rtx); extern bool arm_const_double_by_parts (rtx); extern bool arm_const_double_by_immediates (rtx); extern void arm_emit_call_insn (rtx, rtx, bool); +bool detect_cmse_nonsecure_call (tree); extern const char *output_call (rtx *); extern const char *output_call_mem (rtx *); void arm_emit_movpair (rtx, rtx); diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index d13bc2d49508863cf5b45a5f447a70fb468a115c..ec303e871f60485d06e35308b98154c1089bf330 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -17415,6 +17415,129 @@ note_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address, int do_pushes) return; } +/* Saves callee saved registers, clears callee saved registers and caller saved + registers not used to pass arguments before a cmse_nonsecure_call. And + restores the callee saved registers after. */ + +static void +cmse_nonsecure_call_clear_caller_saved (void) +{ + basic_block bb; + + FOR_EACH_BB_FN (bb, cfun) + { + rtx_insn *insn; + + FOR_BB_INSNS (bb, insn) + { + uint64_t to_clear_mask, float_mask; + rtx_insn *seq; + rtx pat, call, unspec, link, reg, cleared_reg, tmp; + unsigned int regno, maxregno; + rtx address; + + if (!NONDEBUG_INSN_P (insn)) + continue; + + if (!CALL_P (insn)) + continue; + + pat = PATTERN (insn); + gcc_assert (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 0); + call = XVECEXP (pat, 0, 0); + + /* Get the real call RTX if the insn sets a value, ie. returns. */ + if (GET_CODE (call) == SET) + call = SET_SRC (call); + + /* Check if it is a cmse_nonsecure_call. */ + unspec = XEXP (call, 0); + if (GET_CODE (unspec) != UNSPEC + || XINT (unspec, 1) != UNSPEC_NONSECURE_MEM) + continue; + + /* Determine the caller-saved registers we need to clear. 
*/ + to_clear_mask = (1LL << (NUM_ARG_REGS)) - 1; + maxregno = NUM_ARG_REGS - 1; + if (TARGET_HARD_FLOAT && TARGET_VFP) + { + float_mask = (1LL << (D7_VFP_REGNUM + 1)) - 1; + float_mask &= ~((1LL << FIRST_VFP_REGNUM) - 1); + to_clear_mask |= float_mask; + maxregno = D7_VFP_REGNUM; + } + + /* Make sure the register used to hold the function address is not + cleared. */ + address = RTVEC_ELT (XVEC (unspec, 0), 0); + gcc_assert (MEM_P (address)); + gcc_assert (REG_P (XEXP (address, 0))); + to_clear_mask &= ~(1LL << REGNO (XEXP (address, 0))); + + /* Set basic block of call insn so that df rescan is performed on + insns inserted here. */ + set_block_for_insn (insn, bb); + df_set_flags (DF_DEFER_INSN_RESCAN); + start_sequence (); + + /* Make sure the scheduler doesn't schedule other insns beyond here. */ + emit_insn (gen_blockage ()); + + /* Get the list of registers used by the function call. */ + link = CALL_INSN_FUNCTION_USAGE (insn); + for (; link != NULL; link = XEXP (link, 1)) + { + pat = XEXP (link, 0); + if (GET_CODE (pat) != USE) + continue; + + reg = XEXP (pat, 0); + to_clear_mask &= ~(1LL << REGNO (reg)); + if (ARM_NUM_REGS (GET_MODE (reg)) > 1) + to_clear_mask &= ~(1LL << (REGNO (reg) + 1)); + } + + /* We use right shift and left shift to clear the LSB of the address + we jump to instead of using bic, to avoid having to use an extra + register on thumb1. */ + cleared_reg = XEXP (address, 0); + tmp = gen_rtx_LSHIFTRT (SImode, cleared_reg, const1_rtx); + emit_insn (gen_rtx_SET (VOIDmode, cleared_reg, tmp)); + tmp = gen_rtx_ASHIFT (SImode, cleared_reg, const1_rtx); + emit_insn (gen_rtx_SET (VOIDmode, cleared_reg, tmp)); + + /* Clearing all registers that leak before doing a non-secure + call. */ + for (regno = 0; regno <= maxregno; regno++) + { + if (!(to_clear_mask & (1LL << regno))) + continue; + + /* If regno is a vfp register, even and its successor is also to + be cleared, use vmov. */ + if (IS_VFP_REGNUM (regno)) + { + if (TARGET_VFP_DOUBLE + && VFP_REGNO_OK_FOR_DOUBLE (regno) + && to_clear_mask & (1LL << (regno + 1))) + emit_move_insn (gen_rtx_REG (DFmode, regno++), + CONST1_RTX (DFmode)); + else + emit_move_insn (gen_rtx_REG (SFmode, regno), + CONST1_RTX (SFmode)); + } + else + emit_move_insn (gen_rtx_REG (SImode, regno), cleared_reg); + } + + seq = get_insns (); + end_sequence (); + emit_insn_before (seq, insn); + + } + } +} + /* Rewrite move insn into subtract of 0 if the condition codes will be useful in next conditional jump insn. */ @@ -17708,6 +17831,8 @@ arm_reorg (void) HOST_WIDE_INT address = 0; Mfix * fix; + if (use_cmse) + cmse_nonsecure_call_clear_caller_saved (); if (TARGET_THUMB1) thumb1_reorg (); else if (TARGET_THUMB2) @@ -18086,6 +18211,23 @@ vfp_emit_fstmd (int base_reg, int count) return count * 8; } +/* Returns true if -mcmse has been passed and the function pointed to by 'addr' + has the cmse_nonsecure_call attribute and returns false otherwise. */ + +bool +detect_cmse_nonsecure_call (tree addr) +{ + if (!addr) + return FALSE; + + tree fntype = TREE_TYPE (addr); + if (use_cmse && lookup_attribute ("cmse_nonsecure_call", + TYPE_ATTRIBUTES (fntype))) + return TRUE; + return FALSE; +} + + /* Emit a call instruction with pattern PAT. ADDR is the address of the call target. */ @@ -25237,7 +25379,8 @@ cmse_nonsecure_entry_clear_before_return (void) /* Also fill the top half of the negated padding_bits_to_clear. 
*/ if (((~padding_bits_to_clear) >> 16) > 0) - emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (SImode, reg_rtx, + emit_insn (gen_rtx_SET (VOIDmode, + gen_rtx_ZERO_EXTRACT (SImode, reg_rtx, GEN_INT (16), GEN_INT (16)), GEN_INT ((~padding_bits_to_clear) >> 16))); diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md index 0c06c22c504630a284df45e1d501fbd427edfdf7..e774b735976d6bee723ed9da38e96de7ba5c4603 100644 --- a/gcc/config/arm/arm.md +++ b/gcc/config/arm/arm.md @@ -7595,6 +7595,7 @@ " { rtx callee, pat; + tree addr = MEM_EXPR (operands[0]); /* In an untyped call, we can get NULL for operand 2. */ if (operands[2] == NULL_RTX) @@ -7609,8 +7610,17 @@ : !REG_P (callee)) XEXP (operands[0], 0) = force_reg (Pmode, callee); - pat = gen_call_internal (operands[0], operands[1], operands[2]); - arm_emit_call_insn (pat, XEXP (operands[0], 0), false); + if (detect_cmse_nonsecure_call (addr)) + { + pat = gen_nonsecure_call_internal (operands[0], operands[1], + operands[2]); + emit_call_insn (pat); + } + else + { + pat = gen_call_internal (operands[0], operands[1], operands[2]); + arm_emit_call_insn (pat, XEXP (operands[0], 0), false); + } DONE; }" ) @@ -7621,6 +7631,24 @@ (use (match_operand 2 "" "")) (clobber (reg:SI LR_REGNUM))])]) +(define_expand "nonsecure_call_internal" + [(parallel [(call (unspec:SI [(match_operand 0 "memory_operand" "")] + UNSPEC_NONSECURE_MEM) + (match_operand 1 "general_operand" "")) + (use (match_operand 2 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (reg:SI 4))])] + "" + " + { + rtx tmp; + tmp = copy_to_suggested_reg (XEXP (operands[0], 0), + gen_rtx_REG (SImode, 4), + SImode); + + operands[0] = replace_equiv_address (operands[0], tmp); + }") + (define_insn "*call_reg_armv5" [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r")) (match_operand 1 "" "")) @@ -7673,6 +7701,7 @@ " { rtx pat, callee; + tree addr = MEM_EXPR (operands[1]); /* In an untyped call, we can get NULL for operand 2. 
*/ if (operands[3] == 0) @@ -7687,9 +7716,18 @@ : !REG_P (callee)) XEXP (operands[1], 0) = force_reg (Pmode, callee); - pat = gen_call_value_internal (operands[0], operands[1], - operands[2], operands[3]); - arm_emit_call_insn (pat, XEXP (operands[1], 0), false); + if (detect_cmse_nonsecure_call (addr)) + { + pat = gen_nonsecure_call_value_internal (operands[0], operands[1], + operands[2], operands[3]); + emit_call_insn (pat); + } + else + { + pat = gen_call_value_internal (operands[0], operands[1], + operands[2], operands[3]); + arm_emit_call_insn (pat, XEXP (operands[1], 0), false); + } DONE; }" ) @@ -7701,6 +7739,25 @@ (use (match_operand 3 "" "")) (clobber (reg:SI LR_REGNUM))])]) +(define_expand "nonsecure_call_value_internal" + [(parallel [(set (match_operand 0 "" "") + (call (unspec:SI [(match_operand 1 "memory_operand" "")] + UNSPEC_NONSECURE_MEM) + (match_operand 2 "general_operand" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (reg:SI 4))])] + "" + " + { + rtx tmp; + tmp = copy_to_suggested_reg (XEXP (operands[1], 0), + gen_rtx_REG (SImode, 4), + SImode); + + operands[1] = replace_equiv_address (operands[1], tmp); + }") + (define_insn "*call_value_reg_armv5" [(set (match_operand 0 "" "") (call (mem:SI (match_operand:SI 1 "s_register_operand" "r")) diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md index 5be15d7297650fe9c81a0fe31f94b65be7aeba14..707cc9d20d560296af0873b4a9ef3ae3613ce906 100644 --- a/gcc/config/arm/thumb1.md +++ b/gcc/config/arm/thumb1.md @@ -1705,6 +1705,19 @@ (set_attr "type" "call")] ) +(define_insn "*nonsecure_call_reg_thumb1_v5" + [(call (unspec:SI [(mem:SI (match_operand:SI 0 "register_operand" "l*r"))] + UNSPEC_NONSECURE_MEM) + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (match_dup 0))] + "TARGET_THUMB1 && use_cmse && !SIBLING_CALL_P (insn)" + "bl\\t__gnu_cmse_nonsecure_call" + [(set_attr "length" "4") + (set_attr "type" "call")] +) + (define_insn "*call_reg_thumb1" [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r")) (match_operand 1 "" "")) @@ -1737,6 +1750,21 @@ (set_attr "type" "call")] ) +(define_insn "*nonsecure_call_value_reg_thumb1_v5" + [(set (match_operand 0 "" "") + (call (unspec:SI + [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))] + UNSPEC_NONSECURE_MEM) + (match_operand 2 "" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (match_dup 1))] + "TARGET_THUMB1 && use_cmse" + "bl\\t__gnu_cmse_nonsecure_call" + [(set_attr "length" "4") + (set_attr "type" "call")] +) + (define_insn "*call_value_reg_thumb1" [(set (match_operand 0 "" "") (call (mem:SI (match_operand:SI 1 "register_operand" "l*r")) diff --git a/gcc/config/arm/thumb2.md b/gcc/config/arm/thumb2.md index 245227e0213fa2af57fae6706c4178470b8a37cb..4d48d7dec58e1394267ea05e701a60909e76ebed 100644 --- a/gcc/config/arm/thumb2.md +++ b/gcc/config/arm/thumb2.md @@ -571,6 +571,19 @@ [(set_attr "type" "call")] ) +(define_insn "*nonsecure_call_reg_thumb2" + [(call (unspec:SI [(mem:SI (match_operand:SI 0 "s_register_operand" "r"))] + UNSPEC_NONSECURE_MEM) + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (match_dup 0))] + "TARGET_THUMB2 && use_cmse" + "bl\\t__gnu_cmse_nonsecure_call" + [(set_attr "length" "4") + (set_attr "type" "call")] +) + (define_insn "*call_value_reg_thumb2" [(set (match_operand 0 "" "") (call (mem:SI (match_operand:SI 1 "register_operand" "l*r")) @@ -582,6 +595,21 @@ [(set_attr "type" 
"call")] ) +(define_insn "*nonsecure_call_value_reg_thumb2" + [(set (match_operand 0 "" "") + (call + (unspec:SI [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))] + UNSPEC_NONSECURE_MEM) + (match_operand 2 "" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:SI LR_REGNUM)) + (clobber (match_dup 1))] + "TARGET_THUMB2 && use_cmse" + "bl\t__gnu_cmse_nonsecure_call" + [(set_attr "length" "4") + (set_attr "type" "call")] +) + (define_insn "*thumb2_indirect_jump" [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))] diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md index 0ec2c48abea746df7c8006a82b4e9c43ae3ebd86..830857815b9f4800744c6ec7280de7ccbe8c8d89 100644 --- a/gcc/config/arm/unspecs.md +++ b/gcc/config/arm/unspecs.md @@ -83,6 +83,8 @@ ; FPSCR rounding mode and signal inexactness. UNSPEC_VRINTA ; Represent a float to integral float rounding ; towards nearest, ties away from zero. + UNSPEC_NONSECURE_MEM ; Represent non-secure memory in ARMv8-M with + ; security extension ]) (define_c_enum "unspec" [ diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c new file mode 100644 index 0000000000000000000000000000000000000000..3007409ad88b9d2312ece4d55f3695787a1aa566 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_base_ok } */ +/* { dg-add-options arm_arch_v8m_base } */ +/* { dg-options "-mcmse" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (int); + +int +foo (int a) +{ + return bar (bar (a + 1)); +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "movs\tr1, r4" } } */ +/* { dg-final { scan-assembler "movs\tr2, r4" } } */ +/* { dg-final { scan-assembler "movs\tr3, r4" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c new file mode 100644 index 0000000000000000000000000000000000000000..f2b931be5912b421f8427535ded3298883bec639 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c @@ -0,0 +1,25 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_base_ok } */ +/* { dg-add-options arm_arch_v8m_base } */ +/* { dg-options "-mcmse" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double); + +int +foo (int a) +{ + return bar (1.0f, 2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "movs\tr0, r4" } } */ +/* { dg-final { scan-assembler "\n\tmovs\tr1, r4" } } */ +/* { dg-final { scan-assembler-not "\n\tmovs\tr2, r4\n\tmovs\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov" } } */ +/* { dg-final { scan-assembler-not "vmsr" } } */ + +/* Now we check that we use the correct intrinsic to call. 
*/ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c new file mode 100644 index 0000000000000000000000000000000000000000..95da045690ae170a101949134482e821f27fe3aa --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c @@ -0,0 +1,21 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_base_ok } */ +/* { dg-add-options arm_arch_v8m_base } */ +/* { dg-options "-mcmse" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Remember dont clear r0 and r1, because we are passing the double parameter + * for bar in them. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "movs\tr2, r4" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c index 2846ce6cc203f1810977656bdc09ff86419b7007..1dd82b5cee735025054e517d1bbd9c6ab1ad462a 100644 --- a/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c +++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c @@ -3,11 +3,19 @@ /* { dg-skip-if "Testing exclusion of -mcmse" { arm-*-* } { "-mcmse" } { "" } } */ +void __attribute__ ((cmse_nonsecure_call)) (*bar) (int); +typedef void __attribute__ ((cmse_nonsecure_call)) baz (int); + int __attribute__ ((cmse_nonsecure_entry)) -foo (int a) +foo (int a, baz b) { + bar (a); + b (a); return a + 1; } +/* { dg-final { scan-assembler-not "bxns" } } */ +/* { dg-final { scan-assembler-not "blxns" } } */ +/* { dg-final { scan-assembler-not "bl\t__gnu_cmse_nonsecure_call" } } */ /* { dg-final { scan-assembler "foo:" } } */ /* { dg-final { scan-assembler-not "__acle_se_foo:" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c new file mode 100644 index 0000000000000000000000000000000000000000..285fa854b3d61c4d856e9be863ee73a87f1b11a9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c @@ -0,0 +1,43 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */ + + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double); + +int +foo (int a) +{ + return bar (3.0f, 2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts2, #1\.0" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c new file mode 100644 index 0000000000000000000000000000000000000000..8f56aaf2a97e740a77473cab245614b6fa696c04 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c @@ -0,0 +1,42 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (void); + +int +foo (int a) +{ + return bar () + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c new file mode 100644 index 0000000000000000000000000000000000000000..749f8a7278cb4ea9fd4462887ce7f37b57f83248 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c @@ -0,0 +1,41 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c new file mode 100644 index 0000000000000000000000000000000000000000..4ad5068aac263f1c2edb17b8707fc9d1c0af6959 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c @@ -0,0 +1,35 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */ + + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double); + +int +foo (int a) +{ + return bar (3.0f, 2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler-not "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. 
*/ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c new file mode 100644 index 0000000000000000000000000000000000000000..b18259eaef6da1933ef2d58d63b97d4bb4e450f4 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c @@ -0,0 +1,34 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (void); + +int +foo (int a) +{ + return bar () + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c new file mode 100644 index 0000000000000000000000000000000000000000..182ddf9540374f30ae3694259f1edb62161455e1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c @@ -0,0 +1,33 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c new file mode 100644 index 0000000000000000000000000000000000000000..dbbd262c89085eb0dc8737ad7955c3a12531492b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c @@ -0,0 +1,27 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */ +/* { dg-options "-mcmse -mfloat-abi=soft" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double); + +int +foo (int a) +{ + return bar (1.0f, 2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov" } } */ +/* { dg-final { scan-assembler-not "vmsr" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c new file mode 100644 index 0000000000000000000000000000000000000000..e33568400ef830abaa019b5c2727a9f3b5d5704b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c @@ -0,0 +1,27 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */ +/* { dg-options "-mcmse -mfloat-abi=soft" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (void); + +int +foo (int a) +{ + return bar () + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov" } } */ +/* { dg-final { scan-assembler-not "vmsr" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c new file mode 100644 index 0000000000000000000000000000000000000000..024a12e0a414730eb1cbebfca8574d548238ab28 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */ +/* { dg-options "-mcmse -mfloat-abi=soft" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler-not "vmov" } } */ +/* { dg-final { scan-assembler-not "vmsr" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c new file mode 100644 index 0000000000000000000000000000000000000000..0cf1a68267ee1b3b0207e149e17b3d3fbcc57e40 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c @@ -0,0 +1,42 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */ +/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (void); + +int +foo (int a) +{ + return bar () + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c new file mode 100644 index 0000000000000000000000000000000000000000..e9be53a42f1d62929327a531dda9544bd4c2df62 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c @@ -0,0 +1,41 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */ +/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c new file mode 100644 index 0000000000000000000000000000000000000000..515fd22b9da2216d3f27ed4b0535dc3ab88e0eea --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c @@ -0,0 +1,34 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double); + +int +foo (int a) +{ + return bar (1.0f, 2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "\n\tmov\tr1, r4" } } */ +/* { dg-final { scan-assembler-not "\n\tmov\tr2, r4\n\tmov\tr3, r4" } } */ + +/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. 
*/ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c new file mode 100644 index 0000000000000000000000000000000000000000..fbe3f6d4fd10700269995cde4aab0f105165c56d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c @@ -0,0 +1,34 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (void); + +int +foo (int a) +{ + return bar () + a + 1; +} + +/* Checks for saving and clearing prior to function call. */ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ + diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c new file mode 100644 index 0000000000000000000000000000000000000000..4ae1b2f90f6164b4252029fbdac2e9f820d0741c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c @@ -0,0 +1,33 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_arch_v8m_main_ok } */ +/* { dg-add-options arm_arch_v8m_main } */ +/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */ +/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */ +/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */ + +int __attribute__ ((cmse_nonsecure_call)) (*bar) (double); + +int +foo (int a) +{ + return bar (2.0) + a + 1; +} + +/* Checks for saving and clearing prior to function call. 
*/ +/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */ +/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */ +/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */ +/* { dg-final { scan-assembler "mov\tr2, r4" } } */ +/* { dg-final { scan-assembler "mov\tr3, r4" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */ +/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */ + +/* Now we check that we use the correct intrinsic to call. */ +/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */ diff --git a/libgcc/config/arm/cmse_nonsecure_call.S b/libgcc/config/arm/cmse_nonsecure_call.S new file mode 100644 index 0000000000000000000000000000000000000000..a14b583486b2462cd91f42ab46eb80d1f04caf8a --- /dev/null +++ b/libgcc/config/arm/cmse_nonsecure_call.S @@ -0,0 +1,113 @@ +/* CMSE wrapper function used to save, clear and restore callee saved registers + for cmse_nonsecure_call's. + + Copyright (C) 2016 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This file is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +.syntax unified +.thumb +.global __gnu_cmse_nonsecure_call +__gnu_cmse_nonsecure_call: +#if defined(__ARM_ARCH_8M_MAIN__) +push {r5-r11,lr} +mov r5, r4 +mov r6, r4 +mov r7, r4 +mov r8, r4 +mov r9, r4 +mov r10, r4 +mov r11, r4 +mov ip, r4 +#if !defined(__SOFTFP__) +vpush.f64 {d8-d15} +#if __ARM_FP & 0x04 +vmov.f32 s16, #1.00000 +vmov.f32 s17, #1.00000 +vmov.f32 s18, #1.00000 +vmov.f32 s19, #1.00000 +vmov.f32 s20, #1.00000 +vmov.f32 s21, #1.00000 +vmov.f32 s22, #1.00000 +vmov.f32 s23, #1.00000 +vmov.f32 s24, #1.00000 +vmov.f32 s25, #1.00000 +vmov.f32 s26, #1.00000 +vmov.f32 s27, #1.00000 +vmov.f32 s28, #1.00000 +vmov.f32 s29, #1.00000 +vmov.f32 s30, #1.00000 +vmov.f32 s31, #1.00000 +#elif __ARM_FP & 0x08 +vmov.f64 d8, #1.00000 +vmov.f64 d9, #1.00000 +vmov.f64 d10, #1.00000 +vmov.f64 d11, #1.00000 +vmov.f64 d12, #1.00000 +vmov.f64 d13, #1.00000 +vmov.f64 d14, #1.00000 +vmov.f64 d15, #1.00000 +#else +#error "Half precision implementation not supported." 
+#endif +vmsr fpscr, r4 +#endif +#if defined(__ARM_FEATURE_DSP) +msr APSR_nzcvqg, r4 +#else +msr APSR_nzcvq, r4 +#endif +blxns r4 +#if !defined(__SOFTFP__) +vpop.f64 {d8-d15} +#endif +pop {r5-r11} +#elif defined (__ARM_ARCH_8M_BASE__) +push {r5-r7} +mov r5, r8 +mov r6, r9 +mov r7, r10 +push {r5-r7} +mov r5, r11 +mov r6, lr +push {r5,r6} +mov r5, r4 +mov r6, r4 +mov r7, r4 +mov r8, r4 +mov r9, r4 +mov r10, r4 +mov r11, r4 +mov ip, r4 +msr APSR_nzcvq, r4 +blxns r4 +pop {r5, r6} +mov lr, r6 +mov r11, r5 +pop {r5-r7} +mov r10, r7 +mov r9, r6 +mov r8, r5 +pop {r5-r7} +blx lr +#else +#error "This should only be used for armv8-m base- and mainline." +#endif diff --git a/libgcc/config/arm/t-arm b/libgcc/config/arm/t-arm index 5618143bfd0f02b170db3f9e4c0a15cecb403cec..9e85ac06b146feaa14ab83374a0156737b870310 100644 --- a/libgcc/config/arm/t-arm +++ b/libgcc/config/arm/t-arm @@ -12,4 +12,6 @@ libgcc-objects += cmse.o cmse_nonsecure_call.o cmse.o: $(srcdir)/config/arm/cmse.c $(gcc_compile) -c $(CMSE_OPTS) $< +cmse_nonsecure_call.o: $(srcdir)/config/arm/cmse_nonsecure_call.S + $(gcc_compile) -c $< endif
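
For reference, here is a minimal sketch of the user-visible side of this
patch, distilled from the new tests above.  The file name, the function
names and the exact compiler invocation are illustrative only; the
attribute, the -mcmse option and the behaviour described in the comments
come from the patch itself.

/* nonsecure-call-example.c: calling non-secure code from secure code on
   ARMv8-M.  Build for an ARMv8-M target, e.g.:
     arm-none-eabi-gcc -mcmse -march=armv8-m.main -c nonsecure-call-example.c  */

/* A pointer to a non-secure function.  The cmse_nonsecure_call attribute
   tells GCC that calls through it cross the security boundary.  */
int __attribute__ ((cmse_nonsecure_call)) (*get_counter) (int bank);

int
read_counter (int bank)
{
  /* With -mcmse, GCC moves the call target into r4, clears its LSB with
     a lsrs/lsls pair, clears the caller-saved core and (on targets with
     an FPU) VFP registers that do not carry arguments, and emits
     "bl __gnu_cmse_nonsecure_call" instead of a plain blx.  The libgcc
     wrapper then saves, clears and restores the callee-saved registers
     around the actual blxns into non-secure code.  */
  return get_counter (bank) + 1;
}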