@@ -131,6 +131,7 @@ extern int arm_const_double_inline_cost (rtx);
extern bool arm_const_double_by_parts (rtx);
extern bool arm_const_double_by_immediates (rtx);
extern void arm_emit_call_insn (rtx, rtx, bool);
+extern bool detect_cmse_nonsecure_call (tree);
extern const char *output_call (rtx *);
void arm_emit_movpair (rtx, rtx);
extern const char *output_mov_long_double_arm_from_arm (rtx *);
@@ -17450,6 +17450,129 @@ note_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address, int do_pushes)
return;
}
+/* Before a cmse_nonsecure_call, save the callee-saved registers, then clear
+ the callee-saved registers and the caller-saved registers not used to pass
+ arguments; restore the callee-saved registers after the call. */
+
+static void
+cmse_nonsecure_call_clear_caller_saved (void)
+{
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ rtx_insn *insn;
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ uint64_t to_clear_mask, float_mask;
+ rtx_insn *seq;
+ rtx pat, call, unspec, link, reg, cleared_reg, tmp;
+ unsigned int regno, maxregno;
+ rtx address;
+
+ if (!NONDEBUG_INSN_P (insn))
+ continue;
+
+ if (!CALL_P (insn))
+ continue;
+
+ pat = PATTERN (insn);
+ gcc_assert (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 0);
+ call = XVECEXP (pat, 0, 0);
+
+ /* Get the real call RTX if the insn sets a value, i.e. returns. */
+ if (GET_CODE (call) == SET)
+ call = SET_SRC (call);
+
+ /* Check if it is a cmse_nonsecure_call. */
+ unspec = XEXP (call, 0);
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_NONSECURE_MEM)
+ continue;
+
+ /* Determine the caller-saved registers we need to clear. */
+ to_clear_mask = (1LL << (NUM_ARG_REGS)) - 1;
+ maxregno = NUM_ARG_REGS - 1;
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ {
+ float_mask = (1LL << (D7_VFP_REGNUM + 1)) - 1;
+ float_mask &= ~((1LL << FIRST_VFP_REGNUM) - 1);
+ to_clear_mask |= float_mask;
+ maxregno = D7_VFP_REGNUM;
+ }
+
+ /* Make sure the register used to hold the function address is not
+ cleared. */
+ address = RTVEC_ELT (XVEC (unspec, 0), 0);
+ gcc_assert (MEM_P (address));
+ gcc_assert (REG_P (XEXP (address, 0)));
+ to_clear_mask &= ~(1LL << REGNO (XEXP (address, 0)));
+
+ /* Set basic block of call insn so that df rescan is performed on
+ insns inserted here. */
+ set_block_for_insn (insn, bb);
+ df_set_flags (DF_DEFER_INSN_RESCAN);
+ start_sequence ();
+
+ /* Make sure the scheduler doesn't schedule other insns beyond here. */
+ emit_insn (gen_blockage ());
+
+ /* Get the list of registers used by the function call. */
+ link = CALL_INSN_FUNCTION_USAGE (insn);
+ for (; link != NULL; link = XEXP (link, 1))
+ {
+ pat = XEXP (link, 0);
+ if (GET_CODE (pat) != USE)
+ continue;
+
+ reg = XEXP (pat, 0);
+ /* Clear the mask bit for every register the argument occupies, not just
+ the first one; multi-word arguments use consecutive registers. */
+ for (regno = REGNO (reg);
+ regno < REGNO (reg) + ARM_NUM_REGS (GET_MODE (reg));
+ regno++)
+ to_clear_mask &= ~(1LL << regno);
+ }
+
+ /* We use a right shift followed by a left shift to clear the LSB of the
+ address we jump to, instead of using bic, to avoid needing an extra
+ register on Thumb-1. */
+ cleared_reg = XEXP (address, 0);
+ tmp = gen_rtx_LSHIFTRT (SImode, cleared_reg, const1_rtx);
+ emit_insn (gen_rtx_SET (cleared_reg, tmp));
+ tmp = gen_rtx_ASHIFT (SImode, cleared_reg, const1_rtx);
+ emit_insn (gen_rtx_SET (cleared_reg, tmp));
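+ /* CLEARED_REG now holds the target address with its LSB cleared: a value
+ the non-secure side can already see, so it is also safe to use below for
+ clearing the core argument registers. */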
+
+ /* Clear all caller-saved registers that could leak information before
+ doing the non-secure call. */
+ for (regno = 0; regno <= maxregno; regno++)
+ {
+ if (!(to_clear_mask & (1LL << regno)))
+ continue;
+
+ /* If regno is an even vfp register and its successor is also to
+ be cleared, use a double-precision vmov. */
+ if (IS_VFP_REGNUM (regno))
+ {
+ if (TARGET_VFP_DOUBLE
+ && VFP_REGNO_OK_FOR_DOUBLE (regno)
+ && to_clear_mask & (1LL << (regno + 1)))
+ emit_move_insn (gen_rtx_REG (DFmode, regno++),
+ CONST1_RTX (DFmode));
+ else
+ emit_move_insn (gen_rtx_REG (SFmode, regno),
+ CONST1_RTX (SFmode));
+ }
+ else
+ emit_move_insn (gen_rtx_REG (SImode, regno), cleared_reg);
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ }
+ }
+}
+
/* Rewrite move insn into subtract of 0 if the condition codes will
be useful in next conditional jump insn. */
@@ -17750,6 +17873,8 @@ arm_reorg (void)
HOST_WIDE_INT address = 0;
Mfix * fix;
+ if (use_cmse)
+ cmse_nonsecure_call_clear_caller_saved ();
+
if (TARGET_THUMB1)
thumb1_reorg ();
else if (TARGET_THUMB2)
@@ -18125,6 +18250,23 @@ vfp_emit_fstmd (int base_reg, int count)
return count * 8;
}
+/* Return true if -mcmse has been passed and the type of the function pointed
+ to by ADDR has the cmse_nonsecure_call attribute, false otherwise. */
+
+bool
+detect_cmse_nonsecure_call (tree addr)
+{
+ if (!addr)
+ return false;
+
+ tree fntype = TREE_TYPE (addr);
+ if (use_cmse && lookup_attribute ("cmse_nonsecure_call",
+ TYPE_ATTRIBUTES (fntype)))
+ return true;
+ return false;
+}
+
/* Emit a call instruction with pattern PAT. ADDR is the address of
the call target. */
@@ -7628,6 +7628,7 @@
"
{
rtx callee, pat;
+ tree addr = MEM_EXPR (operands[0]);
/* In an untyped call, we can get NULL for operand 2. */
if (operands[2] == NULL_RTX)
@@ -7642,8 +7643,17 @@
: !REG_P (callee))
XEXP (operands[0], 0) = force_reg (Pmode, callee);
- pat = gen_call_internal (operands[0], operands[1], operands[2]);
- arm_emit_call_insn (pat, XEXP (operands[0], 0), false);
+ if (detect_cmse_nonsecure_call (addr))
+ {
+ pat = gen_nonsecure_call_internal (operands[0], operands[1],
+ operands[2]);
+ emit_call_insn (pat);
+ }
+ else
+ {
+ pat = gen_call_internal (operands[0], operands[1], operands[2]);
+ arm_emit_call_insn (pat, XEXP (operands[0], 0), false);
+ }
DONE;
}"
)
@@ -7654,6 +7664,24 @@
(use (match_operand 2 "" ""))
(clobber (reg:SI LR_REGNUM))])])
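+;; A call through a pointer with the cmse_nonsecure_call attribute.  The
+;; callee address is wrapped in UNSPEC_NONSECURE_MEM and forced into r4
+;; (hence the clobber), which is where the __gnu_cmse_nonsecure_call
+;; library function expects it.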
+(define_expand "nonsecure_call_internal"
+ [(parallel [(call (unspec:SI [(match_operand 0 "memory_operand" "")]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI 4))])]
+ ""
+ "
+ {
+ rtx tmp;
+ tmp = copy_to_suggested_reg (XEXP (operands[0], 0),
+ gen_rtx_REG (SImode, 4),
+ SImode);
+
+ operands[0] = replace_equiv_address (operands[0], tmp);
+ }")
+
(define_insn "*call_reg_armv5"
[(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
(match_operand 1 "" ""))
@@ -7689,6 +7717,7 @@
"
{
rtx pat, callee;
+ tree addr = MEM_EXPR (operands[1]);
/* In an untyped call, we can get NULL for operand 2. */
if (operands[3] == 0)
@@ -7703,9 +7732,18 @@
: !REG_P (callee))
XEXP (operands[1], 0) = force_reg (Pmode, callee);
- pat = gen_call_value_internal (operands[0], operands[1],
- operands[2], operands[3]);
- arm_emit_call_insn (pat, XEXP (operands[1], 0), false);
+ if (detect_cmse_nonsecure_call (addr))
+ {
+ pat = gen_nonsecure_call_value_internal (operands[0], operands[1],
+ operands[2], operands[3]);
+ emit_call_insn (pat);
+ }
+ else
+ {
+ pat = gen_call_value_internal (operands[0], operands[1],
+ operands[2], operands[3]);
+ arm_emit_call_insn (pat, XEXP (operands[1], 0), false);
+ }
DONE;
}"
)
@@ -7717,6 +7755,25 @@
(use (match_operand 3 "" ""))
(clobber (reg:SI LR_REGNUM))])])
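+;; As nonsecure_call_internal, but for calls that return a value.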
+(define_expand "nonsecure_call_value_internal"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (unspec:SI [(match_operand 1 "memory_operand" "")]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "general_operand" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI 4))])]
+ ""
+ "
+ {
+ rtx tmp;
+ tmp = copy_to_suggested_reg (XEXP (operands[1], 0),
+ gen_rtx_REG (SImode, 4),
+ SImode);
+
+ operands[1] = replace_equiv_address (operands[1], tmp);
+ }")
+
(define_insn "*call_value_reg_armv5"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
@@ -1705,6 +1705,19 @@
(set_attr "type" "call")]
)
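+;; The nonsecure call patterns below go through the
+;; __gnu_cmse_nonsecure_call library function, which expects the target
+;; address in r4 and clobbers it, hence the clobber of the address operand.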
+(define_insn "*nonsecure_call_reg_thumb1_v5"
+ [(call (unspec:SI [(mem:SI (match_operand:SI 0 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 0))]
+ "TARGET_THUMB1 && use_cmse && !SIBLING_CALL_P (insn)"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_reg_thumb1"
[(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
(match_operand 1 "" ""))
@@ -1737,6 +1750,21 @@
(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_value_reg_thumb1_v5"
+ [(set (match_operand 0 "" "")
+ (call (unspec:SI
+ [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 1))]
+ "TARGET_THUMB1 && use_cmse"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_value_reg_thumb1"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
@@ -581,6 +581,19 @@
[(set_attr "type" "call")]
)
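+;; As for Thumb-1, the nonsecure call patterns go through
+;; __gnu_cmse_nonsecure_call, which clobbers the address register.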
+(define_insn "*nonsecure_call_reg_thumb2"
+ [(call (unspec:SI [(mem:SI (match_operand:SI 0 "s_register_operand" "r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 0))]
+ "TARGET_THUMB2 && use_cmse"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_value_reg_thumb2"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
@@ -592,6 +605,21 @@
[(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_value_reg_thumb2"
+ [(set (match_operand 0 "" "")
+ (call
+ (unspec:SI [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 1))]
+ "TARGET_THUMB2 && use_cmse"
+ "bl\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*thumb2_indirect_jump"
[(set (pc)
(match_operand:SI 0 "register_operand" "l*r"))]
@@ -84,6 +84,8 @@
UNSPEC_VRINTA ; Represent a float to integral float rounding
; towards nearest, ties away from zero.
UNSPEC_PROBE_STACK ; Probe stack memory reference
+ UNSPEC_NONSECURE_MEM ; Represent non-secure memory in ARMv8-M with
+ ; security extension
])
(define_c_enum "unspec" [
new file mode 100644
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (int);
+
+int
+foo (int a)
+{
+ return bar (bar (a + 1));
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "movs\tr0, r4" } } */
+/* { dg-final { scan-assembler "\n\tmovs\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "\n\tmovs\tr2, r4\n\tmovs\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* r0 and r1 are not cleared, because they are used to pass the double
+ parameter to bar. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
@@ -3,11 +3,19 @@
/* { dg-skip-if "Testing exclusion of -mcmse" { arm-*-* } { "-mcmse" } { "" } } */
+void __attribute__ ((cmse_nonsecure_call)) (*bar) (int);
+typedef void __attribute__ ((cmse_nonsecure_call)) baz (int);
+
int __attribute__ ((cmse_nonsecure_entry))
-foo (int a)
+foo (int a, baz b)
{
+ bar (a);
+ b (a);
return a + 1;
}
+/* { dg-final { scan-assembler-not "bxns" } } */
+/* { dg-final { scan-assembler-not "blxns" } } */
+/* { dg-final { scan-assembler-not "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "foo:" } } */
/* { dg-final { scan-assembler-not "__acle_se_foo:" } } */
new file mode 100644
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (3.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (3.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler-not "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=softfp"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" "-mfloat-abi=softfp"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" "-mfloat-abi=softfp"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" "-mfloat-abi=softfp"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=hard"} {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=hard"} {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=hard"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "\n\tmov\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "\n\tmov\tr2, r4\n\tmov\tr3, r4" } } */
+
+/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=hard"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" "-mfloat-abi=hard"} {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
new file mode 100644
@@ -0,0 +1,113 @@
+/* CMSE wrapper function used to save, clear and restore callee-saved
+ registers for calls to functions with the cmse_nonsecure_call attribute.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+.syntax unified
+.thumb
+.global __gnu_cmse_nonsecure_call
+__gnu_cmse_nonsecure_call:
+#if defined(__ARM_ARCH_8M_MAIN__)
+push {r5-r11,lr}
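+/* Clear r5-r11 and ip using r4, which holds the target address: the one
+value the non-secure side can already see. */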
+mov r5, r4
+mov r6, r4
+mov r7, r4
+mov r8, r4
+mov r9, r4
+mov r10, r4
+mov r11, r4
+mov ip, r4
+#if !defined(__SOFTFP__)
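+/* Save the callee-saved VFP registers and overwrite them with 1.0; the
+caller-saved VFP registers were already cleared by the compiler. */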
+vpush.f64 {d8-d15}
+#if __ARM_FP & 0x04
+vmov.f32 s16, #1.00000
+vmov.f32 s17, #1.00000
+vmov.f32 s18, #1.00000
+vmov.f32 s19, #1.00000
+vmov.f32 s20, #1.00000
+vmov.f32 s21, #1.00000
+vmov.f32 s22, #1.00000
+vmov.f32 s23, #1.00000
+vmov.f32 s24, #1.00000
+vmov.f32 s25, #1.00000
+vmov.f32 s26, #1.00000
+vmov.f32 s27, #1.00000
+vmov.f32 s28, #1.00000
+vmov.f32 s29, #1.00000
+vmov.f32 s30, #1.00000
+vmov.f32 s31, #1.00000
+#elif __ARM_FP & 0x08
+vmov.f64 d8, #1.00000
+vmov.f64 d9, #1.00000
+vmov.f64 d10, #1.00000
+vmov.f64 d11, #1.00000
+vmov.f64 d12, #1.00000
+vmov.f64 d13, #1.00000
+vmov.f64 d14, #1.00000
+vmov.f64 d15, #1.00000
+#else
+#error "Half precision implementation not supported."
+#endif
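+/* Overwrite the FP status and control register so no FP state leaks
+across the transition. */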
+vmsr fpscr, r4
+#endif
+#if defined(__ARM_FEATURE_DSP)
+msr APSR_nzcvqg, r4
+#else
+msr APSR_nzcvq, r4
+#endif
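+/* The LSB of r4 was cleared by the caller, as BLXNS requires to make the
+transition to non-secure state. */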
+blxns r4
+#if !defined(__SOFTFP__)
+vpop.f64 {d8-d15}
+#endif
+pop {r5-r11, pc}
+#elif defined (__ARM_ARCH_8M_BASE__)
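+/* Baseline (Thumb-1) push/pop cannot encode the high registers directly,
+so r8-r11 and lr are saved by staging them through r5-r7. */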
+push {r5-r7}
+mov r5, r8
+mov r6, r9
+mov r7, r10
+push {r5-r7}
+mov r5, r11
+mov r6, lr
+push {r5,r6}
+mov r5, r4
+mov r6, r4
+mov r7, r4
+mov r8, r4
+mov r9, r4
+mov r10, r4
+mov r11, r4
+mov ip, r4
+msr APSR_nzcvq, r4
+blxns r4
+pop {r5, r6}
+mov lr, r6
+mov r11, r5
+pop {r5-r7}
+mov r10, r7
+mov r9, r6
+mov r8, r5
+pop {r5-r7}
+blx lr
+#else
+#error "This should only be used for armv8-m base- and mainline."
+#endif
@@ -12,4 +12,6 @@ libgcc-objects += cmse.o cmse_nonsecure_call.o
cmse.o: $(srcdir)/config/arm/cmse.c
$(gcc_compile) -c $(CMSE_OPTS) $<
+cmse_nonsecure_call.o: $(srcdir)/config/arm/cmse_nonsecure_call.S
+ $(gcc_compile) -c $<
endif