===================================================================
@@ -1114,6 +1114,8 @@ update_nonlocal_goto_save_area (void)
rtx
allocate_dynamic_stack_space (rtx size, rtx target, int known_align)
{
+ rtx final_label, final_target;
+
/* If we're asking for zero bytes, it doesn't matter what we point
to since we can't dereference it. But return a reasonable
address anyway. */
@@ -1226,6 +1228,60 @@ allocate_dynamic_stack_space (rtx size,
#endif
size = round_push (size);
+ /* Don't use a TARGET that isn't a pseudo or is the wrong mode. */
+ if (target == 0 || !REG_P (target)
+ || REGNO (target) < FIRST_PSEUDO_REGISTER
+ || GET_MODE (target) != Pmode)
+ target = gen_reg_rtx (Pmode);
+
+ mark_reg_pointer (target, known_align);
+
+ final_label = NULL_RTX;
+ final_target = NULL_RTX;
+
+ /* If we are splitting the stack, we need to ask the backend whether
+ there is enough room on the current stack. If there isn't, or if
+     the backend doesn't know how to tell us, then we need to call a
+ function to allocate memory in some other way. This memory will
+ be released when we release the current stack segment. The
+ effect is that stack allocation becomes less efficient, but at
+ least it doesn't cause a stack overflow. */
+ if (flag_split_stack)
+ {
+ rtx available_label, space, func;
+
+ available_label = NULL_RTX;
+
+#ifdef HAVE_split_stack_space_check
+ if (HAVE_split_stack_space_check)
+ {
+ available_label = gen_label_rtx ();
+
+ /* This instruction will branch to AVAILABLE_LABEL if there
+ are SIZE bytes available on the stack. */
+ emit_insn (gen_split_stack_space_check (size, available_label));
+ }
+#endif
+
+ func = init_one_libfunc ("__morestack_allocate_stack_space");
+
+ space = emit_library_call_value (func, target, LCT_NORMAL, Pmode,
+ 1, size, Pmode);
+
+ if (available_label == NULL_RTX)
+ return space;
+
+ final_target = gen_reg_rtx (Pmode);
+ mark_reg_pointer (final_target, known_align);
+
+ emit_move_insn (final_target, space);
+
+ final_label = gen_label_rtx ();
+ emit_jump (final_label);
+
+ emit_label (available_label);
+ }
+
do_pending_stack_adjust ();
/* We ought to be called always on the toplevel and stack ought to be aligned
@@ -1243,14 +1299,6 @@ allocate_dynamic_stack_space (rtx size,
else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
probe_stack_range (STACK_CHECK_PROTECT, size);
- /* Don't use a TARGET that isn't a pseudo or is the wrong mode. */
- if (target == 0 || !REG_P (target)
- || REGNO (target) < FIRST_PSEUDO_REGISTER
- || GET_MODE (target) != Pmode)
- target = gen_reg_rtx (Pmode);
-
- mark_reg_pointer (target, known_align);
-
/* Perform the required allocation from the stack. Some systems do
this differently than simply incrementing/decrementing from the
stack pointer, such as acquiring the space by calling malloc(). */
@@ -1336,6 +1384,15 @@ allocate_dynamic_stack_space (rtx size,
if (cfun->nonlocal_goto_save_area != 0)
update_nonlocal_goto_save_area ();
+ /* Finish up the split stack handling. */
+ if (final_label != NULL_RTX)
+ {
+ gcc_assert (flag_split_stack);
+ emit_move_insn (final_target, target);
+ emit_label (final_label);
+ target = final_target;
+ }
+
return target;
}
===================================================================
@@ -15111,6 +15111,31 @@
(set_attr "atom_unit" "jeu")
(set_attr "length_immediate" "2")
(set_attr "modrm" "0")])
+
+;; If there are operand 0 bytes available on the stack, jump to
+;; operand 1.
+
+(define_expand "split_stack_space_check"
+ [(set (pc) (if_then_else
+ (ltu (minus (reg SP_REG)
+ (match_operand 0 "register_operand" ""))
+ (unspec [(const_int 0)] UNSPEC_STACK_CHECK))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+{
+ rtx reg, size, limit;
+
+ reg = gen_reg_rtx (Pmode);
+ size = force_reg (Pmode, operands[0]);
+ emit_insn (gen_sub3_insn (reg, stack_pointer_rtx, size));
+ ix86_compare_op0 = reg;
+ limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_STACK_CHECK);
+ ix86_compare_op1 = gen_rtx_MEM (Pmode, gen_rtx_CONST (Pmode, limit));
+ ix86_expand_branch (GEU, operands[1]);
+ DONE;
+})
(define_expand "ffs_cmove"
[(set (match_dup 2) (const_int -1))
===================================================================
@@ -75,6 +75,10 @@ extern void
__morestack_unblock_signals (void)
__attribute__ ((no_split_stack, flatten, visibility ("hidden")));
+extern void *
+__morestack_allocate_stack_space (size_t)
+ __attribute__ ((visibility ("default")));
+
extern size_t
__generic_findstack (void *stack)
__attribute__ ((no_split_stack, flatten, visibility ("hidden")));
@@ -104,6 +108,25 @@ struct stack_segment
/* The stack address when this stack was created. This is used when
popping the stack. */
void *old_stack;
+ /* A list of memory blocks allocated by dynamic stack
+ allocation. */
+ struct dynamic_allocation_blocks *dynamic_allocation;
+ /* A list of dynamic memory blocks no longer needed. */
+ struct dynamic_allocation_blocks *free_dynamic_allocation;
+};
+
+/* A list of memory blocks allocated by dynamic stack allocation.
+ This is used for code that calls alloca or uses variably sized
+ arrays. */
+
+struct dynamic_allocation_blocks
+{
+ /* The next block in the list. */
+ struct dynamic_allocation_blocks *next;
+ /* The size of the allocated memory. */
+ size_t size;
+ /* The allocated memory. */
+ void *block;
};
/* The first stack segment allocated for this thread. */
@@ -293,6 +316,8 @@ allocate_segment (size_t frame_size)
pss->prev = __morestack_current_segment;
pss->next = NULL;
pss->size = allocate - overhead;
+ pss->dynamic_allocation = NULL;
+ pss->free_dynamic_allocation = NULL;
if (__morestack_current_segment != NULL)
__morestack_current_segment->next = pss;
@@ -302,13 +327,50 @@ allocate_segment (size_t frame_size)
return pss;
}
-/* Release stack segments. */
+/* Free a list of dynamic blocks. */
-void
-__morestack_release_segments (struct stack_segment **pp)
+static void
+free_dynamic_blocks (struct dynamic_allocation_blocks *p)
{
+ while (p != NULL)
+ {
+ struct dynamic_allocation_blocks *next;
+
+ next = p->next;
+ free (p->block);
+ free (p);
+ p = next;
+ }
+}
+
+/* Merge two lists of dynamic blocks. */
+
+static struct dynamic_allocation_blocks *
+merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
+ struct dynamic_allocation_blocks *b)
+{
+ struct dynamic_allocation_blocks **pp;
+
+ if (a == NULL)
+ return b;
+ if (b == NULL)
+ return a;
+ for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
+ ;
+ *pp = b;
+ return a;
+}
+
+/* Release stack segments. If FREE_DYNAMIC is non-zero, we also free
+ any dynamic blocks. Otherwise we return them. */
+
+struct dynamic_allocation_blocks *
+__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
+{
+ struct dynamic_allocation_blocks *ret;
struct stack_segment *pss;
+ ret = NULL;
pss = *pp;
while (pss != NULL)
{
@@ -317,6 +379,21 @@ __morestack_release_segments (struct sta
next = pss->next;
+ if (pss->dynamic_allocation != NULL
+ || pss->free_dynamic_allocation != NULL)
+ {
+ if (free_dynamic)
+ {
+ free_dynamic_blocks (pss->dynamic_allocation);
+ free_dynamic_blocks (pss->free_dynamic_allocation);
+ }
+ else
+ {
+ ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
+ ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
+ }
+ }
+
allocate = pss->size + sizeof (struct stack_segment);
if (munmap (pss, allocate) < 0)
{
@@ -327,6 +404,8 @@ __morestack_release_segments (struct sta
pss = next;
}
*pp = NULL;
+
+ return ret;
}
/* This function is called by a processor specific function to set the
@@ -391,6 +470,7 @@ __generic_morestack (size_t *pframe_size
size_t frame_size = *pframe_size;
struct stack_segment *current;
struct stack_segment **pp;
+ struct dynamic_allocation_blocks *dynamic;
char *from;
char *to;
void *ret;
@@ -400,7 +480,9 @@ __generic_morestack (size_t *pframe_size
  pp = current != NULL ? &current->next : &__morestack_segments;
if (*pp != NULL && (*pp)->size < frame_size)
- __morestack_release_segments (pp);
+ dynamic = __morestack_release_segments (pp, 0);
+ else
+ dynamic = NULL;
current = *pp;
if (current == NULL)
@@ -410,6 +492,14 @@ __generic_morestack (size_t *pframe_size
__morestack_current_segment = current;
+ if (dynamic != NULL)
+ {
+ /* Move the free blocks onto our list. We don't want to call
+ free here, as we are short on stack space. */
+ current->free_dynamic_allocation =
+ merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
+ }
+
*pframe_size = current->size - param_size;
#ifdef STACK_GROWS_DOWNWARD
@@ -516,6 +606,70 @@ __morestack_unblock_signals (void)
sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
}
+/* This function is called to allocate dynamic stack space, for alloca
+ or a variably sized array. This is a regular function with
+ sufficient stack space, so we just use malloc to allocate the
+ space. We attach the allocated blocks to the current stack
+ segment, so that they will eventually be reused or freed. */
+
+void *
+__morestack_allocate_stack_space (size_t size)
+{
+ struct stack_segment *seg, *current;
+ struct dynamic_allocation_blocks *p;
+
+ /* We have to block signals to avoid getting confused if we get
+ interrupted by a signal whose handler itself uses alloca or a
+ variably sized array. */
+ __morestack_block_signals ();
+
+ /* Since we don't want to call free while we are low on stack space,
+ we may have a list of already allocated blocks waiting to be
+ freed. Release them all, unless we find one that is large
+ enough. We don't look at every block to see if one is large
+ enough, just the first one, because we aren't trying to build a
+ memory allocator here, we're just trying to speed up common
+ cases. */
+
+ current = __morestack_current_segment;
+ p = NULL;
+ for (seg = __morestack_segments; seg != NULL; seg = seg->next)
+ {
+ p = seg->free_dynamic_allocation;
+ if (p != NULL)
+ {
+ if (p->size >= size)
+ {
+ seg->free_dynamic_allocation = p->next;
+ break;
+ }
+
+ free_dynamic_blocks (p);
+ seg->free_dynamic_allocation = NULL;
+ p = NULL;
+ }
+ }
+
+ if (p == NULL)
+ {
+ /* We need to allocate additional memory. */
+ p = malloc (sizeof (*p));
+ if (p == NULL)
+ abort ();
+ p->size = size;
+ p->block = malloc (size);
+ if (p->block == NULL)
+ abort ();
+ }
+
+ p->next = current->dynamic_allocation;
+ current->dynamic_allocation = p;
+
+ __morestack_unblock_signals ();
+
+ return p->block;
+}
+
/* Find the stack segment for STACK and return the amount of space
available. This is used when unwinding the stack because of an
exception, in order to reset the stack guard correctly. */
===================================================================
@@ -43,7 +43,8 @@ extern void __morestack_fail (const char
/* Release stack segments. */
-extern void __morestack_release_segments (struct stack_segment **)
+extern struct dynamic_allocation_blocks *
+ __morestack_release_segments (struct stack_segment **, int)
__attribute__ ((visibility ("hidden")));
/* Store the stack information in a processor dependent manner. */
===================================================================
@@ -68,7 +68,7 @@ static pthread_once_t create_key_once =
static void
free_segments (void* arg)
{
- __morestack_release_segments ((struct stack_segment **) arg);
+ __morestack_release_segments ((struct stack_segment **) arg, 1);
}
/* Set up the key for the list of segments. This is called via