@@ -132,6 +132,20 @@ static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
+/* Free the edge_info data attached to E, if it exists. */
+
+static void
+free_edge_info (edge e)
+{
+  struct edge_info *edge_info = (struct edge_info *) e->aux;
+
+  if (edge_info)
+    {
+      edge_info->cond_equivalences.release ();
+      free (edge_info);
+    }
+}
+
/* Allocate an EDGE_INFO for edge E and attach it to E.
Return the new EDGE_INFO structure. */
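The new free_edge_info helper centralizes the teardown so it can be reused both when an edge is re-annotated (allocate_edge_info, next hunk) and during bulk cleanup (free_all_edge_infos). A minimal, self-contained sketch of that pattern follows; the names (note, free_note, allocate_note) are made up for illustration and none of this is GCC code:

#include <stdlib.h>

struct note { int *data; };

/* Free the annotation N, if it exists.  Clearing the owning pointer is
   left to the callers, mirroring how the patch handles e->aux.  */
static void
free_note (struct note *n)
{
  if (n)
    {
      free (n->data);
      free (n);
    }
}

/* (Re)allocate the annotation in *SLOT, dropping any stale one first.  */
static struct note *
allocate_note (struct note **slot)
{
  free_note (*slot);
  *slot = (struct note *) calloc (1, sizeof (struct note));
  return *slot;
}

int
main (void)
{
  struct note *slot = NULL;
  allocate_note (&slot);
  allocate_note (&slot);   /* Safe: the stale annotation is freed first.  */
  free_note (slot);
  slot = NULL;
  return 0;
}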
@@ -140,6 +154,9 @@ allocate_edge_info (edge e)
{
struct edge_info *edge_info;
+  /* Free the old one, if it exists. */
+  free_edge_info (e);
+
edge_info = XCNEW (struct edge_info);
e->aux = edge_info;
@@ -163,14 +180,8 @@ free_all_edge_infos (void)
{
FOR_EACH_EDGE (e, ei, bb->preds)
{
-          struct edge_info *edge_info = (struct edge_info *) e->aux;
-
-          if (edge_info)
-            {
-              edge_info->cond_equivalences.release ();
-              free (edge_info);
-              e->aux = NULL;
-            }
+          free_edge_info (e);
+          e->aux = NULL;
}
}
}
@@ -574,6 +585,16 @@ pass_dominator::execute (function *fun)
a single loop. */
mark_dfs_back_edges ();
+  /* We want to create the edge info structures before the dominator walk
+     so that they'll be in place for the jump threader, particularly when
+     threading through a join block.
+
+     The conditions will be lazily updated with global equivalences as
+     we reach them during the dominator walk. */
+  basic_block bb;
+  FOR_EACH_BB_FN (bb, fun)
+    record_edge_info (bb);
+
/* Recursively walk the dominator tree optimizing statements. */
dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
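The hunk above runs record_edge_info over every block before the dominator walk starts. The reason, as the comment says, is that the jump threader may inspect a join block's incoming edges whose source blocks the walk has not reached yet; pre-creating the edge annotations guarantees they exist, and the walk only refines them afterwards. A toy, self-contained sketch of that ordering (made-up types, not GCC code):

#include <stdio.h>
#include <vector>

/* Per-edge annotation, standing in for struct edge_info.  */
struct edge_note { int cond; };

struct block
{
  std::vector<int> succs;               /* Successor block indices.  */
  std::vector<edge_note *> notes;       /* One note per outgoing edge, like e->aux.  */
};

int
main ()
{
  /* Diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3.  Block 3 is the join.  */
  std::vector<block> cfg (4);
  cfg[0].succs = {1, 2};
  cfg[1].succs = {3};
  cfg[2].succs = {3};

  /* Phase 1: annotate every edge of every block up front.  */
  for (block &b : cfg)
    for (size_t i = 0; i < b.succs.size (); i++)
      b.notes.push_back (new edge_note { (int) i });

  /* Phase 2: a walk visiting block 1 wants to thread through join block 3
     and needs the note on the other incoming edge 2 -> 3.  Because phase 1
     already ran, that note exists even though block 2 was not walked yet.  */
  printf ("note on edge 2->3: cond %d\n", cfg[2].notes[0]->cond);

  for (block &b : cfg)
    for (edge_note *n : b.notes)
      delete n;
  return 0;
}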
@@ -873,7 +894,7 @@ dom_opt_dom_walker::thread_across_edge (edge e)
/* With all the edge equivalences in the tables, go ahead and attempt
to thread through E->dest. */
::thread_across_edge (m_dummy_cond, e, false,
- const_and_copies,
+ const_and_copies, avail_exprs_stack,
simplify_stmt_for_jump_threading);
/* And restore the various tables to their state before
@@ -1358,6 +1358,7 @@ thread_across_edge (gcond *dummy_cond,
edge e,
bool handle_dominating_asserts,
const_and_copies *const_and_copies,
+ avail_exprs_stack *avail_exprs_stack,
tree (*simplify) (gimple, gimple))
{
bitmap visited = BITMAP_ALLOC (NULL);
@@ -1442,6 +1443,8 @@ thread_across_edge (gcond *dummy_cond,
/* Push a fresh marker so we can unwind the equivalences created
for each of E->dest's successors. */
const_and_copies->push_marker ();
+    if (avail_exprs_stack)
+      avail_exprs_stack->push_marker ();
/* Avoid threading to any block we have already visited. */
bitmap_clear (visited);
@@ -1493,6 +1496,8 @@ thread_across_edge (gcond *dummy_cond,
}
/* And unwind the equivalence table. */
+    if (avail_exprs_stack)
+      avail_exprs_stack->pop_to_marker ();
const_and_copies->pop_to_marker ();
}
BITMAP_FREE (visited);
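The avail_exprs_stack push/pop added above follows the same marker discipline the function already uses for const_and_copies: temporary state recorded while examining one successor of E->dest is unwound before the next successor is examined. A self-contained illustration of marker-based unwinding with a toy class (not GCC's avail_exprs_stack):

#include <stdio.h>
#include <vector>

/* Toy unwinding stack: push_marker () records a sentinel, and
   pop_to_marker () discards everything recorded since the last marker.  */
class marker_stack
{
public:
  void push_marker () { m_stack.push_back (-1); }       /* -1 is the sentinel.  */
  void record (int equiv) { m_stack.push_back (equiv); }
  void pop_to_marker ()
  {
    while (!m_stack.empty () && m_stack.back () != -1)
      m_stack.pop_back ();
    if (!m_stack.empty ())
      m_stack.pop_back ();                               /* Drop the marker itself.  */
  }
  size_t depth () const { return m_stack.size (); }

private:
  std::vector<int> m_stack;
};

int
main ()
{
  marker_stack stack;
  for (int succ = 0; succ < 3; succ++)
    {
      stack.push_marker ();
      stack.record (succ * 10);         /* State valid only along this edge.  */
      stack.record (succ * 10 + 1);
      /* ... simplify and try to thread using the recorded state ...  */
      stack.pop_to_marker ();           /* Unwind before the next successor.  */
    }
  printf ("entries left after the loop: %zu\n", stack.depth ());  /* Prints 0.  */
  return 0;
}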
@@ -31,6 +31,8 @@ extern void threadedge_finalize_values (void);
extern bool potentially_threadable_block (basic_block);
extern void propagate_threaded_block_debug_into (basic_block, basic_block);
extern void thread_across_edge (gcond *, edge, bool,
- const_and_copies *, tree (*) (gimple, gimple));
+ const_and_copies *,
+ avail_exprs_stack *,
+ tree (*) (gimple, gimple));
#endif /* GCC_TREE_SSA_THREADEDGE_H */
@@ -10205,7 +10205,7 @@ identify_jump_threads (void)
if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
continue;
- thread_across_edge (dummy, e, true, equiv_stack,
+ thread_across_edge (dummy, e, true, equiv_stack, NULL,
simplify_stmt_for_jump_threading);
}
}