@@ -1337,6 +1337,10 @@ ftree-loop-distribution
Common Report Var(flag_tree_loop_distribution) Optimization
Enable loop distribution on trees
+ftree-loop-distribute-memset-zero
+Common Report Var(flag_tree_loop_distribute_memset_zero) Optimization
+Enable loop distribution of initialization loops, generating calls to memset zero
+
ftree-loop-im
Common Report Var(flag_tree_loop_im) Init(1) Optimization
Enable loop invariant motion on trees
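
As a quick illustration of what the new option is meant to catch (a
hypothetical test case, not part of the patch), consider a loop that
mixes a zero-initialization store with other work; with
-O2 -ftree-loop-distribute-memset-zero the zeroing store should be
split into a loop of its own and then generated as a memset:

/* The restrict qualifiers are an assumption made here so that the
   dependence test can tell the two arrays apart.  */
void
init_and_use (int *restrict a, int *restrict b, int n)
{
  int i;

  for (i = 0; i < n; i++)
    {
      a[i] = 0;         /* Candidate "A[i] = 0" store.  */
      b[i] = a[i] + i;  /* Left behind in a separate loop.  */
    }
}
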
@@ -384,7 +384,7 @@ Objective-C and Objective-C++ Dialects}.
-ftree-builtin-call-dce -ftree-ccp -ftree-ch -ftree-copy-prop @gol
-ftree-copyrename -ftree-dce -ftree-dominator-opts -ftree-dse @gol
-ftree-forwprop -ftree-fre -ftree-loop-if-convert -ftree-loop-im @gol
--ftree-phiprop -ftree-loop-distribution @gol
+-ftree-phiprop -ftree-loop-distribution -ftree-loop-distribute-memset-zero @gol
-ftree-loop-ivcanon -ftree-loop-linear -ftree-loop-optimize @gol
-ftree-parallelize-loops=@var{n} -ftree-pre -ftree-pta -ftree-reassoc @gol
-ftree-sink -ftree-sra -ftree-switch-conversion @gol
@@ -6925,6 +6925,28 @@ DO I = 1, N
ENDDO
@end smallexample
+@item -ftree-loop-distribute-memset-zero
+@opindex ftree-loop-distribute-memset-zero
+Perform loop distribution of initialization loops and generate a call
+to memset zero for each distributed initialization loop.  For example,
+the loop
+@smallexample
+DO I = 1, N
+  A(I) = 0
+  B(I) = A(I) + I
+ENDDO
+@end smallexample
+is transformed to
+@smallexample
+DO I = 1, N
+  A(I) = 0
+ENDDO
+DO I = 1, N
+  B(I) = A(I) + I
+ENDDO
+@end smallexample
+and the initialization loop is transformed into a call to memset zero.
+
@item -ftree-loop-im
@opindex ftree-loop-im
Perform loop invariant motion on trees. This pass moves only invariants that
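
In C terms, the transformation documented above corresponds roughly to
the following (a sketch assuming contiguous, non-overlapping accesses;
the actual rewriting is performed on GIMPLE):

#include <string.h>

void
f (int *restrict a, int *restrict b, int n)
{
  int i;

  /* The distributed initialization loop becomes a single call to
     memset zero; the guard keeps the size computation valid when
     n <= 0.  */
  if (n > 0)
    memset (a, 0, n * sizeof (int));

  /* The remaining statements stay in a loop of their own.  */
  for (i = 0; i < n; i++)
    b[i] = a[i] + i;
}
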
@@ -5038,6 +5038,35 @@ stores_from_loop (struct loop *loop, VEC (gimple, heap) **stmts)
free (bbs);
}
+/* Initialize STMTS with all the statements of LOOP that contain a
+   store to memory of the form "A[i] = 0".  */
+
+void
+stores_zero_from_loop (struct loop *loop, VEC (gimple, heap) **stmts)
+{
+  unsigned int i;
+  basic_block bb;
+  gimple_stmt_iterator si;
+  gimple stmt;
+  tree op;
+  basic_block *bbs = get_loop_body_in_dom_order (loop);
+
+  for (i = 0; i < loop->num_nodes; i++)
+    for (bb = bbs[i], si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+      if ((stmt = gsi_stmt (si))
+          && gimple_vdef (stmt)
+          && is_gimple_assign (stmt)
+          /* The right-hand side must be an integer or real constant
+             equal to zero.  */
+          && (gimple_assign_rhs_code (stmt) == INTEGER_CST
+              || gimple_assign_rhs_code (stmt) == REAL_CST)
+          && (op = gimple_assign_rhs1 (stmt))
+          && (integer_zerop (op) || real_zerop (op)))
+        VEC_safe_push (gimple, heap, *stmts, gsi_stmt (si));
+
+  free (bbs);
+}
+
/* For a data reference REF, return the declaration of its base
address or NULL_TREE if the base is not determined. */
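
For intuition about which statements the new collector pushes on the
work list, the following loops (illustrative examples, not from the
patch) contain stores it does and does not match:

void
examples (int *restrict a, double *restrict d, int n, int k)
{
  int i;

  for (i = 0; i < n; i++)
    a[i] = 0;    /* Matched: integer zero store (integer_zerop).  */

  for (i = 0; i < n; i++)
    d[i] = 0.0;  /* Matched: real constant zero (real_zerop).  */

  for (i = 0; i < n; i++)
    a[i] = k;    /* Not matched: the rhs is not a constant.  */
}
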
@@ -564,6 +564,7 @@ index_in_loop_nest (int var, VEC (loop_p, heap) *loop_nest)
}
void stores_from_loop (struct loop *, VEC (gimple, heap) **);
+void stores_zero_from_loop (struct loop *, VEC (gimple, heap) **);
void remove_similar_memory_refs (VEC (gimple, heap) **);
bool rdg_defs_used_in_other_loops_p (struct graph *, int);
bool have_similar_memory_accesses (gimple, gimple);
@@ -1184,18 +1184,36 @@ tree_loop_distribution (void)
{
VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);
- /* With the following working list, we're asking distribute_loop
- to separate the stores of the loop: when dependences allow,
- it will end on having one store per loop. */
- stores_from_loop (loop, &work_list);
-
- /* A simple heuristic for cache locality is to not split stores
- to the same array. Without this call, an unrolled loop would
- be split into as many loops as unroll factor, each loop
- storing in the same array. */
- remove_similar_memory_refs (&work_list);
-
- nb_generated_loops = distribute_loop (loop, work_list);
+      /* When both flag_tree_loop_distribute_memset_zero and
+         flag_tree_loop_distribution are set, only the memset zero
+         distribution is performed.  */
+      if (flag_tree_loop_distribute_memset_zero)
+        {
+          /* With the following work list, we're asking
+             distribute_loop to separate from the rest of the loop
+             the stores of the form "A[i] = 0".  */
+          stores_zero_from_loop (loop, &work_list);
+
+          /* Do nothing when there is nothing to be distributed.  */
+          if (VEC_length (gimple, work_list) > 0)
+            nb_generated_loops = distribute_loop (loop, work_list);
+        }
+      else if (flag_tree_loop_distribution)
+        {
+          /* With the following work list, we're asking
+             distribute_loop to separate the stores of the loop: when
+             dependences allow, it will end up having one store per
+             loop.  */
+          stores_from_loop (loop, &work_list);
+
+          /* A simple heuristic for cache locality is to not split
+             stores to the same array.  Without this call, an
+             unrolled loop would be split into as many loops as the
+             unroll factor, each loop storing in the same array.  */
+          remove_similar_memory_refs (&work_list);
+
+          nb_generated_loops = distribute_loop (loop, work_list);
+        }
if (dump_file && (dump_flags & TDF_DETAILS))
{
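
Note that the dispatch above gives the memset-zero work list priority:
with both options enabled, only the "A[i] = 0" stores are separated
out. In a loop such as (hypothetical example)

void
g (int *restrict a, int *restrict b, int n)
{
  int i;

  for (i = 0; i < n; i++)
    {
      a[i] = 0;  /* Separated out and generated as memset zero.  */
      b[i] = i;  /* Not split further while the memset-zero strategy
                    is selected.  */
    }
}

full store-by-store distribution still requires -ftree-loop-distribution
without -ftree-loop-distribute-memset-zero.
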
@@ -1217,7 +1235,8 @@ tree_loop_distribution (void)
static bool
gate_tree_loop_distribution (void)
{
- return flag_tree_loop_distribution != 0;
+  return (flag_tree_loop_distribution
+          || flag_tree_loop_distribute_memset_zero);
}
struct gimple_opt_pass pass_loop_distribution =
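
As a usage note, the gate now fires when either flag is given, so
-ftree-loop-distribute-memset-zero alone is enough to schedule the
pass. Which strategy ran on a loop can be checked in the pass dump
(the dump name follows the pass name "ldist"), for instance:

  gcc -O2 -ftree-loop-distribute-memset-zero -fdump-tree-ldist-details t.c
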