@@ -162,14 +162,12 @@ flow_loop_dump (const class loop *loop, FILE *file,
void
flow_loops_dump (FILE *file, void (*loop_dump_aux) (const class loop *, FILE *, int), int verbose)
{
- class loop *loop;
-
if (!current_loops || ! file)
return;
fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
- FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
+ for (class loop *loop : loops_list (cfun, LI_INCLUDE_ROOT))
{
flow_loop_dump (loop, file, loop_dump_aux, verbose);
}
@@ -559,8 +557,7 @@ sort_sibling_loops (function *fn)
free (rc_order);
auto_vec<loop_p, 3> siblings;
- loop_p loop;
- FOR_EACH_LOOP_FN (fn, loop, LI_INCLUDE_ROOT)
+ for (class loop *loop : loops_list (fn, LI_INCLUDE_ROOT))
if (loop->inner && loop->inner->next)
{
loop_p sibling = loop->inner;
@@ -836,9 +833,7 @@ disambiguate_multiple_latches (class loop *loop)
void
disambiguate_loops_with_multiple_latches (void)
{
- class loop *loop;
-
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
if (!loop->latch)
disambiguate_multiple_latches (loop);
@@ -1457,7 +1452,7 @@ verify_loop_structure (void)
auto_sbitmap visited (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
unsigned n;
@@ -1503,7 +1498,7 @@ verify_loop_structure (void)
free (bbs);
/* Check headers and latches. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
i = loop->num;
if (loop->header == NULL)
@@ -1629,7 +1624,7 @@ verify_loop_structure (void)
}
/* Check the recorded loop exits. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
if (!loop->exits || loop->exits->e != NULL)
{
@@ -1723,7 +1718,7 @@ verify_loop_structure (void)
err = 1;
}
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
eloops = 0;
for (exit = loop->exits->next; exit->e; exit = exit->next)
@@ -658,62 +658,153 @@ enum li_flags
LI_ONLY_INNERMOST = 4 /* Iterate only over innermost loops. */
};
-/* The iterator for loops. */
+/* Provide the functionality of std::as_const to support range-based for
+ to use const iterator. (We can't use std::as_const itself because it's
+ a C++17 feature.) */
+template <typename T>
+constexpr const T &
+as_const (T &t)
+{
+ return t;
+}
+
+/* A list for visiting loops, which contains the loop numbers instead of
+   the loop pointers.  The scope is restricted to the loop hierarchy tree
+   LOOPS or function FN, and the visiting order is specified by FLAGS. */
-class loop_iterator
+class loops_list
{
public:
- loop_iterator (function *fn, loop_p *loop, unsigned flags);
+ loops_list (struct loops *loops, unsigned flags);
+
+ loops_list (function *fn, unsigned flags)
+ : loops_list (loops_for_fn (fn), flags)
+ {
+ }
+
+ template <typename T> class Iter
+ {
+ public:
+ Iter (const loops_list &l, unsigned idx) : list (l), curr_idx (idx)
+ {
+ fill_curr_loop ();
+ }
+
+ T operator* () const { return curr_loop; }
+
+ Iter &
+ operator++ ()
+ {
+ if (curr_idx < list.to_visit.length ())
+ {
+ /* Bump the index and fill a new one. */
+ curr_idx++;
+ fill_curr_loop ();
+ }
+ else
+ gcc_assert (!curr_loop);
+
+ return *this;
+ }
+
+ bool
+ operator!= (const Iter &rhs) const
+ {
+ return this->curr_idx != rhs.curr_idx;
+ }
+
+ private:
+ /* Fill the current loop starting from the current index. */
+ void fill_curr_loop ();
+
+ /* Reference to the loop list to visit. */
+ const loops_list &list;
+
+ /* The current index in the list to visit. */
+ unsigned curr_idx;
- inline loop_p next ();
+ /* The loop implied by the current index. */
+ loop_p curr_loop;
+ };
- /* The function we are visiting. */
- function *fn;
+ using iterator = Iter<loop_p>;
+ using const_iterator = Iter<const loop_p>;
+
+ iterator
+ begin ()
+ {
+ return iterator (*this, 0);
+ }
+
+ iterator
+ end ()
+ {
+ return iterator (*this, to_visit.length ());
+ }
+
+ const_iterator
+ begin () const
+ {
+ return const_iterator (*this, 0);
+ }
+
+ const_iterator
+ end () const
+ {
+ return const_iterator (*this, to_visit.length ());
+ }
+
+private:
+ /* The loop hierarchy tree we are visiting. */
+ struct loops *loops;
/* The list of loops to visit. */
auto_vec<int, 16> to_visit;
-
- /* The index of the actual loop. */
- unsigned idx;
};
-inline loop_p
-loop_iterator::next ()
+/* Starting from the current index CURR_IDX (inclusive), find the first
+   index which stands for a valid loop and set that loop as CURR_LOOP.
+   If no such loop is found, set CURR_LOOP to null. */
+
+template <typename T>
+inline void
+loops_list::Iter<T>::fill_curr_loop ()
{
int anum;
- while (this->to_visit.iterate (this->idx, &anum))
+ while (this->list.to_visit.iterate (this->curr_idx, &anum))
{
- this->idx++;
- loop_p loop = get_loop (fn, anum);
+ loop_p loop = (*this->list.loops->larray)[anum];
if (loop)
- return loop;
+ {
+ curr_loop = loop;
+ return;
+ }
+ this->curr_idx++;
}
- return NULL;
+ curr_loop = nullptr;
}
-inline
-loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
+/* Set up the loops list to visit according to the specified
+ loop hierarchy tree LOOPS and iterating order FLAGS. */
+
+inline loops_list::loops_list (struct loops *loops, unsigned flags)
{
class loop *aloop;
unsigned i;
int mn;
- this->idx = 0;
- this->fn = fn;
- if (!loops_for_fn (fn))
- {
- *loop = NULL;
- return;
- }
+ this->loops = loops;
+ if (!loops)
+ return;
- this->to_visit.reserve_exact (number_of_loops (fn));
+ this->to_visit.reserve_exact (vec_safe_length (loops->larray));
mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;
if (flags & LI_ONLY_INNERMOST)
{
- for (i = 0; vec_safe_iterate (loops_for_fn (fn)->larray, i, &aloop); i++)
+ for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
if (aloop != NULL
&& aloop->inner == NULL
&& aloop->num >= mn)
@@ -722,7 +813,7 @@ loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
else if (flags & LI_FROM_INNERMOST)
{
/* Push the loops to LI->TO_VISIT in postorder. */
- for (aloop = loops_for_fn (fn)->tree_root;
+ for (aloop = loops->tree_root;
aloop->inner != NULL;
aloop = aloop->inner)
continue;
@@ -748,7 +839,7 @@ loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
else
{
/* Push the loops to LI->TO_VISIT in preorder. */
- aloop = loops_for_fn (fn)->tree_root;
+ aloop = loops->tree_root;
while (1)
{
if (aloop->num >= mn)
@@ -766,20 +857,8 @@ loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
}
}
}
-
- *loop = this->next ();
}
-#define FOR_EACH_LOOP(LOOP, FLAGS) \
- for (loop_iterator li(cfun, &(LOOP), FLAGS); \
- (LOOP); \
- (LOOP) = li.next ())
-
-#define FOR_EACH_LOOP_FN(FN, LOOP, FLAGS) \
- for (loop_iterator li(FN, &(LOOP), FLAGS); \
- (LOOP); \
- (LOOP) = li.next ())
-
/* The properties of the target. */
struct target_cfgloop {
/* Number of available registers. */
@@ -1572,12 +1572,10 @@ create_preheader (class loop *loop, int flags)
void
create_preheaders (int flags)
{
- class loop *loop;
-
if (!current_loops)
return;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
create_preheader (loop, flags);
loops_state_set (LOOPS_HAVE_PREHEADERS);
}
@@ -1587,10 +1585,9 @@ create_preheaders (int flags)
void
force_single_succ_latches (void)
{
- class loop *loop;
edge e;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
if (loop->latch != loop->header && single_succ_p (loop->latch))
continue;
@@ -808,8 +808,6 @@ record_loads (tag_map_t &tag_map, struct loop *loop)
void
execute_tag_collision_avoidance ()
{
- struct loop *loop;
-
df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
df_chain_add_problem (DF_UD_CHAIN);
df_compute_regs_ever_live (true);
@@ -824,7 +822,7 @@ execute_tag_collision_avoidance ()
calculate_dominance_info (CDI_DOMINATORS);
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
tag_map_t tag_map (512);
@@ -3234,8 +3234,6 @@ mn10300_loop_contains_call_insn (loop_p loop)
static void
mn10300_scan_for_setlb_lcc (void)
{
- loop_p loop;
-
DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
df_analyze ();
@@ -3248,7 +3246,7 @@ mn10300_scan_for_setlb_lcc (void)
if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
be the case that its parent loop is suitable. Thus we should check all
loops, but work from the innermost outwards. */
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
const char * reason = NULL;
@@ -14479,15 +14479,13 @@ s390_adjust_loop_scan_osc (struct loop* loop)
static void
s390_adjust_loops ()
{
- struct loop *loop = NULL;
-
df_analyze ();
compute_bb_for_insn ();
/* Find the loops. */
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
if (dump_file)
{
@@ -79,14 +79,14 @@ and its subloops in the numbering. The index of a loop never changes.
The entries of the @code{larray} field should not be accessed directly.
The function @code{get_loop} returns the loop description for a loop with
-the given index. @code{number_of_loops} function returns number of
-loops in the function. To traverse all loops, use @code{FOR_EACH_LOOP}
-macro. The @code{flags} argument of the macro is used to determine
-the direction of traversal and the set of loops visited. Each loop is
-guaranteed to be visited exactly once, regardless of the changes to the
-loop tree, and the loops may be removed during the traversal. The newly
-created loops are never traversed, if they need to be visited, this
-must be done separately after their creation.
+the given index. @code{number_of_loops} function returns number of loops
+in the function.  To traverse all loops, use a range-based for loop
+with a class @code{loops_list} instance.  The @code{flags} argument of
+its constructor selects the direction of traversal and the set of loops
+visited. Each loop is guaranteed to be visited exactly once, regardless
+of the changes to the loop tree, and the loops may be removed during the
+traversal. The newly created loops are never traversed, if they need to
+be visited, this must be done separately after their creation.
Each basic block contains the reference to the innermost loop it belongs
to (@code{loop_father}). For this reason, it is only possible to have
@@ -2089,8 +2089,7 @@ pass_linterchange::execute (function *fun)
return 0;
bool changed_p = false;
- class loop *loop;
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
vec<loop_p> loop_nest = vNULL;
vec<data_reference_p> datarefs = vNULL;
@@ -486,13 +486,12 @@ adjust_unroll_factor (class loop *inner, struct data_dependence_relation *ddr,
static unsigned int
tree_loop_unroll_and_jam (void)
{
- class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
/* Go through all innermost loops. */
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
class loop *outer = loop_outer (loop);
@@ -1428,8 +1428,7 @@ loop_versioning::analyze_blocks ()
versioning at that level could be useful in some cases. */
get_loop_info (get_loop (m_fn, 0)).rejected_p = true;
- class loop *loop;
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
loop_info &linfo = get_loop_info (loop);
@@ -1650,8 +1649,7 @@ loop_versioning::make_versioning_decisions ()
AUTO_DUMP_SCOPE ("make_versioning_decisions",
dump_user_location_t::from_function_decl (m_fn->decl));
- class loop *loop;
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
loop_info &linfo = get_loop_info (loop);
if (decide_whether_loop_is_versionable (loop))
@@ -473,13 +473,12 @@ static bool
split_paths ()
{
bool changed = false;
- loop_p loop;
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
initialize_original_copy_tables ();
calculate_dominance_info (CDI_DOMINATORS);
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
/* Only split paths if we are optimizing this loop for speed. */
if (!optimize_loop_for_speed_p (loop))
@@ -1535,9 +1535,8 @@ graphite_regenerate_ast_isl (scop_p scop)
if_region->false_region->region.entry->flags |= EDGE_FALLTHRU;
/* remove_edge_and_dominated_blocks marks loops for removal but
doesn't actually remove them (fix that...). */
- loop_p loop;
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
- if (! loop->header)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
+ if (!loop->header)
delete_loop (loop);
}
@@ -377,8 +377,7 @@ canonicalize_loop_closed_ssa (loop_p loop, edge e)
static void
canonicalize_loop_form (void)
{
- loop_p loop;
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
edge e = single_exit (loop);
if (!e || (e->flags & (EDGE_COMPLEX|EDGE_FAKE)))
@@ -494,10 +493,9 @@ graphite_transform_loops (void)
if (dump_file && (dump_flags & TDF_DETAILS))
{
- loop_p loop;
int num_no_dependency = 0;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->can_be_parallel)
num_no_dependency++;
@@ -2923,7 +2923,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loops_dump (dump_file, NULL, 0);
scev_initialize ();
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
predicate loop_iterations = true;
sreal header_freq;
@@ -1087,9 +1087,8 @@ end:
}
else
{
- class loop *loop;
scev_initialize ();
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (!finite_loop_p (loop))
{
if (dump_file)
@@ -789,18 +789,14 @@ doloop_optimize (class loop *loop)
void
doloop_optimize_loops (void)
{
- class loop *loop;
-
if (optimize == 1)
{
df_live_add_problem ();
df_live_set_all_dirty ();
}
- FOR_EACH_LOOP (loop, 0)
- {
- doloop_optimize (loop);
- }
+ for (class loop *loop : loops_list (cfun, 0))
+ doloop_optimize (loop);
if (optimize == 1)
df_remove_problem (df_live);
@@ -137,7 +137,6 @@ loop_optimizer_init (unsigned flags)
void
loop_optimizer_finalize (struct function *fn, bool clean_loop_closed_phi)
{
- class loop *loop;
basic_block bb;
timevar_push (TV_LOOP_FINI);
@@ -167,7 +166,7 @@ loop_optimizer_finalize (struct function *fn, bool clean_loop_closed_phi)
goto loop_fini_done;
}
- FOR_EACH_LOOP_FN (fn, loop, 0)
+ for (class loop *loop : loops_list (fn, 0))
free_simple_loop_desc (loop);
/* Clean up. */
@@ -229,7 +228,7 @@ fix_loop_structure (bitmap changed_bbs)
loops, so that when we remove the loops, we know that the loops inside
are preserved, and do not waste time relinking loops that will be
removed later. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
/* Detect the case that the loop is no longer present even though
it wasn't marked for removal.
@@ -2136,7 +2136,7 @@ calculate_loop_reg_pressure (void)
rtx link;
class loop *loop, *parent;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->aux == NULL)
{
loop->aux = xcalloc (1, sizeof (class loop_data));
@@ -2203,7 +2203,7 @@ calculate_loop_reg_pressure (void)
bitmap_release (&curr_regs_live);
if (flag_ira_region == IRA_REGION_MIXED
|| flag_ira_region == IRA_REGION_ALL)
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
@@ -2217,7 +2217,7 @@ calculate_loop_reg_pressure (void)
}
if (dump_file == NULL)
return;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
parent = loop_outer (loop);
fprintf (dump_file, "\n Loop %d (parent %d, header bb%d, depth %d)\n",
@@ -2251,8 +2251,6 @@ calculate_loop_reg_pressure (void)
void
move_loop_invariants (void)
{
- class loop *loop;
-
if (optimize == 1)
df_live_add_problem ();
/* ??? This is a hack. We should only need to call df_live_set_all_dirty
@@ -2271,7 +2269,7 @@ move_loop_invariants (void)
}
df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
/* Process the loops, innermost first. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
curr_loop = loop;
/* move_single_loop_invariants for very large loops is time consuming
@@ -2284,10 +2282,8 @@ move_loop_invariants (void)
move_single_loop_invariants (loop);
}
- FOR_EACH_LOOP (loop, 0)
- {
+ for (class loop *loop : loops_list (cfun, 0))
free_loop_data (loop);
- }
if (flag_ira_loop_pressure)
/* There is no sense to keep this info because it was most
@@ -214,10 +214,8 @@ report_unroll (class loop *loop, dump_location_t locus)
static void
decide_unrolling (int flags)
{
- class loop *loop;
-
/* Scan the loops, inner ones first. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
loop->lpt_decision.decision = LPT_NONE;
dump_user_location_t locus = get_loop_location (loop);
@@ -278,14 +276,13 @@ decide_unrolling (int flags)
void
unroll_loops (int flags)
{
- class loop *loop;
bool changed = false;
/* Now decide rest of unrolling. */
decide_unrolling (flags);
/* Scan the loops, inner ones first. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
/* And perform the appropriate transformations. */
switch (loop->lpt_decision.decision)
@@ -1353,7 +1353,6 @@ sms_schedule (void)
int maxii, max_asap;
partial_schedule_ptr ps;
basic_block bb = NULL;
- class loop *loop;
basic_block condition_bb = NULL;
edge latch_edge;
HOST_WIDE_INT trip_count, max_trip_count;
@@ -1397,7 +1396,7 @@ sms_schedule (void)
/* Build DDGs for all the relevant loops and hold them in G_ARR
indexed by the loop index. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
rtx_insn *head, *tail;
rtx count_reg;
@@ -1543,7 +1542,7 @@ sms_schedule (void)
}
/* We don't want to perform SMS on new loops - created by versioning. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
rtx_insn *head, *tail;
rtx count_reg;
@@ -1949,7 +1949,7 @@ predict_loops (void)
/* Try to predict out blocks in a loop that are not part of a
natural loop. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
basic_block bb, *bbs;
unsigned j, n_exits = 0;
@@ -4111,8 +4111,7 @@ pass_profile::execute (function *fun)
profile_status_for_fn (fun) = PROFILE_GUESSED;
if (dump_file && (dump_flags & TDF_DETAILS))
{
- class loop *loop;
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
if (loop->header->count.initialized_p ())
fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
loop->num,
@@ -1466,13 +1466,12 @@ branch_prob (bool thunk)
if (flag_branch_probabilities
&& (profile_status_for_fn (cfun) == PROFILE_READ))
{
- class loop *loop;
if (dump_file && (dump_flags & TDF_DETAILS))
report_predictor_hitrates ();
/* At this moment we have precise loop iteration count estimates.
Record them to loop structure before the profile gets out of date. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->header->count > 0 && loop->header->count.reliable_p ())
{
gcov_type nit = expected_loop_iterations_unbounded (loop);
@@ -6247,10 +6247,8 @@ make_regions_from_the_rest (void)
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- class loop *loop;
-
/* Release aux fields so we don't free them later by mistake. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
loop->aux = NULL;
loop_optimizer_finalize ();
@@ -6271,11 +6269,11 @@ sel_find_rgns (void)
if (current_loops)
{
- loop_p loop;
+ unsigned flags = flag_sel_sched_pipelining_outer_loops
+ ? LI_FROM_INNERMOST
+ : LI_ONLY_INNERMOST;
- FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
- ? LI_FROM_INNERMOST
- : LI_ONLY_INNERMOST))
+ for (class loop *loop : loops_list (cfun, flags))
make_regions_from_loop_nest (loop);
}
@@ -312,12 +312,11 @@ replace_loop_annotate_in_block (basic_block bb, class loop *loop)
static void
replace_loop_annotate (void)
{
- class loop *loop;
basic_block bb;
gimple_stmt_iterator gsi;
gimple *stmt;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
/* First look into the header. */
replace_loop_annotate_in_block (loop->header, loop);
@@ -2027,12 +2026,8 @@ replace_uses_by (tree name, tree val)
/* Also update the trees stored in loop structures. */
if (current_loops)
{
- class loop *loop;
-
- FOR_EACH_LOOP (loop, 0)
- {
+ for (class loop *loop : loops_list (cfun, 0))
substitute_in_loop_info (loop, name, val);
- }
}
}
@@ -7752,9 +7747,9 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
/* Fix up orig_loop_num. If the block referenced in it has been moved
to dest_cfun, update orig_loop_num field, otherwise clear it. */
- class loop *dloop;
+ class loop *dloop = NULL;
signed char *moved_orig_loop_num = NULL;
- FOR_EACH_LOOP_FN (dest_cfun, dloop, 0)
+ for (class loop *dloop : loops_list (dest_cfun, 0))
if (dloop->orig_loop_num)
{
if (moved_orig_loop_num == NULL)
@@ -3300,14 +3300,13 @@ pass_if_conversion::gate (function *fun)
unsigned int
pass_if_conversion::execute (function *fun)
{
- class loop *loop;
unsigned todo = 0;
if (number_of_loops (fun) <= 1)
return 0;
auto_vec<gimple *> preds;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (flag_tree_loop_if_convert == 1
|| ((flag_tree_loop_vectorize || loop->force_vectorize)
&& !loop->dont_vectorize))
@@ -3312,7 +3312,7 @@ loop_distribution::execute (function *fun)
/* We can at the moment only distribute non-nested loops, thus restrict
walking to innermost loops. */
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
/* Don't distribute multiple exit edges loop, or cold loop when
not doing pattern detection. */
@@ -3989,7 +3989,6 @@ parallelize_loops (bool oacc_kernels_p)
{
unsigned n_threads;
bool changed = false;
- class loop *loop;
class loop *skip_loop = NULL;
class tree_niter_desc niter_desc;
struct obstack parloop_obstack;
@@ -4020,7 +4019,7 @@ parallelize_loops (bool oacc_kernels_p)
calculate_dominance_info (CDI_DOMINATORS);
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
if (loop == skip_loop)
{
@@ -3419,11 +3419,10 @@ pcom_worker::tree_predictive_commoning_loop (bool allow_unroll_p)
unsigned
tree_predictive_commoning (bool allow_unroll_p)
{
- class loop *loop;
unsigned ret = 0, changed = 0;
initialize_original_copy_tables ();
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
if (optimize_loop_for_speed_p (loop))
{
pcom_worker w(loop);
@@ -2977,16 +2977,12 @@ gather_stats_on_scev_database (void)
void
scev_initialize (void)
{
- class loop *loop;
-
gcc_assert (! scev_initialized_p ());
scalar_evolution_info = hash_table<scev_info_hasher>::create_ggc (100);
- FOR_EACH_LOOP (loop, 0)
- {
- loop->nb_iterations = NULL_TREE;
- }
+ for (class loop *loop : loops_list (cfun, 0))
+ loop->nb_iterations = NULL_TREE;
}
/* Return true if SCEV is initialized. */
@@ -3015,14 +3011,10 @@ scev_reset_htab (void)
void
scev_reset (void)
{
- class loop *loop;
-
scev_reset_htab ();
- FOR_EACH_LOOP (loop, 0)
- {
- loop->nb_iterations = NULL_TREE;
- }
+ for (class loop *loop : loops_list (cfun, 0))
+ loop->nb_iterations = NULL_TREE;
}
/* Return true if the IV calculation in TYPE can overflow based on the knowledge
@@ -417,7 +417,6 @@ find_obviously_necessary_stmts (bool aggressive)
/* Prevent the empty possibly infinite loops from being removed. */
if (aggressive)
{
- class loop *loop;
if (mark_irreducible_loops ())
FOR_EACH_BB_FN (bb, cfun)
{
@@ -433,7 +432,7 @@ find_obviously_necessary_stmts (bool aggressive)
}
}
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (!finite_loop_p (loop))
{
if (dump_file)
@@ -908,8 +908,7 @@ remove_unused_locals (void)
if (cfun->has_simduid_loops)
{
- class loop *loop;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->simduid && !is_used_p (loop->simduid))
loop->simduid = NULL_TREE;
}
@@ -348,7 +348,6 @@ protected:
unsigned int
ch_base::copy_headers (function *fun)
{
- class loop *loop;
basic_block header;
edge exit, entry;
basic_block *bbs, *copied_bbs;
@@ -365,7 +364,7 @@ ch_base::copy_headers (function *fun)
auto_vec<std::pair<edge, loop_p> > copied;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
int initial_limit = param_max_loop_header_insns;
int remaining_limit = initial_limit;
@@ -1662,7 +1662,7 @@ analyze_memory_references (bool store_motion)
{
gimple_stmt_iterator bsi;
basic_block bb, *bbs;
- class loop *loop, *outer;
+ class loop *outer;
unsigned i, n;
/* Collect all basic-blocks in loops and sort them after their
@@ -1706,7 +1706,7 @@ analyze_memory_references (bool store_motion)
/* Propagate the information about accessed memory references up
the loop hierarchy. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
/* Finalize the overall touched references (including subloops). */
bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
@@ -3133,7 +3133,6 @@ fill_always_executed_in (void)
static void
tree_ssa_lim_initialize (bool store_motion)
{
- class loop *loop;
unsigned i;
bitmap_obstack_initialize (&lim_bitmap_obstack);
@@ -3177,7 +3176,7 @@ tree_ssa_lim_initialize (bool store_motion)
its postorder index. */
i = 0;
bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
bb_loop_postorder[loop->num] = i++;
}
@@ -1285,14 +1285,13 @@ canonicalize_loop_induction_variables (class loop *loop,
unsigned int
canonicalize_induction_variables (void)
{
- class loop *loop;
bool changed = false;
bool irred_invalidated = false;
bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
estimate_numbers_of_iterations (cfun);
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
changed |= canonicalize_loop_induction_variables (loop,
true, UL_SINGLE_ITER,
@@ -8066,14 +8066,13 @@ finish:
void
tree_ssa_iv_optimize (void)
{
- class loop *loop;
struct ivopts_data data;
auto_bitmap toremove;
tree_ssa_iv_optimize_init (&data);
/* Optimize the loops starting with the innermost ones. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
if (!dbg_cnt (ivopts_loop))
continue;
@@ -362,11 +362,10 @@ add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
static void
get_loops_exits (bitmap *loop_exits)
{
- class loop *loop;
unsigned j;
edge e;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
auto_vec<edge> exit_edges = get_loop_exit_edges (loop);
loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
@@ -4559,13 +4559,11 @@ estimated_stmt_executions (class loop *loop, widest_int *nit)
void
estimate_numbers_of_iterations (function *fn)
{
- class loop *loop;
-
/* We don't want to issue signed overflow warnings while getting
loop iteration estimates. */
fold_defer_overflow_warnings ();
- FOR_EACH_LOOP_FN (fn, loop, 0)
+ for (class loop *loop : loops_list (fn, 0))
estimate_numbers_of_iterations (loop);
fold_undefer_and_ignore_overflow_warnings ();
@@ -5031,9 +5029,7 @@ free_numbers_of_iterations_estimates (class loop *loop)
void
free_numbers_of_iterations_estimates (function *fn)
{
- class loop *loop;
-
- FOR_EACH_LOOP_FN (fn, loop, 0)
+ for (class loop *loop : loops_list (fn, 0))
free_numbers_of_iterations_estimates (loop);
}
@@ -1980,7 +1980,6 @@ fail:
unsigned int
tree_ssa_prefetch_arrays (void)
{
- class loop *loop;
bool unrolled = false;
int todo_flags = 0;
@@ -2025,7 +2024,7 @@ tree_ssa_prefetch_arrays (void)
set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
}
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Processing loop %d:\n", loop->num);
@@ -1598,18 +1598,17 @@ split_loop_on_cond (struct loop *loop)
static unsigned int
tree_ssa_split_loops (void)
{
- class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
calculate_dominance_info (CDI_POST_DOMINATORS);
- FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
+ for (class loop *loop : loops_list (cfun, LI_INCLUDE_ROOT))
loop->aux = NULL;
/* Go through all loops starting from innermost. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
if (loop->aux)
{
@@ -1630,7 +1629,7 @@ tree_ssa_split_loops (void)
}
}
- FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
+ for (class loop *loop : loops_list (cfun, LI_INCLUDE_ROOT))
loop->aux = NULL;
clear_aux_for_blocks ();
@@ -90,11 +90,10 @@ static tree get_vop_from_header (class loop *);
unsigned int
tree_ssa_unswitch_loops (void)
{
- class loop *loop;
bool changed = false;
/* Go through all loops starting from innermost. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
if (!loop->inner)
/* Unswitch innermost loop. */
@@ -157,8 +157,7 @@ gate_oacc_kernels (function *fn)
if (!lookup_attribute ("oacc kernels", DECL_ATTRIBUTES (fn->decl)))
return false;
- class loop *loop;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->in_oacc_kernels_region)
return true;
@@ -455,12 +454,11 @@ public:
unsigned
pass_scev_cprop::execute (function *)
{
- class loop *loop;
bool any = false;
/* Perform final value replacement in loops, in case the replacement
expressions are cheap. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
any |= final_value_replacement_loop (loop);
return any ? TODO_cleanup_cfg | TODO_update_ssa_only_virtuals : 0;
@@ -1262,7 +1262,6 @@ clean_up_loop_closed_phi (function *fun)
tree rhs;
tree lhs;
gphi_iterator gsi;
- struct loop *loop;
/* Avoid possibly quadratic work when scanning for loop exits across
all loops of a nest. */
@@ -1274,7 +1273,7 @@ clean_up_loop_closed_phi (function *fun)
calculate_dominance_info (CDI_DOMINATORS);
/* Walk over loop in function. */
- FOR_EACH_LOOP_FN (fun, loop, 0)
+ for (class loop *loop : loops_list (fun, 0))
{
/* Check each exit edege of loop. */
auto_vec<edge> exits = get_loop_exit_edges (loop);
@@ -7637,9 +7637,8 @@ do_rpo_vn (function *fn, edge entry, bitmap exit_bbs,
loops and the outermost one optimistically. */
if (iterate)
{
- loop_p loop;
unsigned max_depth = param_rpo_vn_max_loop_depth;
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
if (loop_depth (loop) > max_depth)
for (unsigned i = 2;
i < loop_depth (loop) - max_depth; ++i)
@@ -2561,7 +2561,6 @@ jump_thread_path_registry::thread_through_all_blocks
{
bool retval = false;
unsigned int i;
- class loop *loop;
auto_bitmap threaded_blocks;
hash_set<edge> visited_starting_edges;
@@ -2702,7 +2701,7 @@ jump_thread_path_registry::thread_through_all_blocks
/* Then perform the threading through loop headers. We start with the
innermost loop, so that the changes in cfg we perform won't affect
further threading. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
+ for (class loop *loop : loops_list (cfun, LI_FROM_INNERMOST))
{
if (!loop->header
|| !bitmap_bit_p (threaded_blocks, loop->header->index))
@@ -1194,7 +1194,7 @@ vectorize_loops (void)
/* If some loop was duplicated, it gets bigger number
than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
if (loop->dont_vectorize)
{
any_ifcvt_loops = true;
@@ -1213,7 +1213,7 @@ vectorize_loops (void)
loop4 (copy of loop2)
else
loop5 (copy of loop4)
- If FOR_EACH_LOOP gives us loop3 first (which has
+ If loops' iteration gives us loop3 first (which has
dont_vectorize set), make sure to process loop1 before loop4;
so that we can prevent vectorization of loop4 if loop1
is successfully vectorized. */
@@ -3337,8 +3337,7 @@ vrp_asserts::find_assert_locations (void)
/* Pre-seed loop latch liveness from loop header PHI nodes. Due to
the order we compute liveness and insert asserts we otherwise
fail to insert asserts into the loop latch. */
- loop_p loop;
- FOR_EACH_LOOP (loop, 0)
+ for (class loop *loop : loops_list (cfun, 0))
{
i = loop->latch->index;
unsigned int j = single_succ_edge (loop->latch)->dest_idx;