From 8e8d034ce3ea72e7e8ba9e6821f7ad83c7d216d3 Mon Sep 17 00:00:00 2001
From: Feng Xue <fxue@os.amperecomputing.com>
Date: Mon, 1 Jul 2024 15:39:05 +0800
Subject: [PATCH 1/4] vect: Shorten name of macro SLP_TREE_NUMBER_OF_VEC_STMTS
A later patch will add a new field tightly coupled with "vec_stmts_size".
If the original naming convention were followed, the new macro's name
would be very long, so it is better to choose equally meaningful but
shorter names. This patch makes the change for this macro; the follow-up
patch will name the new field and its macro accordingly.
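For reference, the rename is purely mechanical; both macros expand to
the same "vec_stmts_size" field, so the whole change amounts to the
one-line edit in tree-vectorizer.h:

  /* Before: */
  #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
  /* After: */
  #define SLP_TREE_VEC_STMTS_NUM(S) (S)->vec_stmts_size

plus the matching updates at every use site.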
2024-07-01  Feng Xue  <fxue@os.amperecomputing.com>
gcc/
* tree-vectorizer.h (SLP_TREE_NUMBER_OF_VEC_STMTS): Rename the macro
to SLP_TREE_VEC_STMTS_NUM.
* tree-vect-stmts.cc (vect_model_simple_cost): Likewise.
(check_load_store_for_partial_vectors): Likewise.
(vectorizable_bswap): Likewise.
(vectorizable_call): Likewise.
(vectorizable_conversion): Likewise.
(vectorizable_shift): Likewise. Also replace direct references to
the "vec_stmts_size" field with the new macro.
(vectorizable_operation): Likewise.
(vectorizable_store): Likewise.
(vectorizable_load): Likewise.
(vectorizable_condition): Likewise.
* tree-vect-loop.cc (vect_reduction_update_partial_vector_usage):
Likewise.
(vectorizable_reduction): Likewise.
(vect_transform_reduction): Likewise.
(vectorizable_phi): Likewise.
(vectorizable_recurr): Likewise.
(vectorizable_induction): Likewise.
(vectorizable_live_operation): Likewise.
* tree-vect-slp.cc (_slp_tree::_slp_tree): Likewise.
(vect_slp_analyze_node_operations_1): Likewise.
(vect_prologue_cost_for_slp): Likewise.
(vect_slp_analyze_node_operations): Likewise.
(vect_create_constant_vectors): Likewise.
(vect_get_slp_vect_def): Likewise.
(vect_transform_slp_perm_load_1): Likewise.
(vectorizable_slp_permutation_1): Likewise.
(vect_schedule_slp_node): Likewise.
(vectorize_slp_instance_root_stmt): Likewise.
---
gcc/tree-vect-loop.cc | 17 +++++++-------
gcc/tree-vect-slp.cc | 34 +++++++++++++--------------
gcc/tree-vect-stmts.cc | 52 ++++++++++++++++++++----------------------
gcc/tree-vectorizer.h | 2 +-
4 files changed, 51 insertions(+), 54 deletions(-)
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -7471,7 +7471,7 @@ vect_reduction_update_partial_vector_usage (loop_vec_info loop_vinfo,
unsigned nvectors;
if (slp_node)
- nvectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ nvectors = SLP_TREE_VEC_STMTS_NUM (slp_node);
else
nvectors = vect_get_num_copies (loop_vinfo, vectype_in);
@@ -8121,7 +8121,7 @@ vectorizable_reduction (loop_vec_info loop_vinfo,
|| reduction_type == CONST_COND_REDUCTION
|| reduction_type == EXTRACT_LAST_REDUCTION)
&& slp_node
- && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)
+ && SLP_TREE_VEC_STMTS_NUM (slp_node) > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8600,7 +8600,7 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
if (slp_node)
{
ncopies = 1;
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
else
{
@@ -9196,7 +9196,7 @@ vectorizable_phi (vec_info *,
for the scalar and the vector PHIs. This avoids artificially
favoring the vector path (but may pessimize it in some cases). */
if (gimple_phi_num_args (as_a <gphi *> (stmt_info->stmt)) > 1)
- record_stmt_cost (cost_vec, SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
+ record_stmt_cost (cost_vec, SLP_TREE_VEC_STMTS_NUM (slp_node),
vector_stmt, stmt_info, vectype, 0, vect_body);
STMT_VINFO_TYPE (stmt_info) = phi_info_type;
return true;
@@ -9304,7 +9304,7 @@ vectorizable_recurr (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
unsigned ncopies;
if (slp_node)
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ ncopies = SLP_TREE_VEC_STMTS_NUM (slp_node);
else
ncopies = vect_get_num_copies (loop_vinfo, vectype);
poly_int64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
@@ -10217,8 +10217,7 @@ vectorizable_induction (loop_vec_info loop_vinfo,
}
/* loop cost for vec_loop. */
inside_cost
- = record_stmt_cost (cost_vec,
- SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
+ = record_stmt_cost (cost_vec, SLP_TREE_VEC_STMTS_NUM (slp_node),
vector_stmt, stmt_info, 0, vect_body);
/* prologue cost for vec_init (if not nested) and step. */
prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
@@ -10289,7 +10288,7 @@ vectorizable_induction (loop_vec_info loop_vinfo,
}
/* Now generate the IVs. */
- unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ unsigned nvects = SLP_TREE_VEC_STMTS_NUM (slp_node);
gcc_assert ((const_nunits * nvects) % group_size == 0);
unsigned nivs;
if (nested_in_vect_loop)
@@ -10949,7 +10948,7 @@ vectorizable_live_operation (vec_info *vinfo, stmt_vec_info stmt_info,
all the slp vectors. Calculate which slp vector it is and the index
within. */
int num_scalar = SLP_TREE_LANES (slp_node);
- int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ int num_vec = SLP_TREE_VEC_STMTS_NUM (slp_node);
poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
/* Calculate which vector contains the result, and which lane of
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -113,7 +113,7 @@ _slp_tree::_slp_tree ()
SLP_TREE_SCALAR_STMTS (this) = vNULL;
SLP_TREE_SCALAR_OPS (this) = vNULL;
SLP_TREE_VEC_DEFS (this) = vNULL;
- SLP_TREE_NUMBER_OF_VEC_STMTS (this) = 0;
+ SLP_TREE_VEC_STMTS_NUM (this) = 0;
SLP_TREE_CHILDREN (this) = vNULL;
SLP_TREE_LOAD_PERMUTATION (this) = vNULL;
SLP_TREE_LANE_PERMUTATION (this) = vNULL;
@@ -6567,8 +6567,8 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
for (unsigned i = 0; i < SLP_TREE_CHILDREN (node).length (); ++i)
if (SLP_TREE_DEF_TYPE (SLP_TREE_CHILDREN (node)[i]) == vect_internal_def)
{
- SLP_TREE_NUMBER_OF_VEC_STMTS (node)
- = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[i]);
+ SLP_TREE_VEC_STMTS_NUM (node)
+ = SLP_TREE_VEC_STMTS_NUM (SLP_TREE_CHILDREN (node)[i]);
break;
}
}
@@ -6581,7 +6581,7 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
vf = 1;
unsigned int group_size = SLP_TREE_LANES (node);
tree vectype = SLP_TREE_VECTYPE (node);
- SLP_TREE_NUMBER_OF_VEC_STMTS (node)
+ SLP_TREE_VEC_STMTS_NUM (node)
= vect_get_num_vectors (vf * group_size, vectype);
}
@@ -6704,13 +6704,13 @@ vect_prologue_cost_for_slp (slp_tree node,
unsigned HOST_WIDE_INT const_nunits;
unsigned nelt_limit;
auto ops = &SLP_TREE_SCALAR_OPS (node);
- auto_vec<unsigned int> starts (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
+ auto_vec<unsigned int> starts (SLP_TREE_VEC_STMTS_NUM (node));
if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
&& ! multiple_p (const_nunits, group_size))
{
nelt_limit = const_nunits;
hash_set<vect_scalar_ops_slice_hash> vector_ops;
- for (unsigned int i = 0; i < SLP_TREE_NUMBER_OF_VEC_STMTS (node); ++i)
+ for (unsigned int i = 0; i < SLP_TREE_VEC_STMTS_NUM (node); ++i)
if (!vector_ops.add ({ ops, i * const_nunits, const_nunits }))
starts.quick_push (i * const_nunits);
}
@@ -6855,7 +6855,7 @@ vect_slp_analyze_node_operations (vec_info *vinfo, slp_tree node,
poly_uint64 vf = 1;
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
vf = loop_vinfo->vectorization_factor;
- SLP_TREE_NUMBER_OF_VEC_STMTS (child)
+ SLP_TREE_VEC_STMTS_NUM (child)
= vect_get_num_vectors (vf * group_size, vector_type);
/* And cost them. */
vect_prologue_cost_for_slp (child, cost_vec);
@@ -8750,7 +8750,7 @@ vect_create_constant_vectors (vec_info *vinfo, slp_tree op_node)
/* We always want SLP_TREE_VECTYPE (op_node) here correctly set. */
vector_type = SLP_TREE_VECTYPE (op_node);
- unsigned int number_of_vectors = SLP_TREE_NUMBER_OF_VEC_STMTS (op_node);
+ unsigned int number_of_vectors = SLP_TREE_VEC_STMTS_NUM (op_node);
SLP_TREE_VEC_DEFS (op_node).create (number_of_vectors);
auto_vec<tree> voprnds (number_of_vectors);
@@ -8956,7 +8956,7 @@ vect_get_slp_vect_def (slp_tree slp_node, unsigned i)
void
vect_get_slp_defs (slp_tree slp_node, vec<tree> *vec_defs)
{
- vec_defs->create (SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node));
+ vec_defs->create (SLP_TREE_VEC_STMTS_NUM (slp_node));
vec_defs->splice (SLP_TREE_VEC_DEFS (slp_node));
}
@@ -9010,7 +9010,7 @@ vect_transform_slp_perm_load_1 (vec_info *vinfo, slp_tree node,
mode = TYPE_MODE (vectype);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- unsigned int nstmts = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
+ unsigned int nstmts = SLP_TREE_VEC_STMTS_NUM (node);
/* Initialize the vect stmts of NODE to properly insert the generated
stmts later. */
@@ -9210,7 +9210,7 @@ vect_transform_slp_perm_load_1 (vec_info *vinfo, slp_tree node,
if (n_loads)
{
if (repeating_p)
- *n_loads = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
+ *n_loads = SLP_TREE_VEC_STMTS_NUM (node);
else
{
/* Enforced above when !repeating_p. */
@@ -9445,7 +9445,7 @@ vectorizable_slp_permutation_1 (vec_info *vinfo, gimple_stmt_iterator *gsi,
that we use for permutes requires 3n elements. */
npatterns = SLP_TREE_LANES (node);
nelts_per_pattern = ncopies = 3;
- noutputs_per_mask = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
+ noutputs_per_mask = SLP_TREE_VEC_STMTS_NUM (node);
}
else
{
@@ -9691,8 +9691,8 @@ vect_schedule_slp_node (vec_info *vinfo,
stmt_vec_info stmt_info = SLP_TREE_REPRESENTATIVE (node);
- gcc_assert (SLP_TREE_NUMBER_OF_VEC_STMTS (node) != 0);
- SLP_TREE_VEC_DEFS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
+ gcc_assert (SLP_TREE_VEC_STMTS_NUM (node) != 0);
+ SLP_TREE_VEC_DEFS (node).create (SLP_TREE_VEC_STMTS_NUM (node));
if (SLP_TREE_CODE (node) != VEC_PERM_EXPR
&& STMT_VINFO_DATA_REF (stmt_info))
@@ -9951,7 +9951,7 @@ vectorize_slp_instance_root_stmt (slp_tree node, slp_instance instance)
if (instance->kind == slp_inst_kind_ctor)
{
- if (SLP_TREE_NUMBER_OF_VEC_STMTS (node) == 1)
+ if (SLP_TREE_VEC_STMTS_NUM (node) == 1)
{
tree vect_lhs = SLP_TREE_VEC_DEFS (node)[0];
tree root_lhs = gimple_get_lhs (instance->root_stmts[0]->stmt);
@@ -9961,9 +9961,9 @@ vectorize_slp_instance_root_stmt (slp_tree node, slp_instance instance)
vect_lhs);
rstmt = gimple_build_assign (root_lhs, vect_lhs);
}
- else if (SLP_TREE_NUMBER_OF_VEC_STMTS (node) > 1)
+ else if (SLP_TREE_VEC_STMTS_NUM (node) > 1)
{
- int nelts = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
+ int nelts = SLP_TREE_VEC_STMTS_NUM (node);
tree child_def;
int j;
vec<constructor_elt, va_gc> *v;
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -890,7 +890,7 @@ vect_model_simple_cost (vec_info *,
/* ??? Somehow we need to fix this at the callers. */
if (node)
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
+ ncopies = SLP_TREE_VEC_STMTS_NUM (node);
if (!node)
/* Cost the "broadcast" of a scalar operand in to a vector operand.
@@ -1508,7 +1508,7 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
unsigned int nvectors;
if (slp_node)
- nvectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ nvectors = SLP_TREE_VEC_STMTS_NUM (slp_node);
else
nvectors = vect_get_num_copies (loop_vinfo, vectype);
@@ -3084,8 +3084,7 @@ vectorizable_bswap (vec_info *vinfo,
record_stmt_cost (cost_vec,
1, vector_stmt, stmt_info, 0, vect_prologue);
record_stmt_cost (cost_vec,
- slp_node
- ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies,
+ slp_node ? SLP_TREE_VEC_STMTS_NUM (slp_node) : ncopies,
vec_perm, stmt_info, 0, vect_body);
return true;
}
@@ -3459,9 +3458,7 @@ vectorizable_call (vec_info *vinfo,
else
{
unsigned int nvectors
- = (slp_node
- ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
- : ncopies);
+ = (slp_node ? SLP_TREE_VEC_STMTS_NUM (slp_node) : ncopies);
tree scalar_mask = NULL_TREE;
if (mask_opno >= 0)
scalar_mask = gimple_call_arg (stmt_info->stmt, mask_opno);
@@ -5585,7 +5582,7 @@ vectorizable_conversion (vec_info *vinfo,
STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
/* The final packing step produces one vector result per copy. */
unsigned int nvectors
- = (slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies);
+ = (slp_node ? SLP_TREE_VEC_STMTS_NUM (slp_node) : ncopies);
vect_model_promotion_demotion_cost (stmt_info, dt, nvectors,
multi_step_cvt, cost_vec,
widen_arith);
@@ -5598,7 +5595,7 @@ vectorizable_conversion (vec_info *vinfo,
so >> MULTI_STEP_CVT divides by 2^(number of steps - 1). */
unsigned int nvectors
= (slp_node
- ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) >> multi_step_cvt
+ ? SLP_TREE_VEC_STMTS_NUM (slp_node) >> multi_step_cvt
: ncopies * 2);
vect_model_promotion_demotion_cost (stmt_info, dt, nvectors,
multi_step_cvt, cost_vec,
@@ -6204,7 +6201,7 @@ vectorizable_shift (vec_info *vinfo,
scalar shift operand but code-generation below simply always
takes the first. */
if (dt[1] == vect_internal_def
- && maybe_ne (nunits_out * SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
+ && maybe_ne (nunits_out * SLP_TREE_VEC_STMTS_NUM (slp_node),
stmts.length ()))
scalar_shift_arg = false;
}
@@ -6377,6 +6374,8 @@ vectorizable_shift (vec_info *vinfo,
TREE_TYPE (vectype), NULL);
}
+ unsigned vec_num = slp_node ? SLP_TREE_VEC_STMTS_NUM (slp_node) : ncopies;
+
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
@@ -6393,15 +6392,14 @@ vectorizable_shift (vec_info *vinfo,
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.\n");
vec_oprnd1 = op1;
- vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : ncopies);
+ vec_oprnds1.create (vec_num);
vec_oprnds1.quick_push (vec_oprnd1);
/* Store vec_oprnd1 for every vector stmt to be created.
We check during the analysis that all the shift arguments
are the same.
TODO: Allow different constants for different vector
stmts generated for an SLP instance. */
- for (k = 0;
- k < (slp_node ? slp_node->vec_stmts_size - 1 : ncopies - 1); k++)
+ for (k = 0; k < vec_num - 1; k++)
vec_oprnds1.quick_push (vec_oprnd1);
}
}
@@ -6416,8 +6414,8 @@ vectorizable_shift (vec_info *vinfo,
!loop_vinfo ? gsi : NULL);
vec_oprnd1 = vect_init_vector (vinfo, stmt_info, op1, vectype,
!loop_vinfo ? gsi : NULL);
- vec_oprnds1.create (slp_node->vec_stmts_size);
- for (k = 0; k < slp_node->vec_stmts_size; k++)
+ vec_oprnds1.create (vec_num);
+ for (k = 0; k < vec_num; k++)
vec_oprnds1.quick_push (vec_oprnd1);
}
else if (dt[1] == vect_constant_def)
@@ -6680,7 +6678,7 @@ vectorizable_operation (vec_info *vinfo,
if (slp_node)
{
ncopies = 1;
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
else
{
@@ -6818,7 +6816,7 @@ vectorizable_operation (vec_info *vinfo,
vector stmt. See below for the actual lowering that will
be applied. */
unsigned n
- = slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies;
+ = slp_node ? SLP_TREE_VEC_STMTS_NUM (slp_node) : ncopies;
switch (code)
{
case PLUS_EXPR:
@@ -8158,7 +8156,7 @@ vectorizable_store (vec_info *vinfo,
/* FORNOW. This restriction should be relaxed. */
if (loop
&& nested_in_vect_loop_p (loop, stmt_info)
- && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+ && (ncopies > 1 || (slp && SLP_TREE_VEC_STMTS_NUM (slp_node) > 1)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8334,7 +8332,7 @@ vectorizable_store (vec_info *vinfo,
grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
== first_stmt_info);
@@ -8501,7 +8499,7 @@ vectorizable_store (vec_info *vinfo,
}
}
ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ ncopies = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
if (!costing_p)
@@ -9901,7 +9899,7 @@ vectorizable_load (vec_info *vinfo,
/* FORNOW. This restriction should be relaxed. */
if (nested_in_vect_loop
- && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+ && (ncopies > 1 || (slp && SLP_TREE_VEC_STMTS_NUM (slp_node) > 1)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -10174,7 +10172,7 @@ vectorizable_load (vec_info *vinfo,
}
gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
if (slp)
- for (j = 0; j < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); ++j)
+ for (j = 0; j < (int) SLP_TREE_VEC_STMTS_NUM (slp_node); ++j)
slp_node->push_vec_def (new_stmt);
else
{
@@ -10325,7 +10323,7 @@ vectorizable_load (vec_info *vinfo,
dr_chain.create (ncopies);
}
else
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ ncopies = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
unsigned int group_el = 0;
unsigned HOST_WIDE_INT
@@ -10518,7 +10516,7 @@ vectorizable_load (vec_info *vinfo,
/* We do not support grouped accesses in a nested loop,
instead the access is contiguous but it might be
permuted. No gap adjustment is needed though. */
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
else if (slp_perm
&& (group_size != scalar_lanes
|| !multiple_p (nunits, group_size)))
@@ -10532,7 +10530,7 @@ vectorizable_load (vec_info *vinfo,
}
else
{
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
group_gap_adj
= group_size - scalar_lanes;
}
@@ -10550,7 +10548,7 @@ vectorizable_load (vec_info *vinfo,
group_gap_adj = 0;
ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
if (slp)
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
gcc_assert (alignment_support_scheme);
@@ -12096,7 +12094,7 @@ vectorizable_condition (vec_info *vinfo,
if (slp_node)
{
ncopies = 1;
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ vec_num = SLP_TREE_VEC_STMTS_NUM (slp_node);
}
else
{
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -303,7 +303,7 @@ public:
#define SLP_TREE_SCALAR_OPS(S) (S)->ops
#define SLP_TREE_REF_COUNT(S) (S)->refcnt
#define SLP_TREE_VEC_DEFS(S) (S)->vec_defs
-#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
+#define SLP_TREE_VEC_STMTS_NUM(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
#define SLP_TREE_SIMD_CLONE_INFO(S) (S)->simd_clone_info
--
2.17.1