@@ -14120,7 +14120,7 @@ aarch64_simd_vector_alignment (const_tree type)
}
/* Implement target hook TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */
-static HOST_WIDE_INT
+static poly_uint64
aarch64_vectorize_preferred_vector_alignment (const_tree type)
{
if (aarch64_sve_data_mode_p (TYPE_MODE (type)))
@@ -14145,9 +14145,11 @@ aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
/* For fixed-length vectors, check that the vectorizer will aim for
full-vector alignment. This isn't true for generic GCC vectors
that are wider than the ABI maximum of 128 bits. */
+ poly_uint64 preferred_alignment
+ = aarch64_vectorize_preferred_vector_alignment (type);
if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (wi::to_widest (TYPE_SIZE (type))
- != aarch64_vectorize_preferred_vector_alignment (type)))
+ && maybe_ne (wi::to_widest (TYPE_SIZE (type)),
+ preferred_alignment))
return false;
/* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
@@ -5889,7 +5889,7 @@ For vector memory operations the cost may depend on type (@var{vectype}) and
misalignment value (@var{misalign}).
@end deftypefn
-@deftypefn {Target Hook} HOST_WIDE_INT TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT (const_tree @var{type})
+@deftypefn {Target Hook} poly_uint64 TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT (const_tree @var{type})
This hook returns the preferred alignment in bits for accesses to
vectors of type @var{type} in vectorized code. This might be less than
or greater than the ABI-defined value returned by
@@ -1802,7 +1802,7 @@ for alignment.\n\
\n\
The default hook returns @code{TYPE_ALIGN (@var{type})}, which is\n\
correct for most targets.",
- HOST_WIDE_INT, (const_tree type),
+ poly_uint64, (const_tree type),
default_preferred_vector_alignment)
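
The hook's return type changes from HOST_WIDE_INT to poly_uint64: on SVE the preferred alignment can be 128 + 128 * N bits, where N is the runtime vector-length multiplier, so callers can no longer treat the value as a plain integer. A standalone toy model of the is_constant protocol the rest of the patch relies on (plain C; poly_u64 and is_constant here are illustrative stand-ins, not GCC's real poly-int.h templates):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy degree-1 poly_uint64: value = c0 + c1 * N, where N is the
       runtime vector-length multiplier.  */
    struct poly_u64 { unsigned long long c0, c1; };

    /* Mirrors poly_uint64::is_constant (&out): succeeds only when the
       value does not depend on N.  */
    static bool
    is_constant (struct poly_u64 p, unsigned long long *out)
    {
      if (p.c1 != 0)
        return false;
      *out = p.c0;
      return true;
    }

    int
    main (void)
    {
      struct poly_u64 advsimd = { 128, 0 };  /* fixed 128-bit alignment */
      struct poly_u64 sve = { 128, 128 };    /* 128 + 128 * N bits */
      unsigned long long c;
      printf ("advsimd constant: %d\n", is_constant (advsimd, &c));
      printf ("sve constant: %d\n", is_constant (sve, &c));
      return 0;
    }
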
/* Return true if vector alignment is reachable (by peeling N
@@ -102,7 +102,7 @@ extern HOST_WIDE_INT constant_alignment_word_strings (const_tree,
HOST_WIDE_INT);
extern HOST_WIDE_INT default_vector_alignment (const_tree);
-extern HOST_WIDE_INT default_preferred_vector_alignment (const_tree);
+extern poly_uint64 default_preferred_vector_alignment (const_tree);
extern bool default_builtin_vector_alignment_reachable (const_tree, bool);
extern bool
default_builtin_support_vector_misalignment (machine_mode mode,
@@ -1260,7 +1260,7 @@ default_vector_alignment (const_tree type)
/* The default implementation of
TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */
-HOST_WIDE_INT
+poly_uint64
default_preferred_vector_alignment (const_tree type)
{
return TYPE_ALIGN (type);
@@ -809,7 +809,7 @@ vect_record_base_alignments (vec_info *vinfo)
/* Return the target alignment for the vectorized form of DR_INFO. */
-static unsigned int
+static poly_uint64
vect_calculate_target_alignment (dr_vec_info *dr_info)
{
tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
@@ -852,10 +852,14 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
bool step_preserves_misalignment_p;
- unsigned HOST_WIDE_INT vector_alignment
- = vect_calculate_target_alignment (dr_info) / BITS_PER_UNIT;
+ poly_uint64 vector_alignment
+ = exact_div (vect_calculate_target_alignment (dr_info), BITS_PER_UNIT);
DR_TARGET_ALIGNMENT (dr_info) = vector_alignment;
+ unsigned HOST_WIDE_INT vect_align_c;
+ if (!vector_alignment.is_constant (&vect_align_c))
+ return;
+
/* No step for BB vectorization. */
if (!loop)
{
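
The hunk above stores the full poly_uint64 in DR_TARGET_ALIGNMENT and only continues with the constant-based misalignment analysis when is_constant succeeds. The exact_div call asserts that BITS_PER_UNIT divides the alignment exactly; in the toy model, dividing c0 + c1 * N by d is well defined for all N only when d divides both coefficients, which this sketch (exact_div_p is an illustrative stand-in) makes concrete:

    #include <assert.h>
    #include <stdio.h>

    struct poly_u64 { unsigned long long c0, c1; };

    /* Toy exact_div: (c0 + c1 * N) / d is a polynomial again only when d
       divides both coefficients; GCC's exact_div asserts the same.  */
    static struct poly_u64
    exact_div_p (struct poly_u64 p, unsigned long long d)
    {
      assert (p.c0 % d == 0 && p.c1 % d == 0);
      return (struct poly_u64) { p.c0 / d, p.c1 / d };
    }

    int
    main (void)
    {
      /* 128 + 128 * N bits become 16 + 16 * N bytes (BITS_PER_UNIT == 8).  */
      struct poly_u64 bits = { 128, 128 };
      struct poly_u64 bytes = exact_div_p (bits, 8);
      printf ("%llu + %llu * N bytes\n", bytes.c0, bytes.c1);
      return 0;
    }
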
@@ -872,7 +876,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
else if (nested_in_vect_loop_p (loop, stmt_info))
{
step_preserves_misalignment_p
- = (DR_STEP_ALIGNMENT (dr_info->dr) % vector_alignment) == 0;
+ = (DR_STEP_ALIGNMENT (dr_info->dr) % vect_align_c) == 0;
if (dump_enabled_p ())
{
@@ -894,7 +898,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
{
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
step_preserves_misalignment_p
- = multiple_p (DR_STEP_ALIGNMENT (dr_info->dr) * vf, vector_alignment);
+ = multiple_p (DR_STEP_ALIGNMENT (dr_info->dr) * vf, vect_align_c);
if (!step_preserves_misalignment_p && dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -913,7 +917,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
base_misalignment = (*entry)->base_misalignment;
}
- if (drb->offset_alignment < vector_alignment
+ if (drb->offset_alignment < vect_align_c
|| !step_preserves_misalignment_p
/* We need to know whether the step wrt the vectorized loop is
negative when computing the starting misalignment below. */
@@ -925,13 +929,13 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
return;
}
- if (base_alignment < vector_alignment)
+ if (base_alignment < vect_align_c)
{
unsigned int max_alignment;
tree base = get_base_for_alignment (drb->base_address, &max_alignment);
- if (max_alignment < vector_alignment
+ if (max_alignment < vect_align_c
|| !vect_can_force_dr_alignment_p (base,
- vector_alignment * BITS_PER_UNIT))
+ vect_align_c * BITS_PER_UNIT))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
@@ -962,8 +966,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
* TREE_INT_CST_LOW (drb->step));
unsigned int const_misalignment;
- if (!known_misalignment (misalignment, vector_alignment,
- &const_misalignment))
+ if (!known_misalignment (misalignment, vect_align_c, &const_misalignment))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
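
known_misalignment above asks whether a polynomial byte offset reduces to a single compile-time misalignment modulo the vector alignment. In the toy model, (c0 + c1 * N) mod align is independent of N exactly when align divides c1, in which case it equals c0 mod align; known_misalign below is an illustrative stand-in for that test:

    #include <stdbool.h>
    #include <stdio.h>

    struct poly_u64 { unsigned long long c0, c1; };

    /* Toy known_misalignment: (c0 + c1 * N) mod align is the same for
       every N exactly when align divides c1.  */
    static bool
    known_misalign (struct poly_u64 p, unsigned long long align,
                    unsigned int *out)
    {
      if (p.c1 % align != 0)
        return false;
      *out = (unsigned int) (p.c0 % align);
      return true;
    }

    int
    main (void)
    {
      struct poly_u64 mis = { 20, 32 };  /* byte offset 20 + 32 * N */
      unsigned int m;
      if (known_misalign (mis, 16, &m))
        printf ("misalignment %u\n", m);  /* 4, since 16 divides 32 */
      return 0;
    }
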
@@ -1027,14 +1030,16 @@ vect_update_misalignment_for_peel (dr_vec_info *dr_info,
return;
}
- if (known_alignment_for_access_p (dr_info)
+ unsigned HOST_WIDE_INT alignment;
+ if (DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment)
+ && known_alignment_for_access_p (dr_info)
&& known_alignment_for_access_p (dr_peel_info))
{
bool negative = tree_int_cst_compare (DR_STEP (dr_info->dr),
size_zero_node) < 0;
int misal = DR_MISALIGNMENT (dr_info);
misal += negative ? -npeel * dr_size : npeel * dr_size;
- misal &= DR_TARGET_ALIGNMENT (dr_info) - 1;
+ misal &= alignment - 1;
SET_DR_MISALIGNMENT (dr_info, misal);
return;
}
@@ -1678,7 +1683,12 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
size_zero_node) < 0;
vectype = STMT_VINFO_VECTYPE (stmt_info);
- unsigned int target_align = DR_TARGET_ALIGNMENT (dr_info);
+ /* If known_alignment_for_access_p then we have set
+ DR_MISALIGNMENT, which is only done if we know it at
+ compile time, so it is safe to assume the target
+ alignment is constant. */
+ unsigned int target_align
+ = DR_TARGET_ALIGNMENT (dr_info).to_constant ();
unsigned int dr_size = vect_get_scalar_dr_size (dr_info);
mis = (negative
? DR_MISALIGNMENT (dr_info)
@@ -1955,7 +1965,12 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
mis = (negative
? DR_MISALIGNMENT (dr0_info)
: -DR_MISALIGNMENT (dr0_info));
- unsigned int target_align = DR_TARGET_ALIGNMENT (dr0_info);
+ /* If known_alignment_for_access_p then we have set
+ DR_MISALIGNMENT, which is only done if we know it at
+ compile time, so it is safe to assume the target
+ alignment is constant. */
+ unsigned int target_align
+ = DR_TARGET_ALIGNMENT (dr0_info).to_constant ();
npeel = ((mis & (target_align - 1))
/ vect_get_scalar_dr_size (dr0_info));
}
@@ -1995,9 +2010,19 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
unsigned max_peel = npeel;
if (max_peel == 0)
{
- unsigned int target_align = DR_TARGET_ALIGNMENT (dr0_info);
- max_peel = (target_align
- / vect_get_scalar_dr_size (dr0_info) - 1);
+ poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr0_info);
+ unsigned HOST_WIDE_INT target_align_c;
+ if (target_align.is_constant (&target_align_c))
+ max_peel = (target_align_c
+ / vect_get_scalar_dr_size (dr0_info) - 1);
+ else
+ {
+ do_peeling = false;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Disable peeling, max peels set and vector"
+ " alignment unknown\n");
+ }
}
if (max_peel > max_allowed_peel)
{
@@ -2236,11 +2261,18 @@ vect_find_same_alignment_drs (vec_info *vinfo, data_dependence_relation *ddr)
if (maybe_ne (diff, 0))
{
/* Get the wider of the two alignments. */
- unsigned int align_a = (vect_calculate_target_alignment (dr_info_a)
- / BITS_PER_UNIT);
- unsigned int align_b = (vect_calculate_target_alignment (dr_info_b)
- / BITS_PER_UNIT);
- unsigned int max_align = MAX (align_a, align_b);
+ poly_uint64 align_a
+ = exact_div (vect_calculate_target_alignment (dr_info_a),
+ BITS_PER_UNIT);
+ poly_uint64 align_b
+ = exact_div (vect_calculate_target_alignment (dr_info_b),
+ BITS_PER_UNIT);
+ unsigned HOST_WIDE_INT align_a_c, align_b_c;
+ if (!align_a.is_constant (&align_a_c)
+ || !align_b.is_constant (&align_b_c))
+ return;
+
+ unsigned HOST_WIDE_INT max_align = MAX (align_a_c, align_b_c);
/* Require the gap to be a multiple of the larger vector alignment. */
if (!multiple_p (diff, max_align))
@@ -4296,7 +4328,8 @@ vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
else
set_ptr_info_alignment (SSA_NAME_PTR_INFO (name),
- DR_TARGET_ALIGNMENT (dr_info), misalign);
+ known_alignment (DR_TARGET_ALIGNMENT (dr_info)),
+ misalign);
}
/* Function vect_create_addr_base_for_vector_ref.
@@ -5340,10 +5373,13 @@ vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
new_temp = copy_ssa_name (ptr);
else
new_temp = make_ssa_name (TREE_TYPE (ptr));
- unsigned int align = DR_TARGET_ALIGNMENT (dr_info);
+ poly_uint64 align = DR_TARGET_ALIGNMENT (dr_info);
+ tree type = TREE_TYPE (ptr);
new_stmt = gimple_build_assign
(new_temp, BIT_AND_EXPR, ptr,
- build_int_cst (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
+ fold_build2 (MINUS_EXPR, type,
+ build_int_cst (type, 0),
+ build_int_cst (type, align)));
new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
gcc_assert (!new_bb);
data_ref
@@ -6226,7 +6262,7 @@ vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
on ALIGNMENT bit boundary. */
bool
-vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
+vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment)
{
if (!VAR_P (decl))
return false;
@@ -6236,9 +6272,9 @@ vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
return false;
if (TREE_STATIC (decl))
- return (alignment <= MAX_OFILE_ALIGNMENT);
+ return (known_le (alignment, MAX_OFILE_ALIGNMENT));
else
- return (alignment <= MAX_STACK_ALIGNMENT);
+ return (known_le (alignment, (unsigned HOST_WIDE_INT) MAX_STACK_ALIGNMENT));
}
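
known_le here must hold for every runtime vector length, so a genuinely variable alignment such as 16 + 16 * N bytes can never be forced up against a constant cap, and the function now answers false for such requests. A toy version of the comparison (the cap value is illustrative, not a real target limit):

    #include <stdbool.h>
    #include <stdio.h>

    struct poly_u64 { unsigned long long c0, c1; };

    /* Toy known_le: a <= b must hold for every N >= 0, which for degree-1
       polynomials means both coefficients are ordered.  */
    static bool
    known_le_p (struct poly_u64 a, struct poly_u64 b)
    {
      return a.c0 <= b.c0 && a.c1 <= b.c1;
    }

    int
    main (void)
    {
      struct poly_u64 sve_align = { 16, 16 };    /* 16 + 16 * N bytes */
      struct poly_u64 stack_cap = { 16384, 0 };  /* constant cap */
      /* 0: for a large enough N the alignment exceeds the cap.  */
      printf ("%d\n", known_le_p (sve_align, stack_cap));
      return 0;
    }
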
@@ -1561,8 +1561,9 @@ get_misalign_in_elems (gimple **seq, loop_vec_info loop_vinfo)
stmt_vec_info stmt_info = dr_info->stmt;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- unsigned int target_align = DR_TARGET_ALIGNMENT (dr_info);
- gcc_assert (target_align != 0);
+ poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr_info);
+ unsigned HOST_WIDE_INT target_align_c;
+ tree target_align_minus_1;
bool negative = tree_int_cst_compare (DR_STEP (dr_info->dr),
size_zero_node) < 0;
@@ -1572,7 +1573,18 @@ get_misalign_in_elems (gimple **seq, loop_vec_info loop_vinfo)
tree start_addr = vect_create_addr_base_for_vector_ref (stmt_info, seq,
offset);
tree type = unsigned_type_for (TREE_TYPE (start_addr));
- tree target_align_minus_1 = build_int_cst (type, target_align - 1);
+ if (target_align.is_constant (&target_align_c))
+ target_align_minus_1 = build_int_cst (type, target_align_c - 1);
+ else
+ {
+ tree vla = build_int_cst (type, target_align);
+ tree vla_align = fold_build2 (BIT_AND_EXPR, type, vla,
+ fold_build2 (MINUS_EXPR, type,
+ build_int_cst (type, 0), vla));
+ target_align_minus_1 = fold_build2 (MINUS_EXPR, type, vla_align,
+ build_int_cst (type, 1));
+ }
+
HOST_WIDE_INT elem_size
= int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
tree elem_size_log = build_int_cst (type, exact_log2 (elem_size));
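
When the target alignment is not a compile-time constant, the hunk above builds vla & (0 - vla) in the IL. On two's-complement values, x & -x isolates the lowest set bit of x, i.e. the largest power of two dividing x, which is the strongest alignment deducible from a runtime vector length. A quick standalone check of the identity:

    #include <stdio.h>

    int
    main (void)
    {
      /* x & -x isolates the lowest set bit of x: the largest power of two
         dividing x.  The patch builds the same expression on trees,
         vla & (0 - vla), to turn a runtime length into an alignment.  */
      unsigned long long x = 48;            /* e.g. 3 * 16-byte granules */
      unsigned long long align = x & -x;    /* 16 */
      unsigned long long mask = align - 1;  /* 15, the misalignment mask */
      printf ("align %llu mask %llu\n", align, mask);
      return 0;
    }
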
@@ -1631,7 +1643,7 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
tree iters, iters_name;
stmt_vec_info stmt_info = dr_info->stmt;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- unsigned int target_align = DR_TARGET_ALIGNMENT (dr_info);
+ poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr_info);
if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
@@ -1650,8 +1662,12 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
tree type = TREE_TYPE (misalign_in_elems);
HOST_WIDE_INT elem_size
= int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
- HOST_WIDE_INT align_in_elems = target_align / elem_size;
- tree align_in_elems_minus_1 = build_int_cst (type, align_in_elems - 1);
+ /* We only do prolog peeling if the target alignment is known at compile
+ time. */
+ poly_uint64 align_in_elems
+ = exact_div (target_align, elem_size);
+ tree align_in_elems_minus_1
+ = build_int_cst (type, align_in_elems - 1);
tree align_in_elems_tree = build_int_cst (type, align_in_elems);
/* Create: (niters_type) ((align_in_elems - misalign_in_elems)
@@ -1666,7 +1682,11 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
misalign_in_elems);
iters = fold_build2 (BIT_AND_EXPR, type, iters, align_in_elems_minus_1);
iters = fold_convert (niters_type, iters);
- *bound = align_in_elems - 1;
+ unsigned HOST_WIDE_INT align_in_elems_c;
+ if (align_in_elems.is_constant (&align_in_elems_c))
+ *bound = align_in_elems_c - 1;
+ else
+ *bound = -1;
}
if (dump_enabled_p ())
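
vect_gen_prolog_loop_niters derives the peel count from iters = (align_in_elems - misalign_in_elems) & (align_in_elems - 1), with *bound = align_in_elems - 1 as its maximum, which is why this path requires a compile-time alignment. A worked numeric check with assumed values (16-byte vectors, 4-byte elements):

    #include <stdio.h>

    int
    main (void)
    {
      /* Scalar iterations to peel before the access becomes aligned.  */
      unsigned align_in_elems = 4;     /* 16-byte vectors, 4-byte elements */
      unsigned misalign_in_elems = 3;  /* 3 elements past a boundary */
      unsigned iters = (align_in_elems - misalign_in_elems)
                       & (align_in_elems - 1);
      printf ("peel %u iterations\n", iters);  /* 1 */
      return 0;
    }
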
@@ -2404,6 +2424,13 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
profile_probability prob_prolog, prob_vector, prob_epilog;
int estimated_vf;
int prolog_peeling = 0;
+ /* We currently do not support prolog peeling if the target alignment is not
+ known at compile time. 'vect_gen_prolog_loop_niters' depends on the
+ target alignment being constant. */
+ dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
+ if (dr_info && !DR_TARGET_ALIGNMENT (dr_info).is_constant ())
+ return NULL;
+
if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
prolog_peeling = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
@@ -6131,8 +6131,10 @@ ensure_base_align (dr_vec_info *dr_info)
{
tree base_decl = dr_info->base_decl;
- unsigned int align_base_to
- = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT;
+ /* We should only be able to increase the alignment of a base object
+ if we know what its new alignment should be at compile time. */
+ unsigned HOST_WIDE_INT align_base_to
+ = DR_TARGET_ALIGNMENT (dr_info).to_constant () * BITS_PER_UNIT;
if (decl_in_symtab_p (base_decl))
symtab_node::get (base_decl)->increase_alignment (align_base_to);
@@ -7080,7 +7082,8 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < vec_num; i++)
{
- unsigned align, misalign;
+ unsigned misalign;
+ unsigned HOST_WIDE_INT align;
tree final_mask = NULL_TREE;
if (loop_masks)
@@ -7121,7 +7124,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
vect_permute_store_chain(). */
vec_oprnd = result_chain[i];
- align = DR_TARGET_ALIGNMENT (first_dr_info);
+ align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
if (aligned_access_p (first_dr_info))
misalign = 0;
else if (DR_MISALIGNMENT (first_dr_info) == -1)
@@ -8309,7 +8312,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
case dr_aligned:
case dr_unaligned_supported:
{
- unsigned int align, misalign;
+ unsigned int misalign;
+ unsigned HOST_WIDE_INT align;
if (memory_access_type == VMAT_GATHER_SCATTER)
{
@@ -8329,7 +8333,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
break;
}
- align = DR_TARGET_ALIGNMENT (dr_info);
+ align
+ = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
if (alignment_support_scheme == dr_aligned)
{
gcc_assert (aligned_access_p (first_dr_info));
@@ -8396,7 +8401,10 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
ptr = copy_ssa_name (dataref_ptr);
else
ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
- unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
+ /* For explicit realign the target alignment should be
+ known at compile time. */
+ unsigned HOST_WIDE_INT align
+ = DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
new_stmt = gimple_build_assign
(ptr, BIT_AND_EXPR, dataref_ptr,
build_int_cst
@@ -8440,7 +8448,10 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
new_temp = copy_ssa_name (dataref_ptr);
else
new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
- unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
+ /* We should only be doing this if we know the target
+ alignment at compile time. */
+ unsigned HOST_WIDE_INT align
+ = DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
new_stmt = gimple_build_assign
(new_temp, BIT_AND_EXPR, dataref_ptr,
build_int_cst (TREE_TYPE (dataref_ptr),
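
Both realign hunks mask the data pointer with the negated alignment, and ptr & -align only rounds an address down to an alignment boundary when align is a compile-time power of two, hence the to_constant calls; per the comments above, these realign paths are only reached when that constant is known. A standalone check of the rounding:

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* ptr & -align rounds an address down to the previous align-byte
         boundary, assuming align is a power of two.  */
      uintptr_t ptr = 0x1007;
      uintptr_t align = 16;
      uintptr_t aligned = ptr & -align;  /* 0x1000 */
      printf ("0x%lx\n", (unsigned long) aligned);
      return 0;
    }
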
@@ -773,7 +773,7 @@ struct dr_vec_info {
int misalignment;
/* The byte alignment that we'd ideally like the reference to have,
and the value that misalignment is measured against. */
- int target_alignment;
+ poly_uint64 target_alignment;
/* If true the alignment of base_decl needs to be increased. */
bool base_misaligned;
tree base_decl;
@@ -1299,7 +1299,7 @@ vect_known_alignment_in_bytes (dr_vec_info *dr_info)
if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
if (DR_MISALIGNMENT (dr_info) == 0)
- return DR_TARGET_ALIGNMENT (dr_info);
+ return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
@@ -1518,7 +1518,7 @@ extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);
/* In tree-vect-data-refs.c. */
-extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
+extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
(dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,