@@ -9623,9 +9623,12 @@ vectorizable_recurr (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
return false;
/* The recurrence costs the initialization vector and one permute
- for each copy. */
- unsigned prologue_cost = record_stmt_cost (cost_vec, 1, scalar_to_vec,
- stmt_info, 0, vect_prologue);
+ for each copy. With SLP the prologue value is explicitly
+ represented and costed separately. */
+ unsigned prologue_cost = 0;
+ if (!slp_node)
+ prologue_cost = record_stmt_cost (cost_vec, 1, scalar_to_vec,
+ stmt_info, 0, vect_prologue);
unsigned inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
stmt_info, 0, vect_body);
if (dump_enabled_p ())
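
With this change the non-SLP path still records one scalar_to_vec in the
prologue plus ncopies vector_stmt in the body (e.g. four copies cost 1 + 4),
while under SLP only the ncopies vector_stmt remain here; the build of the
initial vector is presumably accounted for when the SLP child node that
carries the prologue value is itself costed.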
@@ -512,7 +512,9 @@ static const int no_arg_map[] = { 0 };
static const int arg0_map[] = { 1, 0 };
static const int arg1_map[] = { 1, 1 };
static const int arg2_map[] = { 1, 2 };
+static const int arg1_arg3_map[] = { 2, 1, 3 };
static const int arg1_arg4_map[] = { 2, 1, 4 };
+static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 };
static const int arg3_arg2_map[] = { 2, 3, 2 };
static const int op1_op0_map[] = { 2, 1, 0 };
static const int off_map[] = { 1, -3 };
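
For readers of these tables: each map encodes its operand count in element 0,
followed by the gimple-call argument indices the SLP operands are drawn from
(negative entries such as the -3 in off_map flag specially handled operands).
A minimal standalone sketch of that decoding, not actual GCC code:

    #include <stdio.h>

    /* Hypothetical decoder for the operand maps above: element 0 is
       the number of SLP operands, elements 1..n are the call-argument
       indices they come from.  */
    static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 };

    int
    main (void)
    {
      int nops = arg1_arg3_arg4_map[0];
      for (int i = 1; i <= nops; i++)
        printf ("SLP operand %d <- call argument %d\n", i - 1,
                arg1_arg3_arg4_map[i]);
      return 0;
    }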
@@ -573,6 +575,13 @@ vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false,
case IFN_MASK_LEN_GATHER_LOAD:
return arg1_arg4_map;
+ case IFN_SCATTER_STORE:
+ return arg1_arg3_map;
+
+ case IFN_MASK_SCATTER_STORE:
+ case IFN_MASK_LEN_SCATTER_STORE:
+ return arg1_arg3_arg4_map;
+
case IFN_MASK_STORE:
return gather_scatter_p ? off_arg3_arg2_map : arg3_arg2_map;
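
The new cases mirror the gather-load handling above: for IFN_SCATTER_STORE
the map { 2, 1, 3 } selects the offset (argument 1) and the stored value
(argument 3) as SLP operands, and for the masked variants { 3, 1, 3, 4 }
additionally selects the mask (argument 4), assuming the usual
(base, offset, scale, value[, mask, ...]) argument order of these internal
functions; the base pointer and scale stay outside the SLP graph.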
@@ -1187,7 +1196,10 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (cfn == CFN_MASK_LOAD
|| cfn == CFN_GATHER_LOAD
|| cfn == CFN_MASK_GATHER_LOAD
- || cfn == CFN_MASK_LEN_GATHER_LOAD)
+ || cfn == CFN_MASK_LEN_GATHER_LOAD
+ || cfn == CFN_SCATTER_STORE
+ || cfn == CFN_MASK_SCATTER_STORE
+ || cfn == CFN_MASK_LEN_SCATTER_STORE)
ldst_p = true;
else if (cfn == CFN_MASK_STORE)
{
@@ -1473,6 +1485,9 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
&& rhs_code != CFN_GATHER_LOAD
&& rhs_code != CFN_MASK_GATHER_LOAD
&& rhs_code != CFN_MASK_LEN_GATHER_LOAD
+ && rhs_code != CFN_SCATTER_STORE
+ && rhs_code != CFN_MASK_SCATTER_STORE
+ && rhs_code != CFN_MASK_LEN_SCATTER_STORE
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info)
/* Not grouped loads are handled as externals for BB
vectorization. For loop vectorization we can handle
@@ -9162,7 +9162,8 @@ vectorizable_store (vec_info *vinfo,
{
if (loop_masks)
final_mask = vect_get_loop_mask (loop_vinfo, gsi,
- loop_masks, ncopies,
+ loop_masks,
+ ncopies * vec_num,
vectype, j);
if (vec_mask)
final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
@@ -9188,7 +9189,8 @@ vectorizable_store (vec_info *vinfo,
{
if (loop_lens)
final_len = vect_get_loop_len (loop_vinfo, gsi,
- loop_lens, ncopies,
+ loop_lens,
+ ncopies * vec_num,
vectype, j, 1);
else
final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
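
Both call sites make the same adjustment: under SLP a store may need vec_num
vectors per unrolled copy, so the total number of masks or lengths to request
is ncopies * vec_num rather than ncopies. For example, with ncopies == 2 and
vec_num == 4 the loop needs 8 masks and j indexes into [0, 8); passing plain
ncopies would under-count the number of vectors the loop-mask/loop-len
bookkeeping has to provide.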