--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -12295,6 +12295,7 @@ update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
refs that get_load_store_type classified as VMAT_GATHER_SCATTER. */
auto vstmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
if (STMT_VINFO_MEMORY_ACCESS_TYPE (vstmt_vinfo) == VMAT_GATHER_SCATTER
+ || STMT_VINFO_STRIDED_P (vstmt_vinfo)
|| STMT_VINFO_GATHER_SCATTER_P (vstmt_vinfo))
{
/* ??? As we copy epilogues from the main loop incremental
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -2274,7 +2274,8 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
on nearby locations. Or, even if it's a win over scalar code,
it might not be a win over vectorizing at a lower VF, if that
allows us to use contiguous accesses. */
- if (*memory_access_type == VMAT_ELEMENTWISE
+ if ((*memory_access_type == VMAT_ELEMENTWISE
+ || *memory_access_type == VMAT_STRIDED_SLP)
&& single_element_p
&& loop_vinfo
&& vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,