@@ -2264,21 +2264,21 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
}
}
}
+ }
- /* As a last resort, trying using a gather load or scatter store.
+ /* As a last resort, try using a gather load or scatter store.
- ??? Although the code can handle all group sizes correctly,
- it probably isn't a win to use separate strided accesses based
- on nearby locations. Or, even if it's a win over scalar code,
- it might not be a win over vectorizing at a lower VF, if that
- allows us to use contiguous accesses. */
- if (*memory_access_type == VMAT_ELEMENTWISE
- && single_element_p
- && loop_vinfo
- && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
- masked_p, gs_info))
- *memory_access_type = VMAT_GATHER_SCATTER;
- }
+ ??? Although the code can handle all group sizes correctly,
+ it probably isn't a win to use separate strided accesses based
+ on nearby locations. Or, even if it's a win over scalar code,
+ it might not be a win over vectorizing at a lower VF, if that
+ allows us to use contiguous accesses. */
+ if (*memory_access_type == VMAT_ELEMENTWISE
+ && single_element_p
+ && loop_vinfo
+ && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
+ masked_p, gs_info))
+ *memory_access_type = VMAT_GATHER_SCATTER;
if (*memory_access_type == VMAT_GATHER_SCATTER
|| *memory_access_type == VMAT_ELEMENTWISE)