@@ -35214,48 +35214,6 @@ static const struct builtin_description bdesc_tm[] =
{ OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_LM256", (enum ix86_builtins) BUILT_IN_TM_LOG_M256, UNKNOWN, VOID_FTYPE_PCVOID },
};
-/* TM callbacks. */
-
-/* Return the builtin decl needed to load a vector of TYPE. */
-
-static tree
-ix86_builtin_tm_load (tree type)
-{
- if (TREE_CODE (type) == VECTOR_TYPE)
- {
- switch (tree_to_uhwi (TYPE_SIZE (type)))
- {
- case 64:
- return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64);
- case 128:
- return builtin_decl_explicit (BUILT_IN_TM_LOAD_M128);
- case 256:
- return builtin_decl_explicit (BUILT_IN_TM_LOAD_M256);
- }
- }
- return NULL_TREE;
-}
-
-/* Return the builtin decl needed to store a vector of TYPE. */
-
-static tree
-ix86_builtin_tm_store (tree type)
-{
- if (TREE_CODE (type) == VECTOR_TYPE)
- {
- switch (tree_to_uhwi (TYPE_SIZE (type)))
- {
- case 64:
- return builtin_decl_explicit (BUILT_IN_TM_STORE_M64);
- case 128:
- return builtin_decl_explicit (BUILT_IN_TM_STORE_M128);
- case 256:
- return builtin_decl_explicit (BUILT_IN_TM_STORE_M256);
- }
- }
- return NULL_TREE;
-}
-
/* Initialize the transactional memory vector load/store builtins. */
static void
@@ -54340,12 +54298,6 @@ ix86_addr_space_zero_address_valid (addr_space_t as)
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
ix86_builtin_vectorized_function
-#undef TARGET_VECTORIZE_BUILTIN_TM_LOAD
-#define TARGET_VECTORIZE_BUILTIN_TM_LOAD ix86_builtin_tm_load
-
-#undef TARGET_VECTORIZE_BUILTIN_TM_STORE
-#define TARGET_VECTORIZE_BUILTIN_TM_STORE ix86_builtin_tm_store
-
#undef TARGET_VECTORIZE_BUILTIN_GATHER
#define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather
@@ -4253,10 +4253,6 @@ address; but often a machine-dependent strategy can generate better code.
@hook TARGET_VECTORIZE_DESTROY_COST_DATA
-@hook TARGET_VECTORIZE_BUILTIN_TM_LOAD
-
-@hook TARGET_VECTORIZE_BUILTIN_TM_STORE
-
@hook TARGET_VECTORIZE_BUILTIN_GATHER
@hook TARGET_VECTORIZE_BUILTIN_SCATTER
@@ -12,6 +12,10 @@ DEF_TM_BUILTIN (BUILT_IN_TM_IRREVOCABLE, "_ITM_changeTransactionMode",
DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY, "_ITM_memcpyRtWt",
BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY_RNWT, "_ITM_memcpyRnWt",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY_RTWN, "_ITM_memcpyRtWn",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
DEF_TM_BUILTIN (BUILT_IN_TM_MEMMOVE, "_ITM_memmoveRtWt",
BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
DEF_TM_BUILTIN (BUILT_IN_TM_MEMSET, "_ITM_memsetW",
@@ -1808,24 +1808,6 @@ parameter is true if the memory access is defined in a packed struct.",
(machine_mode mode, const_tree type, int misalignment, bool is_packed),
default_builtin_support_vector_misalignment)
-/* Return the builtin decl needed to load a vector of TYPE. */
-DEFHOOK
-(builtin_tm_load,
- "This hook should return the built-in decl needed to load a vector of the "
- "given type within a transaction.",
- tree,
- (tree),
- default_builtin_tm_load_store)
-
-/* Return the builtin decl needed to store a vector of TYPE. */
-DEFHOOK
-(builtin_tm_store,
- "This hook should return the built-in decl needed to store a vector of the "
- "given type within a transaction.",
- tree,
- (tree),
- default_builtin_tm_load_store)
-
/* Returns the preferred mode for SIMD operations for the specified
scalar mode. */
DEFHOOK
@@ -12,4 +12,4 @@ void f()
}
}
-/* { dg-final { scan-tree-dump-times "memmoveRtWt \\\(&large_global," 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "memcpyRnWt \\\(&large_global," 1 "tmmark" } } */
@@ -18,4 +18,4 @@ int f()
return lala.x[i];
}
-/* { dg-final { scan-tree-dump-times "memmoveRtWt \\\(.*, &lacopy" 1 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "memcpyRtWn \\\(.*, &lacopy" 1 "tmedge" } } */
@@ -1190,7 +1190,6 @@ static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
tree type = TREE_TYPE (addr);
- tree size = TYPE_SIZE_UNIT (type);
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
gimple *log;
enum built_in_function code = BUILT_IN_TM_LOG;
@@ -1201,43 +1200,60 @@ tm_log_emit_stmt (tree addr, gimple *stmt)
code = BUILT_IN_TM_LOG_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOG_LDOUBLE;
- else if (tree_fits_uhwi_p (size))
+ else if (TYPE_SIZE (type) != NULL
+ && tree_fits_uhwi_p (TYPE_SIZE (type)))
{
- unsigned int n = tree_to_uhwi (size);
- switch (n)
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_LOG_1;
- break;
- case 2:
- code = BUILT_IN_TM_LOG_2;
- break;
- case 4:
- code = BUILT_IN_TM_LOG_4;
- break;
- case 8:
- code = BUILT_IN_TM_LOG_8;
- break;
- default:
- code = BUILT_IN_TM_LOG;
- if (TREE_CODE (type) == VECTOR_TYPE)
+ switch (type_size)
{
- if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
- code = BUILT_IN_TM_LOG_M64;
- else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
- code = BUILT_IN_TM_LOG_M128;
- else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
- code = BUILT_IN_TM_LOG_M256;
+ case 64:
+ code = BUILT_IN_TM_LOG_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_LOG_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_LOG_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+	    case 8:
+	      code = BUILT_IN_TM_LOG_1;
+	      break;
+	    case 16:
+	      code = BUILT_IN_TM_LOG_2;
+	      break;
+	    case 32:
+	      code = BUILT_IN_TM_LOG_4;
+	      break;
+	    case 64:
+	      code = BUILT_IN_TM_LOG_8;
+	      break;
}
- break;
}
}
+ if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
+ code = BUILT_IN_TM_LOG;
+ tree decl = builtin_decl_explicit (code);
+
addr = gimplify_addr (&gsi, addr);
if (code == BUILT_IN_TM_LOG)
- log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
+ log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
else
- log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
+ log = gimple_build_call (decl, 1, addr);
gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
@@ -2171,44 +2187,66 @@ transaction_subcode_ior (struct tm_region *region, unsigned flags)
static gcall *
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
- enum built_in_function code = END_BUILTINS;
- tree t, type = TREE_TYPE (rhs), decl;
+ tree t, type = TREE_TYPE (rhs);
gcall *gcall;
+ built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_LOAD_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_LOAD_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOAD_LDOUBLE;
- else if (TYPE_SIZE_UNIT (type) != NULL
- && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+ else
{
- switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
+ if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
+ return NULL;
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_LOAD_1;
- break;
- case 2:
- code = BUILT_IN_TM_LOAD_2;
- break;
- case 4:
- code = BUILT_IN_TM_LOAD_4;
- break;
- case 8:
- code = BUILT_IN_TM_LOAD_8;
- break;
+ switch (type_size)
+ {
+ case 64:
+ code = BUILT_IN_TM_LOAD_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_LOAD_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_LOAD_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+ case 8:
+ code = BUILT_IN_TM_LOAD_1;
+ break;
+ case 16:
+ code = BUILT_IN_TM_LOAD_2;
+ break;
+ case 32:
+ code = BUILT_IN_TM_LOAD_4;
+ break;
+ case 64:
+ code = BUILT_IN_TM_LOAD_8;
+ break;
+ default:
+ return NULL;
+ }
}
}
- if (code == END_BUILTINS)
- {
- decl = targetm.vectorize.builtin_tm_load (type);
- if (!decl)
- return NULL;
- }
- else
- decl = builtin_decl_explicit (code);
+ tree decl = builtin_decl_explicit (code);
+ gcc_assert (decl);
t = gimplify_addr (gsi, rhs);
gcall = gimple_build_call (decl, 1, t);
@@ -2243,44 +2281,66 @@ build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
static gcall *
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
- enum built_in_function code = END_BUILTINS;
tree t, fn, type = TREE_TYPE (rhs), simple_type;
gcall *gcall;
+ built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_STORE_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_STORE_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_STORE_LDOUBLE;
- else if (TYPE_SIZE_UNIT (type) != NULL
- && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+ else
{
- switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
+ if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
+ return NULL;
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_STORE_1;
- break;
- case 2:
- code = BUILT_IN_TM_STORE_2;
- break;
- case 4:
- code = BUILT_IN_TM_STORE_4;
- break;
- case 8:
- code = BUILT_IN_TM_STORE_8;
- break;
+ switch (type_size)
+ {
+ case 64:
+ code = BUILT_IN_TM_STORE_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_STORE_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_STORE_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+ case 8:
+ code = BUILT_IN_TM_STORE_1;
+ break;
+ case 16:
+ code = BUILT_IN_TM_STORE_2;
+ break;
+ case 32:
+ code = BUILT_IN_TM_STORE_4;
+ break;
+ case 64:
+ code = BUILT_IN_TM_STORE_8;
+ break;
+ default:
+ return NULL;
+ }
}
}
- if (code == END_BUILTINS)
- {
- fn = targetm.vectorize.builtin_tm_store (type);
- if (!fn)
- return NULL;
- }
- else
- fn = builtin_decl_explicit (code);
+ fn = builtin_decl_explicit (code);
+ gcc_assert (fn);
simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
@@ -2342,63 +2402,80 @@ expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
return;
}
+ if (load_p)
+ transaction_subcode_ior (region, GTMA_HAVE_LOAD);
+ if (store_p)
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+
// Remove original load/store statement.
gsi_remove (gsi, true);
+ // Attempt to use a simple load/store helper function.
if (load_p && !store_p)
- {
- transaction_subcode_ior (region, GTMA_HAVE_LOAD);
- gcall = build_tm_load (loc, lhs, rhs, gsi);
- }
+ gcall = build_tm_load (loc, lhs, rhs, gsi);
else if (store_p && !load_p)
- {
- transaction_subcode_ior (region, GTMA_HAVE_STORE);
- gcall = build_tm_store (loc, lhs, rhs, gsi);
- }
+ gcall = build_tm_store (loc, lhs, rhs, gsi);
+
+ // If gcall has not been set, then we do not have a simple helper
+ // function available for the type. This may be true of larger
+ // structures, vectors, and non-standard float types.
if (!gcall)
{
- tree lhs_addr, rhs_addr, tmp;
-
- if (load_p)
- transaction_subcode_ior (region, GTMA_HAVE_LOAD);
- if (store_p)
- transaction_subcode_ior (region, GTMA_HAVE_STORE);
+ tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
- /* ??? Figure out if there's any possible overlap between the LHS
- and the RHS and if not, use MEMCPY. */
+ // If this is a type that we couldn't handle above, but it's
+ // in a register, we must spill it to memory for the copy.
+ if (is_gimple_reg (lhs))
+ {
+ ltmp = create_tmp_var (TREE_TYPE (lhs));
+ lhs_addr = build_fold_addr_expr (ltmp);
+ }
+ else
+ lhs_addr = gimplify_addr (gsi, lhs);
+ if (is_gimple_reg (rhs))
+ {
+ tree rtmp = create_tmp_var (TREE_TYPE (rhs));
+ rhs_addr = build_fold_addr_expr (rtmp);
+ gcall = gimple_build_assign (rtmp, rhs);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
+ else
+ rhs_addr = gimplify_addr (gsi, rhs);
- if (load_p && is_gimple_reg (lhs))
+ // Choose the appropriate memory transfer function.
+ if (load_p && store_p)
+ {
+ // ??? Figure out if there's any possible overlap between
+ // the LHS and the RHS and if not, use MEMCPY.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
+ }
+ else if (load_p)
{
- tmp = create_tmp_var (TREE_TYPE (lhs));
- lhs_addr = build_fold_addr_expr (tmp);
+ // Note that the store is non-transactional and cannot overlap.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
}
else
{
- tmp = NULL_TREE;
- lhs_addr = gimplify_addr (gsi, lhs);
+ // Note that the load is non-transactional and cannot overlap.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
}
- rhs_addr = gimplify_addr (gsi, rhs);
- gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
- 3, lhs_addr, rhs_addr,
+
+ gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
- if (tmp)
+ if (ltmp)
{
- gcall = gimple_build_assign (lhs, tmp);
+ gcall = gimple_build_assign (lhs, ltmp);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
}
- /* Now that we have the load/store in its instrumented form, add
- thread private addresses to the log if applicable. */
+ // Now that we have the load/store in its instrumented form, add
+ // thread private addresses to the log if applicable.
if (!store_p)
requires_barrier (region->entry_block, lhs, gcall);
-
- // The calls to build_tm_{store,load} above inserted the instrumented
- // call into the stream.
- // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
@@ -10330,6 +10330,143 @@ local_define_builtin (const char *name, tree type, enum built_in_function code,
set_builtin_decl (code, decl, true);
}
+/* A subroutine of build_tm_vector_builtins. Define a builtin with
+ all of the appropriate attributes. */
+static void
+tm_define_builtin (const char *name, tree type, built_in_function code,
+ tree decl_attrs, tree type_attrs)
+{
+ tree decl = add_builtin_function (name, type, code, BUILT_IN_NORMAL,
+ name + strlen ("__builtin_"), decl_attrs);
+ decl_attributes (&TREE_TYPE (decl), type_attrs, ATTR_FLAG_BUILT_IN);
+ set_builtin_decl (code, decl, true);
+}
+
+/* A subroutine of build_tm_vector_builtins. Find a supported vector
+ type VECTOR_BITS wide with inner mode ELEM_MODE. */
+static tree
+find_tm_vector_type (unsigned vector_bits, machine_mode elem_mode)
+{
+ unsigned elem_bits = GET_MODE_BITSIZE (elem_mode);
+ unsigned nunits = vector_bits / elem_bits;
+
+ gcc_assert (elem_bits * nunits == vector_bits);
+
+ machine_mode vector_mode = mode_for_vector (elem_mode, nunits);
+ if (!VECTOR_MODE_P (vector_mode)
+ || !targetm.vector_mode_supported_p (vector_mode))
+ return NULL_TREE;
+
+ tree innertype = lang_hooks.types.type_for_mode (elem_mode, 0);
+ return build_vector_type_for_mode (innertype, vector_mode);
+}
+
+/* A subroutine of build_common_builtin_nodes. Define TM builtins for
+ vector types. This is done after the target hook, so that the target
+ has a chance to override these. */
+static void
+build_tm_vector_builtins (void)
+{
+ tree vtype, pvtype, ftype, decl;
+ tree attrs_load, attrs_type_load;
+ tree attrs_store, attrs_type_store;
+ tree attrs_log, attrs_type_log;
+
+ /* Do nothing if TM is turned off, either with switch or
+ not enabled in the language. */
+ if (!flag_tm || !builtin_decl_explicit_p (BUILT_IN_TM_LOAD_1))
+ return;
+
+ /* Use whatever attributes a normal TM load has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_LOAD_1);
+ attrs_load = DECL_ATTRIBUTES (decl);
+ attrs_type_load = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ /* Use whatever attributes a normal TM store has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_STORE_1);
+ attrs_store = DECL_ATTRIBUTES (decl);
+ attrs_type_store = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ /* Use whatever attributes a normal TM log has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_LOG);
+ attrs_log = DECL_ATTRIBUTES (decl);
+ attrs_type_log = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+
+ /* By default, 64 bit vectors go through the long long helpers. */
+
+ /* If a 128-bit vector is supported, declare those builtins. */
+ if (!builtin_decl_declared_p (BUILT_IN_TM_STORE_M128)
+ && ((vtype = find_tm_vector_type (128, SImode))
+ || (vtype = find_tm_vector_type (128, SFmode))))
+ {
+ pvtype = build_pointer_type (vtype);
+
+ ftype = build_function_type_list (void_type_node, pvtype, vtype, NULL);
+ tm_define_builtin ("__builtin__ITM_WM128", ftype,
+ BUILT_IN_TM_STORE_M128,
+ attrs_store, attrs_type_store);
+ tm_define_builtin ("__builtin__ITM_WaRM128", ftype,
+ BUILT_IN_TM_STORE_WAR_M128,
+ attrs_store, attrs_type_store);
+ tm_define_builtin ("__builtin__ITM_WaWM128", ftype,
+ BUILT_IN_TM_STORE_WAW_M128,
+ attrs_store, attrs_type_store);
+
+ ftype = build_function_type_list (vtype, pvtype, NULL);
+ tm_define_builtin ("__builtin__ITM_RM128", ftype,
+ BUILT_IN_TM_LOAD_M128,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RaRM128", ftype,
+ BUILT_IN_TM_LOAD_RAR_M128,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RaWM128", ftype,
+ BUILT_IN_TM_LOAD_RAW_M128,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RfWM128", ftype,
+ BUILT_IN_TM_LOAD_RFW_M128,
+ attrs_load, attrs_type_load);
+
+ ftype = build_function_type_list (void_type_node, pvtype, NULL);
+ tm_define_builtin ("__builtin__ITM_LM128", ftype,
+ BUILT_IN_TM_LOG_M128, attrs_log, attrs_type_log);
+ }
+
+ /* If a 256-bit vector is supported, declare those builtins. */
+ if (!builtin_decl_declared_p (BUILT_IN_TM_STORE_M256)
+ && ((vtype = find_tm_vector_type (256, SImode))
+ || (vtype = find_tm_vector_type (256, SFmode))))
+ {
+ pvtype = build_pointer_type (vtype);
+
+ ftype = build_function_type_list (void_type_node, pvtype, vtype, NULL);
+ tm_define_builtin ("__builtin__ITM_WM256", ftype,
+ BUILT_IN_TM_STORE_M256,
+ attrs_store, attrs_type_store);
+ tm_define_builtin ("__builtin__ITM_WaRM256", ftype,
+ BUILT_IN_TM_STORE_WAR_M256,
+ attrs_store, attrs_type_store);
+ tm_define_builtin ("__builtin__ITM_WaWM256", ftype,
+ BUILT_IN_TM_STORE_WAW_M256,
+ attrs_store, attrs_type_store);
+
+ ftype = build_function_type_list (vtype, pvtype, NULL);
+ tm_define_builtin ("__builtin__ITM_RM256", ftype,
+ BUILT_IN_TM_LOAD_M256,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RaRM256", ftype,
+ BUILT_IN_TM_LOAD_RAR_M256,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RaWM256", ftype,
+ BUILT_IN_TM_LOAD_RAW_M256,
+ attrs_load, attrs_type_load);
+ tm_define_builtin ("__builtin__ITM_RfWM256", ftype,
+ BUILT_IN_TM_LOAD_RFW_M256,
+ attrs_load, attrs_type_load);
+
+ ftype = build_function_type_list (void_type_node, pvtype, NULL);
+ tm_define_builtin ("__builtin__ITM_LM256", ftype,
+ BUILT_IN_TM_LOG_M256, attrs_log, attrs_type_log);
+ }
+}
+
/* Call this function after instantiating all builtins that the language
front end cares about. This will build the rest of the builtins
and internal functions that are relied upon by the tree optimizers and
@@ -10568,6 +10705,7 @@ build_common_builtin_nodes (void)
}
}
+ build_tm_vector_builtins ();
init_internal_fns ();
}
@@ -62,16 +62,26 @@ libitm_la_SOURCES = \
query.cc retry.cc rwlock.cc useraction.cc util.cc \
sjlj.S tls.cc method-serial.cc method-gl.cc method-ml.cc
+if ARCH_AARCH64
+libitm_la_SOURCES += vect128.cc
+endif
if ARCH_ARM
-libitm_la_SOURCES += hwcap.cc
+libitm_la_SOURCES += hwcap.cc neon.cc
+endif
+if ARCH_PPC
+libitm_la_SOURCES += vect128.cc
+vect128.lo : override CXXFLAGS += -maltivec
+endif
+if ARCH_S390
+libitm_la_SOURCES += vect128.cc
+vect128.lo : override CXXFLAGS += -march=z13
endif
if ARCH_X86
-libitm_la_SOURCES += x86_sse.cc x86_avx.cc
-# Make sure -msse is appended at the end.
-x86_sse.lo : override CXXFLAGS += -msse
+libitm_la_SOURCES += vect64.cc vect128.cc x86_avx.cc
+vect64.lo : override CXXFLAGS += -msse
+vect128.lo : override CXXFLAGS += -msse
endif
if ARCH_X86_AVX
-# Make sure -mavx is appended at the end.
x86_avx.lo : override CXXFLAGS += -mavx
endif
new file mode 100644
@@ -0,0 +1,3 @@
+#ifdef __ARM_NEON
+#include <vect128.cc>
+#endif
@@ -281,7 +281,10 @@ else
fi
AC_SUBST(link_itm)
+AM_CONDITIONAL([ARCH_AARCH64], [test "$ARCH" = aarch64])
AM_CONDITIONAL([ARCH_ARM], [test "$ARCH" = arm])
+AM_CONDITIONAL([ARCH_PPC], [test "$ARCH" = powerpc -o "$ARCH" = powerpc64])
+AM_CONDITIONAL([ARCH_S390], [test "$ARCH" = s390 -o "$ARCH" = s390x])
AM_CONDITIONAL([ARCH_X86], [test "$ARCH" = x86])
AM_CONDITIONAL([ARCH_X86_AVX], [test "$libitm_cv_as_avx" = yes])
AM_CONDITIONAL([ARCH_FUTEX], [test $enable_linux_futex = yes])
@@ -232,7 +232,11 @@ ITM_LOG(CE)
ITM_BARRIERS(M256)
ITM_LOG(M256)
# endif
-#endif /* i386 */
+#else
+ typedef int _ITM_TYPE_M128 __attribute__((vector_size(16), may_alias));
+ ITM_BARRIERS(M128)
+ ITM_LOG(M128)
+#endif
#undef ITM_BARRIERS
#undef ITM_LOG
similarity index 90%
rename from libitm/config/x86/x86_sse.cc
rename to libitm/vect128.cc
@@ -27,16 +27,9 @@
// ??? Use memcpy for now, until we have figured out how to best instantiate
// these loads/stores.
-CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M64, GTM::abi_disp()->, )
CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M128, GTM::abi_disp()->, )
void ITM_REGPARM
-_ITM_LM64 (const _ITM_TYPE_M64 *ptr)
-{
- GTM::GTM_LB (ptr, sizeof (*ptr));
-}
-
-void ITM_REGPARM
_ITM_LM128 (const _ITM_TYPE_M128 *ptr)
{
GTM::GTM_LB (ptr, sizeof (*ptr));
new file mode 100644
@@ -0,0 +1,36 @@
+/* Copyright (C) 2009-2016 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+#include "dispatch.h"
+
+// ??? Use memcpy for now, until we have figured out how to best instantiate
+// these loads/stores.
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M64, GTM::abi_disp()->, )
+
+void ITM_REGPARM
+_ITM_LM64 (const _ITM_TYPE_M64 *ptr)
+{
+ GTM::GTM_LB (ptr, sizeof (*ptr));
+}