@@ -721,6 +721,7 @@ instrument_mem_region_access (tree base, tree len,
gimple_stmt_iterator gsi = *iter;
tree pointed_to_type = TREE_TYPE (TREE_TYPE (base));
+ basic_block fallthrough_bb = NULL, then_bb = NULL;
if (!is_gimple_constant (len))
{
/* So, the length of the memory area to asan-protect is
@@ -733,7 +734,6 @@ instrument_mem_region_access (tree base, tree len,
}
// fallthrough instructions, starting with *ITER. */
- basic_block fallthrough_bb, then_bb;
gimple g = gimple_build_cond (NE_EXPR,
len,
build_int_cst (TREE_TYPE (len), 0),
@@ -747,22 +747,22 @@ instrument_mem_region_access (tree base, tree len,
/* The 'then block' of the 'if (len != 0)' condition is where
we'll generate the asan instrumentation code now. */
gsi = gsi_start_bb (then_bb);
-
- /* Instrument the beginning of the memory region to be accessed,
- and arrange for the rest of the intrumentation code to be
- inserted in the then block *after* the current gsi. */
- build_check_stmt (base, &gsi, location, is_store,
- int_size_in_bytes (pointed_to_type));
- gsi = gsi_last_bb (then_bb);
}
+
+ /* Instrument the beginning of the memory region to be accessed,
+ and arrange for the rest of the instrumentation code to be
+ inserted in the then block *after* the current gsi. */
+ build_check_stmt (base, &gsi, location, is_store, 1);
+
+ if (then_bb)
+ /* We are in the case where the length of the region is not
+ constant; so instrumentation code is being generated in the
+ 'then block' of the 'if (len != 0)' condition. Let's arrange
+ for the subsequent instrumentation statements to go in the
+ 'then block'. */
+ gsi = gsi_last_bb (then_bb);
else
- {
- /* Instrument the beginning of the memory region to be
- accessed. */
- build_check_stmt (base, iter, location, is_store,
- int_size_in_bytes (pointed_to_type));
- gsi = *iter;
- }
+ *iter = gsi;
/* We want to instrument the access at the end of the memory region,
which is at (base + len - 1). */
Is that any better?
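For reference, here is roughly what the restructured code generates
for a non-constant length, say for memset (p, 0, n) -- hand-written
pseudo-GIMPLE to illustrate the shape, not actual compiler output:

  if (n != 0)
    {
      <check 1 byte at p>;   /* start of the region */
      _1 = n;
      _2 = _1 - 1;
      _3 = p;
      _4 = _3 + _2;
      <check 1 byte at _4>;  /* end of the region */
    }
  memset (p, 0, n);          /* original call, in the fallthrough block */

(As a sanity check on the arithmetic: with p = 0x1000 and n = 8 the
region spans 0x1000..0x1007, and p + n - 1 = 0x1007 is indeed the
last byte accessed.)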
> > + /* instrument access at _2; */
> > + gsi_next (&gsi);
> > + tree end = gimple_assign_lhs (region_end);
> > + build_check_stmt (end, &gsi, location, is_store,
>
> Can't you just pass gimple_assign_lhs (region_end) as first
> argument to build_check_stmt? And again, I think you want
> to test a single byte there, not more.
Done.
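That is, the end-of-region check in the updated patch now passes the
SSA name directly and tests a single byte:

  build_check_stmt (gimple_assign_lhs (region_end),
                    &gsi, location, is_store, 1);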
>
> > + int_size_in_bytes (TREE_TYPE (end)));
>
> > + switch (DECL_FUNCTION_CODE (callee))
> > + {
> > + /* (s, s, n) style memops. */
> > + case BUILT_IN_BCMP:
> > + case BUILT_IN_MEMCMP:
> > + /* These cannot be safely instrumented as their length parameter
> > + is just a mere limit.
> > +
> > + case BUILT_IN_STRNCASECMP:
> > + case BUILT_IN_STRNCMP: */
>
> I think these comments make the code less readable instead of more readable,
> I'd move the comments why something can't be instrumented to the default:
> case.
Fixed.
> On the other side, you IMHO want to handle here also __atomic_* and
> __sync_* builtins (not by using instrument_mem_region_access, but
> just instrument_derefs
Updated in the patch below.
> (if the argument is ADDR_EXPR, on what it points
> to, otherwise if it is SSA_NAME, on MEM_REF created for it).
I updated instrument_derefs to handle ADDR_EXPR and SSA_NAME. While
doing this, it occurred to me that the bit-field detection code in
there wasn't quite right: it treated the MEM_REF access to foo[10] as
a bit-field access in this example:
void
foo ()
{
char foo[10] = {0};
__sync_fetch_and_add (&foo[10], 1);
}
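For contrast, a genuine bit-field access -- the case the check is
meant to skip -- looks like this (illustrative example only, not part
of the patch):

struct s
{
  int a : 3;
  int b : 5;
};

void
bar (struct s *p)
{
  p->a = 1; /* bitsize == 3, not a whole number of bytes.  */
  p->b = 1; /* bitpos == 3, not a multiple of BITS_PER_UNIT.  */
}

Both accesses are skipped by the updated check, whereas the
byte-aligned access to foo[10] above now gets instrumented.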
Here is the updated patch.
gcc/
* asan.c (insert_if_then_before_iter, instrument_mem_region_access,
maybe_instrument_builtin_call, maybe_instrument_call): New static
functions.
(instrument_assignment): Factorize from ...
(transform_statements): ... here. Use maybe_instrument_call to
instrument builtin function calls as well.
(instrument_derefs): Support ADDR_EXPR and SSA_NAME nodes. Fix
detection of bit-field access.
---
gcc/asan.c | 522 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 513 insertions(+), 9 deletions(-)
@@ -466,6 +466,40 @@ create_cond_insert_point_before_iter (gimple_stmt_iterator *iter,
return gsi_last_bb (cond_bb);
}
+/* Insert an if condition followed by a 'then block' right before the
+ statement pointed to by ITER. The fallthrough block -- which is the
+ else block of the condition as well as the destination of the
+ outgoing edge of the 'then block' -- starts with the statement
+ pointed to by ITER.
+
+ COND is the condition of the if.
+
+ If THEN_MORE_LIKELY_P is true, the probability of the edge to the
+ 'then block' is higher than the probability of the edge to the
+ fallthrough block.
+
+ Upon completion of the function, *THEN_BB is set to the newly
+ inserted 'then block' and similarly, *FALLTHROUGH_BB is set to the
+ fallthrough block.
+
+ *ITER is adjusted to still point to the same statement it was
+ pointing to initially. */
+
+static void
+insert_if_then_before_iter (gimple cond,
+ gimple_stmt_iterator *iter,
+ bool then_more_likely_p,
+ basic_block *then_bb,
+ basic_block *fallthrough_bb)
+{
+ gimple_stmt_iterator cond_insert_point =
+ create_cond_insert_point_before_iter (iter,
+ then_more_likely_p,
+ then_bb,
+ fallthrough_bb);
+ gsi_insert_after (&cond_insert_point, cond, GSI_NEW_STMT);
+}
+
/* Instrument the memory access instruction BASE. Insert new
statements before ITER.
@@ -626,7 +660,7 @@ build_check_stmt (tree base, gimple_stmt_iterator *iter,
/* If T represents a memory access, add instrumentation code before ITER.
LOCATION is source code location.
- IS_STORE is either 1 (for a store) or 0 (for a load). */
+ IS_STORE is either TRUE (for a store) or FALSE (for a load). */
static void
instrument_derefs (gimple_stmt_iterator *iter, tree t,
@@ -638,6 +672,13 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
type = TREE_TYPE (t);
switch (TREE_CODE (t))
{
+ case ADDR_EXPR:
+ t = TREE_OPERAND (t, 0);
+ type = TREE_TYPE (t);
+ break;
+ case SSA_NAME:
+ instrument_derefs (iter, SSA_NAME_VAR (t), location, is_store);
+ return;
case ARRAY_REF:
case COMPONENT_REF:
case INDIRECT_REF:
@@ -661,13 +702,477 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
int volatilep = 0, unsignedp = 0;
get_inner_reference (t, &bitsize, &bitpos, &offset,
&mode, &unsignedp, &volatilep, false);
- if (bitpos != 0 || bitsize != size_in_bytes * BITS_PER_UNIT)
+ if (bitpos % BITS_PER_UNIT || bitsize != size_in_bytes * BITS_PER_UNIT)
return;
base = build_fold_addr_expr (t);
build_check_stmt (base, iter, location, is_store, size_in_bytes);
}
+/* Instrument an access to a contiguous memory region that starts at
+ the address pointed to by BASE, over a length of LEN (expressed in
+ units of sizeof (*BASE) bytes). ITER points to the instruction before
+ which the instrumentation instructions must be inserted. LOCATION
+ is the source location that the instrumentation instructions must
+ have. If IS_STORE is true, then the memory access is a store;
+ otherwise, it's a load. */
+
+static void
+instrument_mem_region_access (tree base, tree len,
+ gimple_stmt_iterator *iter,
+ location_t location, bool is_store)
+{
+ if (integer_zerop (len))
+ return;
+
+ gimple_stmt_iterator gsi = *iter;
+
+ basic_block fallthrough_bb = NULL, then_bb = NULL;
+ if (!is_gimple_constant (len))
+ {
+ /* So, the length of the memory area to asan-protect is
+ non-constant. Let's guard the generated instrumentation code
+ like:
+
+ if (len != 0)
+ {
+ //asan instrumentation code goes here.
+ }
+ // fallthrough instructions, starting with *ITER. */
+
+ gimple g = gimple_build_cond (NE_EXPR,
+ len,
+ build_int_cst (TREE_TYPE (len), 0),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, location);
+ insert_if_then_before_iter (g, iter, /*then_more_likely_p=*/true,
+ &then_bb, &fallthrough_bb);
+ /* Note that fallthrough_bb starts with the statement that was
+ pointed to by ITER. */
+
+ /* The 'then block' of the 'if (len != 0)' condition is where
+ we'll generate the asan instrumentation code now. */
+ gsi = gsi_start_bb (then_bb);
+ }
+
+ /* Instrument the beginning of the memory region to be accessed,
+ and arrange for the rest of the instrumentation code to be
+ inserted in the then block *after* the current gsi. */
+ build_check_stmt (base, &gsi, location, is_store, 1);
+
+ if (then_bb)
+ /* We are in the case where the length of the region is not
+ constant; so instrumentation code is being generated in the
+ 'then block' of the 'if (len != 0)' condition. Let's arrange
+ for the subsequent instrumentation statements to go in the
+ 'then block'. */
+ gsi = gsi_last_bb (then_bb);
+ else
+ *iter = gsi;
+
+ /* We want to instrument the access at the end of the memory region,
+ which is at (base + len - 1). */
+
+ /* offset = len - 1; */
+ len = unshare_expr (len);
+ gimple offset =
+ gimple_build_assign_with_ops (TREE_CODE (len),
+ make_ssa_name (TREE_TYPE (len), NULL),
+ len, NULL);
+ gimple_set_location (offset, location);
+ gsi_insert_before (&gsi, offset, GSI_NEW_STMT);
+
+ offset =
+ gimple_build_assign_with_ops (MINUS_EXPR,
+ make_ssa_name (size_type_node, NULL),
+ gimple_assign_lhs (offset),
+ build_int_cst (size_type_node, 1));
+ gimple_set_location (offset, location);
+ gsi_insert_after (&gsi, offset, GSI_NEW_STMT);
+
+ /* _1 = base; */
+ base = unshare_expr (base);
+ gimple region_end =
+ gimple_build_assign_with_ops (TREE_CODE (base),
+ make_ssa_name (TREE_TYPE (base), NULL),
+ base, NULL);
+ gimple_set_location (region_end, location);
+ gsi_insert_after (&gsi, region_end, GSI_NEW_STMT);
+
+ /* _2 = _1 + offset; */
+ region_end =
+ gimple_build_assign_with_ops (POINTER_PLUS_EXPR,
+ make_ssa_name (TREE_TYPE (base), NULL),
+ gimple_assign_lhs (region_end),
+ gimple_assign_lhs (offset));
+ gimple_set_location (region_end, location);
+ gsi_insert_after (&gsi, region_end, GSI_NEW_STMT);
+
+ /* instrument access at _2; */
+ gsi_next (&gsi);
+ build_check_stmt (gimple_assign_lhs (region_end),
+ &gsi, location, is_store, 1);
+}
+
+/* If the statement pointed to by the iterator ITER is a call to a
+ builtin memory access function, instrument it and return true.
+ Otherwise, return false. */
+
+static bool
+maybe_instrument_builtin_call (gimple_stmt_iterator *iter)
+{
+ gimple call = gsi_stmt (*iter);
+ location_t location = gimple_location (call);
+
+ if (!is_gimple_call (call))
+ return false;
+
+ tree callee = gimple_call_fndecl (call);
+
+ if (callee == NULL_TREE
+ || !is_builtin_fn (callee)
+ || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
+ return false;
+
+ tree source0 = NULL_TREE, source1 = NULL_TREE,
+ dest = NULL_TREE, len = NULL_TREE;
+ bool is_store = true;
+
+ switch (DECL_FUNCTION_CODE (callee))
+ {
+ /* (s, s, n) style memops. */
+ case BUILT_IN_BCMP:
+ case BUILT_IN_MEMCMP:
+ len = gimple_call_arg (call, 2);
+ source0 = gimple_call_arg (call, 0);
+ source1 = gimple_call_arg (call, 1);
+ break;
+
+ /* (src, dest, n) style memops. */
+ case BUILT_IN_BCOPY:
+ len = gimple_call_arg (call, 2);
+ source0 = gimple_call_arg (call, 0);
+ dest = gimple_call_arg (call, 1);
+ break;
+
+ /* (dest, src, n) style memops. */
+ case BUILT_IN_MEMCPY:
+ case BUILT_IN_MEMCPY_CHK:
+ case BUILT_IN_MEMMOVE:
+ case BUILT_IN_MEMMOVE_CHK:
+ case BUILT_IN_MEMPCPY:
+ case BUILT_IN_MEMPCPY_CHK:
+ dest = gimple_call_arg (call, 0);
+ source0 = gimple_call_arg (call, 1);
+ len = gimple_call_arg (call, 2);
+ break;
+
+ /* (dest, n) style memops. */
+ case BUILT_IN_BZERO:
+ dest = gimple_call_arg (call, 0);
+ len = gimple_call_arg (call, 1);
+ break;
+
+ /* (dest, x, n) style memops. */
+ case BUILT_IN_MEMSET:
+ case BUILT_IN_MEMSET_CHK:
+ dest = gimple_call_arg (call, 0);
+ len = gimple_call_arg (call, 2);
+ break;
+
+ /* And now the __atomic_* and __sync_* builtins.
+ These are handled differently from the classical memory
+ access builtins above. */
+
+ case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
+ case BUILT_IN_ATOMIC_IS_LOCK_FREE:
+ is_store = false;
+
+ /* Fall through. */
+
+ case BUILT_IN_SYNC_FETCH_AND_ADD_N:
+ case BUILT_IN_SYNC_FETCH_AND_ADD_1:
+ case BUILT_IN_SYNC_FETCH_AND_ADD_2:
+ case BUILT_IN_SYNC_FETCH_AND_ADD_4:
+ case BUILT_IN_SYNC_FETCH_AND_ADD_8:
+ case BUILT_IN_SYNC_FETCH_AND_ADD_16:
+
+ case BUILT_IN_SYNC_FETCH_AND_SUB_N:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_1:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_2:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_4:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_8:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_16:
+
+ case BUILT_IN_SYNC_FETCH_AND_OR_N:
+ case BUILT_IN_SYNC_FETCH_AND_OR_1:
+ case BUILT_IN_SYNC_FETCH_AND_OR_2:
+ case BUILT_IN_SYNC_FETCH_AND_OR_4:
+ case BUILT_IN_SYNC_FETCH_AND_OR_8:
+ case BUILT_IN_SYNC_FETCH_AND_OR_16:
+
+ case BUILT_IN_SYNC_FETCH_AND_AND_N:
+ case BUILT_IN_SYNC_FETCH_AND_AND_1:
+ case BUILT_IN_SYNC_FETCH_AND_AND_2:
+ case BUILT_IN_SYNC_FETCH_AND_AND_4:
+ case BUILT_IN_SYNC_FETCH_AND_AND_8:
+ case BUILT_IN_SYNC_FETCH_AND_AND_16:
+
+ case BUILT_IN_SYNC_FETCH_AND_XOR_N:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_1:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_2:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_4:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_8:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_16:
+
+ case BUILT_IN_SYNC_FETCH_AND_NAND_N:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_1:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_2:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_4:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_8:
+
+ case BUILT_IN_SYNC_ADD_AND_FETCH_N:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_1:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_2:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_4:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_8:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_16:
+
+ case BUILT_IN_SYNC_SUB_AND_FETCH_N:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_1:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_2:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_4:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_8:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_16:
+
+ case BUILT_IN_SYNC_OR_AND_FETCH_N:
+ case BUILT_IN_SYNC_OR_AND_FETCH_1:
+ case BUILT_IN_SYNC_OR_AND_FETCH_2:
+ case BUILT_IN_SYNC_OR_AND_FETCH_4:
+ case BUILT_IN_SYNC_OR_AND_FETCH_8:
+ case BUILT_IN_SYNC_OR_AND_FETCH_16:
+
+ case BUILT_IN_SYNC_AND_AND_FETCH_N:
+ case BUILT_IN_SYNC_AND_AND_FETCH_1:
+ case BUILT_IN_SYNC_AND_AND_FETCH_2:
+ case BUILT_IN_SYNC_AND_AND_FETCH_4:
+ case BUILT_IN_SYNC_AND_AND_FETCH_8:
+ case BUILT_IN_SYNC_AND_AND_FETCH_16:
+
+ case BUILT_IN_SYNC_XOR_AND_FETCH_N:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_1:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_2:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_4:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_8:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_16:
+
+ case BUILT_IN_SYNC_NAND_AND_FETCH_N:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_1:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_2:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_4:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_8:
+
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
+
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
+
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_N:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
+
+ case BUILT_IN_SYNC_LOCK_RELEASE_N:
+ case BUILT_IN_SYNC_LOCK_RELEASE_1:
+ case BUILT_IN_SYNC_LOCK_RELEASE_2:
+ case BUILT_IN_SYNC_LOCK_RELEASE_4:
+ case BUILT_IN_SYNC_LOCK_RELEASE_8:
+ case BUILT_IN_SYNC_LOCK_RELEASE_16:
+
+ case BUILT_IN_ATOMIC_TEST_AND_SET:
+ case BUILT_IN_ATOMIC_CLEAR:
+ case BUILT_IN_ATOMIC_EXCHANGE:
+ case BUILT_IN_ATOMIC_EXCHANGE_N:
+ case BUILT_IN_ATOMIC_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_EXCHANGE_16:
+
+ case BUILT_IN_ATOMIC_LOAD:
+ case BUILT_IN_ATOMIC_LOAD_N:
+ case BUILT_IN_ATOMIC_LOAD_1:
+ case BUILT_IN_ATOMIC_LOAD_2:
+ case BUILT_IN_ATOMIC_LOAD_4:
+ case BUILT_IN_ATOMIC_LOAD_8:
+ case BUILT_IN_ATOMIC_LOAD_16:
+
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
+
+ case BUILT_IN_ATOMIC_STORE:
+ case BUILT_IN_ATOMIC_STORE_N:
+ case BUILT_IN_ATOMIC_STORE_1:
+ case BUILT_IN_ATOMIC_STORE_2:
+ case BUILT_IN_ATOMIC_STORE_4:
+ case BUILT_IN_ATOMIC_STORE_8:
+ case BUILT_IN_ATOMIC_STORE_16:
+
+ case BUILT_IN_ATOMIC_ADD_FETCH_N:
+ case BUILT_IN_ATOMIC_ADD_FETCH_1:
+ case BUILT_IN_ATOMIC_ADD_FETCH_2:
+ case BUILT_IN_ATOMIC_ADD_FETCH_4:
+ case BUILT_IN_ATOMIC_ADD_FETCH_8:
+ case BUILT_IN_ATOMIC_ADD_FETCH_16:
+
+ case BUILT_IN_ATOMIC_SUB_FETCH_N:
+ case BUILT_IN_ATOMIC_SUB_FETCH_1:
+ case BUILT_IN_ATOMIC_SUB_FETCH_2:
+ case BUILT_IN_ATOMIC_SUB_FETCH_4:
+ case BUILT_IN_ATOMIC_SUB_FETCH_8:
+ case BUILT_IN_ATOMIC_SUB_FETCH_16:
+
+ case BUILT_IN_ATOMIC_AND_FETCH_N:
+ case BUILT_IN_ATOMIC_AND_FETCH_1:
+ case BUILT_IN_ATOMIC_AND_FETCH_2:
+ case BUILT_IN_ATOMIC_AND_FETCH_4:
+ case BUILT_IN_ATOMIC_AND_FETCH_8:
+ case BUILT_IN_ATOMIC_AND_FETCH_16:
+
+ case BUILT_IN_ATOMIC_NAND_FETCH_N:
+ case BUILT_IN_ATOMIC_NAND_FETCH_1:
+ case BUILT_IN_ATOMIC_NAND_FETCH_2:
+ case BUILT_IN_ATOMIC_NAND_FETCH_4:
+ case BUILT_IN_ATOMIC_NAND_FETCH_8:
+ case BUILT_IN_ATOMIC_NAND_FETCH_16:
+
+ case BUILT_IN_ATOMIC_XOR_FETCH_N:
+ case BUILT_IN_ATOMIC_XOR_FETCH_1:
+ case BUILT_IN_ATOMIC_XOR_FETCH_2:
+ case BUILT_IN_ATOMIC_XOR_FETCH_4:
+ case BUILT_IN_ATOMIC_XOR_FETCH_8:
+ case BUILT_IN_ATOMIC_XOR_FETCH_16:
+
+ case BUILT_IN_ATOMIC_OR_FETCH_N:
+ case BUILT_IN_ATOMIC_OR_FETCH_1:
+ case BUILT_IN_ATOMIC_OR_FETCH_2:
+ case BUILT_IN_ATOMIC_OR_FETCH_4:
+ case BUILT_IN_ATOMIC_OR_FETCH_8:
+ case BUILT_IN_ATOMIC_OR_FETCH_16:
+
+ case BUILT_IN_ATOMIC_FETCH_ADD_N:
+ case BUILT_IN_ATOMIC_FETCH_ADD_1:
+ case BUILT_IN_ATOMIC_FETCH_ADD_2:
+ case BUILT_IN_ATOMIC_FETCH_ADD_4:
+ case BUILT_IN_ATOMIC_FETCH_ADD_8:
+ case BUILT_IN_ATOMIC_FETCH_ADD_16:
+
+ case BUILT_IN_ATOMIC_FETCH_SUB_N:
+ case BUILT_IN_ATOMIC_FETCH_SUB_1:
+ case BUILT_IN_ATOMIC_FETCH_SUB_2:
+ case BUILT_IN_ATOMIC_FETCH_SUB_4:
+ case BUILT_IN_ATOMIC_FETCH_SUB_8:
+ case BUILT_IN_ATOMIC_FETCH_SUB_16:
+
+ case BUILT_IN_ATOMIC_FETCH_AND_N:
+ case BUILT_IN_ATOMIC_FETCH_AND_1:
+ case BUILT_IN_ATOMIC_FETCH_AND_2:
+ case BUILT_IN_ATOMIC_FETCH_AND_4:
+ case BUILT_IN_ATOMIC_FETCH_AND_8:
+ case BUILT_IN_ATOMIC_FETCH_AND_16:
+
+ case BUILT_IN_ATOMIC_FETCH_NAND_N:
+ case BUILT_IN_ATOMIC_FETCH_NAND_1:
+ case BUILT_IN_ATOMIC_FETCH_NAND_2:
+ case BUILT_IN_ATOMIC_FETCH_NAND_4:
+ case BUILT_IN_ATOMIC_FETCH_NAND_8:
+ case BUILT_IN_ATOMIC_FETCH_NAND_16:
+
+ case BUILT_IN_ATOMIC_FETCH_XOR_N:
+ case BUILT_IN_ATOMIC_FETCH_XOR_1:
+ case BUILT_IN_ATOMIC_FETCH_XOR_2:
+ case BUILT_IN_ATOMIC_FETCH_XOR_4:
+ case BUILT_IN_ATOMIC_FETCH_XOR_8:
+ case BUILT_IN_ATOMIC_FETCH_XOR_16:
+
+ case BUILT_IN_ATOMIC_FETCH_OR_N:
+ case BUILT_IN_ATOMIC_FETCH_OR_1:
+ case BUILT_IN_ATOMIC_FETCH_OR_2:
+ case BUILT_IN_ATOMIC_FETCH_OR_4:
+ case BUILT_IN_ATOMIC_FETCH_OR_8:
+ case BUILT_IN_ATOMIC_FETCH_OR_16:
+ source0 = gimple_call_arg (call, 0);
+ break;
+
+ default:
+ /* The other builtin memory access functions are not instrumented
+ here because they either have no length parameter, or their
+ length parameter is just a limit (e.g. BUILT_IN_STRNCMP). */
+ break;
+ }
+
+ if (len != NULL_TREE)
+ {
+ is_store = (dest != NULL_TREE);
+
+ if (source0 != NULL_TREE)
+ instrument_mem_region_access (source0, len, iter,
+ location, is_store);
+ if (source1 != NULL_TREE)
+ instrument_mem_region_access (source1, len, iter,
+ location, is_store);
+ else if (dest != NULL_TREE)
+ instrument_mem_region_access (dest, len, iter,
+ location, is_store);
+ return true;
+ }
+ else if (source0 != NULL_TREE)
+ instrument_derefs (iter, source0, location, is_store);
+ return false;
+}
+
+/* Instrument the assignment statement pointed to by the iterator
+ ITER if it is subject to instrumentation. */
+
+static void
+instrument_assignment (gimple_stmt_iterator *iter)
+{
+ gimple s = gsi_stmt (*iter);
+
+ gcc_assert (gimple_assign_single_p (s));
+
+ instrument_derefs (iter, gimple_assign_lhs (s),
+ gimple_location (s), true);
+ instrument_derefs (iter, gimple_assign_rhs1 (s),
+ gimple_location (s), false);
+}
+
+/* Instrument the function call pointed to by the iterator ITER, if it
+ is subject to instrumentation. At the moment, the only function
+ calls that are instrumented are some built-in functions that access
+ memory. Look at maybe_instrument_builtin_call to learn more. */
+
+static void
+maybe_instrument_call (gimple_stmt_iterator *iter)
+{
+ maybe_instrument_builtin_call (iter);
+}
+
/* asan: this looks too complex. Can this be done simpler? */
/* Transform
1) Memory references.
@@ -686,13 +1191,12 @@ transform_statements (void)
if (bb->index >= saved_last_basic_block) continue;
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
- gimple s = gsi_stmt (i);
- if (!gimple_assign_single_p (s))
- continue;
- instrument_derefs (&i, gimple_assign_lhs (s),
- gimple_location (s), true);
- instrument_derefs (&i, gimple_assign_rhs1 (s),
- gimple_location (s), false);
+ gimple s = gsi_stmt (i);
+
+ if (gimple_assign_single_p (s))
+ instrument_assignment (&i);
+ else if (is_gimple_call (s))
+ maybe_instrument_call (&i);
}
}
}
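
To see the effect end to end, an out-of-bounds builtin call such as
the one below should now be caught at run time by the added region
instrumentation (illustrative test, not part of the patch):

void
baz (void)
{
  char buf[8];
  /* The region instrumentation checks the first and the last byte of
     the destination, i.e. buf[0] and buf[9]; the latter is out of
     bounds, so asan should report it.  */
  __builtin_memset (buf, 0, 10);
}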