@@ -22,50 +22,21 @@ (define_mode_iterator AMO [SI DI])
;;; Plain atomic modify operations.
-;; Non-fetching atomic add predates all other BPF atomic insns.
-;; Use xadd{w,dw} for compatibility with older GAS without support
-;; for v3 atomics. Newer GAS supports "aadd[32]" in line with the
-;; other atomic operations.
-(define_insn "atomic_add<AMO:mode>"
- [(set (match_operand:AMO 0 "memory_operand" "+m")
- (unspec_volatile:AMO
- [(plus:AMO (match_dup 0)
- (match_operand:AMO 1 "register_operand" "r"))
- (match_operand:SI 2 "const_int_operand")] ;; Memory model.
- UNSPEC_AADD))]
- ""
- "{xadd<mop>\t%0,%1|lock *(<smop> *)%w0 += %w1}"
- [(set_attr "type" "atomic")])
-
-(define_insn "atomic_and<AMO:mode>"
- [(set (match_operand:AMO 0 "memory_operand" "+m")
- (unspec_volatile:AMO
- [(and:AMO (match_dup 0)
- (match_operand:AMO 1 "register_operand" "r"))
- (match_operand:SI 2 "const_int_operand")] ;; Memory model.
- UNSPEC_AAND))]
- "bpf_has_v3_atomics"
- "{aand<msuffix>\t%0,%1|lock *(<smop> *)%w0 &= %w1}")
-
-(define_insn "atomic_or<AMO:mode>"
- [(set (match_operand:AMO 0 "memory_operand" "+m")
- (unspec_volatile:AMO
- [(ior:AMO (match_dup 0)
- (match_operand:AMO 1 "register_operand" "r"))
- (match_operand:SI 2 "const_int_operand")] ;; Memory model.
- UNSPEC_AOR))]
- "bpf_has_v3_atomics"
- "{aor<msuffix>\t%0,%1|lock *(<smop> *)%w0 %|= %w1}")
-
-(define_insn "atomic_xor<AMO:mode>"
- [(set (match_operand:AMO 0 "memory_operand" "+m")
- (unspec_volatile:AMO
- [(xor:AMO (match_dup 0)
- (match_operand:AMO 1 "register_operand" "r"))
- (match_operand:SI 2 "const_int_operand")] ;; Memory model.
- UNSPEC_AXOR))]
- "bpf_has_v3_atomics"
- "{axor<msuffix>\t%0,%1|lock *(<smop> *)%w0 ^= %w1}")
+;; The BPF instruction set provides non-fetching atomic instructions
+;; that could be used to implement the corresponding named insns:
+;;
+;; atomic_add -> aadd (aka xadd)
+;; atomic_and -> aand
+;; atomic_or -> aor
+;; atomic_xor -> axor
+;;
+;; However, we are not including insns for these here because the
+;; non-fetching BPF atomic instructions imply different memory
+;; ordering semantics than the fetching BPF atomic instructions used
+;; to implement the atomic_fetch_* insns below (afadd, afand, afor,
+;; afxor), and they cannot be used interchangeably, as is expected by
+;; GCC when it uses a non-fetching variant as an optimization of a
+;; fetching operation whose returned value is not used.
;;; Feching (read-modify-store) versions of atomic operations.
@@ -1,7 +1,12 @@
-/* Test 64-bit non-fetch atomic operations. */
+/* Test 64-bit fetch and non-fetch atomic operations. */
/* { dg-do compile } */
/* { dg-options "-mv3-atomics -O2 -masm=normal" } */
+/* Note that GCC optimizes __atomic_OP_fetch calls whose return value is not
+   used into non-fetching operations that, in BPF, generate fetching
+   instructions anyway.  See note in gcc/config/bpf/atomic.md in this
+   regard.  */
+
long val;
void
@@ -10,40 +15,76 @@ test_atomic_add (long x)
__atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+long
+test_used_atomic_add (long x)
+{
+ return __atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_sub (long x)
{
__atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+long
+test_used_atomic_sub (long x)
+{
+  /* Lowered to afadd of the negated operand; see atomic.md note.  */
+  return __atomic_sub_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_and (long x)
{
__atomic_and_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+long
+test_used_atomic_and (long x)
+{
+ return __atomic_and_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_nand (long x)
{
__atomic_nand_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+long
+test_used_atomic_nand (long x)
+{
+ return __atomic_nand_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_or (long x)
{
__atomic_or_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+long
+test_used_atomic_or (long x)
+{
+ return __atomic_or_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_xor (long x)
{
__atomic_xor_fetch (&val, x, __ATOMIC_ACQUIRE);
}
-/* sub implemented in terms of add, and we output xadd to support older GAS. */
-/* { dg-final { scan-assembler-times "xadddw\t" 2 } } */
-/* { dg-final { scan-assembler-times "aand\t" 1 } } */
+long
+test_used_atomic_xor (long x)
+{
+ return __atomic_xor_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
+/* sub implemented in terms of add. */
+/* { dg-final { scan-assembler-times "afadd\t" 4 } } */
+/* { dg-final { scan-assembler-times "afand\t" 2 } } */
/* nand must use an exchange loop */
/* { dg-final { scan-assembler "acmp\t" } } */
-/* { dg-final { scan-assembler-times "aor\t" 1 } } */
-/* { dg-final { scan-assembler-times "axor\t" 1 } } */
+/* { dg-final { scan-assembler-times "afor\t" 2 } } */
+/* { dg-final { scan-assembler-times "afxor\t" 2 } } */
@@ -1,7 +1,12 @@
-/* Test 32-bit non-fetch atomic operations. */
+/* Test 32-bit fetch and non-fetch atomic operations. */
/* { dg-do compile } */
/* { dg-options "-mv3-atomics -O2 -masm=normal" } */
+/* Note that GCC optimizes __atomic_OP_fetch calls whose return value is not
+   used into non-fetching operations that, in BPF, generate fetching
+   instructions anyway.  See note in gcc/config/bpf/atomic.md in this
+   regard.  */
+
int val;
void
@@ -10,40 +15,76 @@ test_atomic_add (int x)
__atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+int
+test_used_atomic_add (int x)
+{
+  return __atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_sub (int x)
{
__atomic_add_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+int
+test_used_atomic_sub (int x)
+{
+  /* Lowered to afadd32 of the negated operand; see atomic.md note.  */
+  return __atomic_sub_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_and (int x)
{
__atomic_and_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+int
+test_used_atomic_and (int x)
+{
+  return __atomic_and_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_nand (int x)
{
__atomic_nand_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+int
+test_used_atomic_nand (int x)
+{
+  return __atomic_nand_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_or (int x)
{
__atomic_or_fetch (&val, x, __ATOMIC_ACQUIRE);
}
+int
+test_used_atomic_or (int x)
+{
+  return __atomic_or_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
void
test_atomic_xor (int x)
{
__atomic_xor_fetch (&val, x, __ATOMIC_ACQUIRE);
}
-/* sub implemented in terms of add, and we output xadd to support older GAS. */
-/* { dg-final { scan-assembler-times "xaddw\t" 2 } } */
-/* { dg-final { scan-assembler-times "aand32\t" 1 } } */
+int
+test_used_atomic_xor (int x)
+{
+  return __atomic_xor_fetch (&val, x, __ATOMIC_ACQUIRE);
+}
+
+/* sub implemented in terms of add. */
+/* { dg-final { scan-assembler-times "afadd32\t" 4 } } */
+/* { dg-final { scan-assembler-times "afand32\t" 2 } } */
/* nand must use an exchange loop */
/* { dg-final { scan-assembler "acmp32\t" } } */
-/* { dg-final { scan-assembler-times "aor32\t" 1 } } */
-/* { dg-final { scan-assembler-times "axor32\t" 1 } } */
+/* { dg-final { scan-assembler-times "afor32\t" 2 } } */
+/* { dg-final { scan-assembler-times "afxor32\t" 2 } } */
@@ -11,5 +11,5 @@ foo ()
__sync_fetch_and_add((int *)val, (int)delta);
}
-/* { dg-final { scan-assembler "xadddw\t.*" } } */
-/* { dg-final { scan-assembler "xaddw\t.*" } } */
+/* { dg-final { scan-assembler "afadd\t.*" } } */
+/* { dg-final { scan-assembler "afadd32\t.*" } } */