From c75e170548dd40c58940cb18d05f413c76d21d34 Mon Sep 17 00:00:00 2001
From: Giuseppe D'Angelo <giuseppe.dangelo@kdab.com>
Date: Mon, 9 Dec 2024 01:32:27 +0100
Subject: [PATCH] libstdc++: add support for cv-qualified types in atomic_ref
(P3323R1)
P3323R1 (a DR for C++20/C++11; fixes LWG 4069 and LWG 3508) clarifies
that std::atomic_ref<cv T> is meant to be supported.

This commit implements it by splitting the __atomic_ref class (which
atomic_ref inherits from) into a further base class (__atomic_ref_base):

* __atomic_ref_base<T> implements the atomic API common to const and
  non-const Ts (with specializations for integral, floating-point and
  pointer types);
* __atomic_ref<T> inherits from __atomic_ref_base<T>; if T is
  non-const, it adds the "mutating" atomic APIs on top, such as store(),
  exchange(), and so on. The same split applies to the specializations.
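
An illustrative sketch of the resulting split (hypothetical user code,
not part of the patch):

    int i = 0;
    std::atomic_ref<const int> cr(i);
    cr.load();        // OK: load() comes from __atomic_ref_base
    // cr.store(1);   // ill-formed: store() is only added by the
                      // non-const __atomic_ref layer
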
The primary atomic_ref template is now meant to be used for cv-qualified
bool, not just plain bool, so the bool detection is amended accordingly.
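
For instance (mirroring the new bool.cc tests below), cv-qualified bool
keeps selecting the primary template; a minimal sketch:

    bool b = false;
    std::atomic_ref<const volatile bool> r(b);
    // r.fetch_add(1);  // ill-formed: cv-bool is excluded from the
                        // integral specialization
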
At the same time, disable support for cv-qualified types in std::atomic
(for instance, std::atomic<volatile T> isn't meaningful; one should use
volatile std::atomic<T>), again as per the paper.
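
For instance, after this change:

    std::atomic<volatile int> a1;  // now ill-formed (rejected by the
                                   // new static_assert)
    volatile std::atomic<int> a2;  // OK: the supported spelling
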
libstdc++-v3/ChangeLog:

* include/bits/atomic_base.h: Add support for atomic_ref<cv T>:
refactor __atomic_ref into a further subclass in order to
implement the constraints on atomic_ref mutating APIs; change
_Tp in various function signatures to be value_type instead.
* include/std/atomic: Add a static_assert to std::atomic, as per
P3323R1, complementing the existing ones.
* testsuite/29_atomics/atomic_ref/bool.cc: Add tests for
cv types in atomic_ref.
* testsuite/29_atomics/atomic_ref/deduction.cc: Likewise.
* testsuite/29_atomics/atomic_ref/float.cc: Likewise.
* testsuite/29_atomics/atomic_ref/generic.cc: Likewise.
* testsuite/29_atomics/atomic_ref/integral.cc: Likewise.
* testsuite/29_atomics/atomic_ref/pointer.cc: Likewise.
* testsuite/29_atomics/atomic_ref/requirements.cc: Likewise.
* testsuite/29_atomics/atomic_ref/wait_notify.cc: Likewise.

Signed-off-by: Giuseppe D'Angelo <giuseppe.dangelo@kdab.com>
---
libstdc++-v3/include/bits/atomic_base.h | 507 +++++++++++-------
libstdc++-v3/include/std/atomic | 1 +
.../testsuite/29_atomics/atomic_ref/bool.cc | 18 +
.../29_atomics/atomic_ref/deduction.cc | 33 +-
.../testsuite/29_atomics/atomic_ref/float.cc | 21 +-
.../29_atomics/atomic_ref/generic.cc | 6 +
.../29_atomics/atomic_ref/integral.cc | 6 +
.../29_atomics/atomic_ref/pointer.cc | 6 +
.../29_atomics/atomic_ref/requirements.cc | 70 ++-
.../29_atomics/atomic_ref/wait_notify.cc | 10 +
10 files changed, 440 insertions(+), 238 deletions(-)

diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -1473,14 +1473,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
};
#undef _GLIBCXX20_INIT
+ // atomic_ref inherits from __atomic_ref;
+ // __atomic_ref inherits from __atomic_ref_base.
+ //
+ // __atomic_ref_base provides the common APIs for const and non-const types;
+ // __atomic_ref adds on top the APIs for non-const types, thus implementing
+ // the various constraints in [atomic.ref].
+
template<typename _Tp,
- bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
- bool = is_floating_point_v<_Tp>>
+ bool = is_const_v<_Tp>,
+ bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,
+ bool = is_floating_point_v<_Tp>,
+ bool = is_pointer_v<_Tp>>
struct __atomic_ref;
- // base class for non-integral, non-floating-point, non-pointer types
+ template<typename _Tp,
+ bool _IsIntegral,
+ bool _IsFloatingPoint,
+ bool _IsPointer>
+ struct __atomic_ref_base;
+
+ // Const types
+ template<typename _Tp, bool _IsIntegral, bool _IsFloatingPoint, bool _IsPointer>
+ struct __atomic_ref<_Tp, true, _IsIntegral, _IsFloatingPoint, _IsPointer>
+ : __atomic_ref_base<_Tp, _IsIntegral, _IsFloatingPoint, _IsPointer>
+ {
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, _IsIntegral, _IsFloatingPoint, _IsPointer>(__t)
+ { }
+ };
+
+ // Non-integral, non-floating-point, non-pointer types
template<typename _Tp>
- struct __atomic_ref<_Tp, false, false>
+ struct __atomic_ref_base<_Tp, false, false, false>
{
static_assert(is_trivially_copyable_v<_Tp>);
@@ -1490,70 +1518,97 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
? 0 : sizeof(_Tp);
public:
- using value_type = _Tp;
+ using value_type = remove_cv_t<_Tp>;
static constexpr bool is_always_lock_free
= __atomic_always_lock_free(sizeof(_Tp), 0);
+ static_assert(is_always_lock_free || !is_volatile_v<_Tp>);
+
static constexpr size_t required_alignment
= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
- __atomic_ref& operator=(const __atomic_ref&) = delete;
+ __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
explicit
- __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
+ __atomic_ref_base(_Tp& __t) : _M_ptr(std::__addressof(__t))
{
__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
}
- __atomic_ref(const __atomic_ref&) noexcept = default;
+ __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
- _Tp
- operator=(_Tp __t) const noexcept
- {
- this->store(__t);
- return __t;
- }
-
- operator _Tp() const noexcept { return this->load(); }
+ operator value_type() const noexcept { return this->load(); }
bool
is_lock_free() const noexcept
{ return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
- void
- store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::store(_M_ptr, __t, __m); }
-
- _Tp
+ value_type
load(memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::load(_M_ptr, __m); }
- _Tp
- exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
+#if __glibcxx_atomic_wait
+ _GLIBCXX_ALWAYS_INLINE void
+ wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::wait(_M_ptr, __old, __m); }
+
+ // TODO add const volatile overload
+#endif // __glibcxx_atomic_wait
+
+ protected:
+ _Tp* _M_ptr;
+ };
+
+ template<typename _Tp>
+ struct __atomic_ref<_Tp, false, false, false, false>
+ : __atomic_ref_base<_Tp, false, false, false>
+ {
+ using value_type = typename __atomic_ref_base<_Tp, false, false, false>::value_type;
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, false, false, false>(__t)
+ { }
+
+ void
+ store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(this->_M_ptr, __t, __m); }
+
+ value_type
+ operator=(value_type __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ value_type
+ exchange(value_type __desired, memory_order __m = memory_order_seq_cst)
const noexcept
- { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+ { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
bool
- compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_weak<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_strong<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1562,7 +1617,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
bool
- compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1571,64 +1626,51 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
#if __glibcxx_atomic_wait
- _GLIBCXX_ALWAYS_INLINE void
- wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::wait(_M_ptr, __old, __m); }
-
- // TODO add const volatile overload
-
_GLIBCXX_ALWAYS_INLINE void
notify_one() const noexcept
- { __atomic_impl::notify_one(_M_ptr); }
+ { __atomic_impl::notify_one(this->_M_ptr); }
// TODO add const volatile overload
_GLIBCXX_ALWAYS_INLINE void
notify_all() const noexcept
- { __atomic_impl::notify_all(_M_ptr); }
+ { __atomic_impl::notify_all(this->_M_ptr); }
// TODO add const volatile overload
#endif // __glibcxx_atomic_wait
-
- private:
- _Tp* _M_ptr;
};
- // base class for atomic_ref<integral-type>
+
+ // Integral types (except cv-bool)
template<typename _Tp>
- struct __atomic_ref<_Tp, true, false>
+ struct __atomic_ref_base<_Tp, true, false, false>
{
static_assert(is_integral_v<_Tp>);
public:
- using value_type = _Tp;
+ using value_type = remove_cv_t<_Tp>;
using difference_type = value_type;
static constexpr bool is_always_lock_free
= __atomic_always_lock_free(sizeof(_Tp), 0);
+ static_assert(is_always_lock_free || !is_volatile_v<_Tp>);
+
static constexpr size_t required_alignment
= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
- __atomic_ref() = delete;
- __atomic_ref& operator=(const __atomic_ref&) = delete;
+ __atomic_ref_base() = delete;
+ __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
explicit
- __atomic_ref(_Tp& __t) : _M_ptr(&__t)
+ __atomic_ref_base(_Tp& __t) : _M_ptr(&__t)
{
__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
}
- __atomic_ref(const __atomic_ref&) noexcept = default;
-
- _Tp
- operator=(_Tp __t) const noexcept
- {
- this->store(__t);
- return __t;
- }
+ __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
- operator _Tp() const noexcept { return this->load(); }
+ operator value_type() const noexcept { return this->load(); }
bool
is_lock_free() const noexcept
@@ -1636,39 +1678,71 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
}
- void
- store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::store(_M_ptr, __t, __m); }
-
- _Tp
+ value_type
load(memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::load(_M_ptr, __m); }
- _Tp
- exchange(_Tp __desired,
+#if __glibcxx_atomic_wait
+ _GLIBCXX_ALWAYS_INLINE void
+ wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::wait(_M_ptr, __old, __m); }
+
+ // TODO add const volatile overload
+#endif // __glibcxx_atomic_wait
+
+ protected:
+ _Tp* _M_ptr;
+ };
+
+ template<typename _Tp>
+ struct __atomic_ref<_Tp, false, true, false, false>
+ : __atomic_ref_base<_Tp, true, false, false>
+ {
+ using value_type = typename __atomic_ref_base<_Tp, true, false, false>::value_type;
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, true, false, false>(__t)
+ { }
+
+ value_type
+ operator=(value_type __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ void
+ store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(this->_M_ptr, __t, __m); }
+
+ value_type
+ exchange(value_type __desired,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+ { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
bool
- compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_weak<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_strong<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1677,7 +1751,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
bool
- compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1686,21 +1760,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
#if __glibcxx_atomic_wait
- _GLIBCXX_ALWAYS_INLINE void
- wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::wait(_M_ptr, __old, __m); }
-
- // TODO add const volatile overload
-
_GLIBCXX_ALWAYS_INLINE void
notify_one() const noexcept
- { __atomic_impl::notify_one(_M_ptr); }
+ { __atomic_impl::notify_one(this->_M_ptr); }
// TODO add const volatile overload
_GLIBCXX_ALWAYS_INLINE void
notify_all() const noexcept
- { __atomic_impl::notify_all(_M_ptr); }
+ { __atomic_impl::notify_all(this->_M_ptr); }
// TODO add const volatile overload
#endif // __glibcxx_atomic_wait
@@ -1708,27 +1776,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
value_type
fetch_add(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
+ { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }
value_type
fetch_sub(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
+ { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }
value_type
fetch_and(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
+ { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }
value_type
fetch_or(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
+ { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }
value_type
fetch_xor(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
+ { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }
_GLIBCXX_ALWAYS_INLINE value_type
operator++(int) const noexcept
@@ -1740,70 +1808,62 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
value_type
operator++() const noexcept
- { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
+ { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }
value_type
operator--() const noexcept
- { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
+ { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }
value_type
operator+=(value_type __i) const noexcept
- { return __atomic_impl::__add_fetch(_M_ptr, __i); }
+ { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }
value_type
operator-=(value_type __i) const noexcept
- { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
+ { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }
value_type
operator&=(value_type __i) const noexcept
- { return __atomic_impl::__and_fetch(_M_ptr, __i); }
+ { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }
value_type
operator|=(value_type __i) const noexcept
- { return __atomic_impl::__or_fetch(_M_ptr, __i); }
+ { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }
value_type
operator^=(value_type __i) const noexcept
- { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
-
- private:
- _Tp* _M_ptr;
+ { return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }
};
- // base class for atomic_ref<floating-point-type>
+ // Floating-point types
template<typename _Fp>
- struct __atomic_ref<_Fp, false, true>
+ struct __atomic_ref_base<_Fp, false, true, false>
{
static_assert(is_floating_point_v<_Fp>);
public:
- using value_type = _Fp;
+ using value_type = remove_cv_t<_Fp>;
using difference_type = value_type;
static constexpr bool is_always_lock_free
= __atomic_always_lock_free(sizeof(_Fp), 0);
+ static_assert(is_always_lock_free || !is_volatile_v<_Fp>);
+
static constexpr size_t required_alignment = __alignof__(_Fp);
- __atomic_ref() = delete;
- __atomic_ref& operator=(const __atomic_ref&) = delete;
+ __atomic_ref_base() = delete;
+ __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
explicit
- __atomic_ref(_Fp& __t) : _M_ptr(&__t)
+ __atomic_ref_base(_Fp& __t) : _M_ptr(&__t)
{
__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
}
- __atomic_ref(const __atomic_ref&) noexcept = default;
+ __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
- _Fp
- operator=(_Fp __t) const noexcept
- {
- this->store(__t);
- return __t;
- }
-
- operator _Fp() const noexcept { return this->load(); }
+ operator value_type() const noexcept { return this->load(); }
bool
is_lock_free() const noexcept
@@ -1811,39 +1871,71 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
}
- void
- store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::store(_M_ptr, __t, __m); }
-
_Fp
load(memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::load(_M_ptr, __m); }
+#if __glibcxx_atomic_wait
+ _GLIBCXX_ALWAYS_INLINE void
+ wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::wait(_M_ptr, __old, __m); }
+
+ // TODO add const volatile overload
+#endif // __glibcxx_atomic_wait
+
+ protected:
+ _Fp* _M_ptr;
+ };
+
+ template<typename _Fp>
+ struct __atomic_ref<_Fp, false, false, true, false>
+ : __atomic_ref_base<_Fp, false, true, false>
+ {
+ using value_type = typename __atomic_ref_base<_Fp, false, true, false>::value_type;
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Fp& __t) : __atomic_ref_base<_Fp, false, true, false>(__t)
+ { }
+
+ value_type
+ operator=(value_type __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ void
+ store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(this->_M_ptr, __t, __m); }
+
_Fp
- exchange(_Fp __desired,
+ exchange(value_type __desired,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+ { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
bool
- compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_weak<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_strong<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1852,7 +1944,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
bool
- compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1861,21 +1953,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
#if __glibcxx_atomic_wait
- _GLIBCXX_ALWAYS_INLINE void
- wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::wait(_M_ptr, __old, __m); }
-
- // TODO add const volatile overload
-
_GLIBCXX_ALWAYS_INLINE void
notify_one() const noexcept
- { __atomic_impl::notify_one(_M_ptr); }
+ { __atomic_impl::notify_one(this->_M_ptr); }
// TODO add const volatile overload
_GLIBCXX_ALWAYS_INLINE void
notify_all() const noexcept
- { __atomic_impl::notify_all(_M_ptr); }
+ { __atomic_impl::notify_all(this->_M_ptr); }
// TODO add const volatile overload
#endif // __glibcxx_atomic_wait
@@ -1883,56 +1969,50 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
value_type
fetch_add(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
+ { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }
value_type
fetch_sub(value_type __i,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
+ { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }
value_type
operator+=(value_type __i) const noexcept
- { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
+ { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }
value_type
operator-=(value_type __i) const noexcept
- { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
-
- private:
- _Fp* _M_ptr;
+ { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }
};
- // base class for atomic_ref<pointer-type>
+ // Pointer types
template<typename _Tp>
- struct __atomic_ref<_Tp*, false, false>
+ struct __atomic_ref_base<_Tp, false, false, true>
{
+ static_assert(is_pointer_v<_Tp>);
+
public:
- using value_type = _Tp*;
+ using value_type = remove_cv_t<_Tp>;
using difference_type = ptrdiff_t;
static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
- static constexpr size_t required_alignment = __alignof__(_Tp*);
+ static_assert(is_always_lock_free || !is_volatile_v<_Tp>);
- __atomic_ref() = delete;
- __atomic_ref& operator=(const __atomic_ref&) = delete;
+ static constexpr size_t required_alignment = __alignof__(_Tp);
+
+ __atomic_ref_base() = delete;
+ __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
explicit
- __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
+ __atomic_ref_base(_Tp& __t) : _M_ptr(std::__addressof(__t))
{
__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
}
- __atomic_ref(const __atomic_ref&) noexcept = default;
-
- _Tp*
- operator=(_Tp* __t) const noexcept
- {
- this->store(__t);
- return __t;
- }
+ __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
- operator _Tp*() const noexcept { return this->load(); }
+ operator value_type() const noexcept { return this->load(); }
bool
is_lock_free() const noexcept
@@ -1940,39 +2020,94 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
}
- void
- store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::store(_M_ptr, __t, __m); }
-
- _Tp*
+ value_type
load(memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::load(_M_ptr, __m); }
- _Tp*
- exchange(_Tp* __desired,
+#if __glibcxx_atomic_wait
+ _GLIBCXX_ALWAYS_INLINE void
+ wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::wait(_M_ptr, __old, __m); }
+
+ // TODO add const volatile overload
+#endif // __glibcxx_atomic_wait
+
+ protected:
+ static constexpr ptrdiff_t
+ _S_type_size(ptrdiff_t __d) noexcept
+ {
+ using _PointedType = remove_pointer_t<_Tp>;
+ static_assert(is_object_v<_PointedType>);
+ return __d * sizeof(_PointedType);
+ }
+
+ _Tp* _M_ptr;
+ };
+
+ template<typename _Tp>
+ struct __atomic_ref<_Tp, false, false, false, true>
+ : __atomic_ref_base<_Tp, false, false, true>
+ {
+ using value_type = typename __atomic_ref_base<_Tp, false, false, true>::value_type;
+ using difference_type = typename __atomic_ref_base<_Tp, false, false, true>::difference_type;
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, false, false, true>(__t)
+ { }
+
+#if __glibcxx_atomic_wait
+ _GLIBCXX_ALWAYS_INLINE void
+ notify_one() const noexcept
+ { __atomic_impl::notify_one(this->_M_ptr); }
+
+ // TODO add const volatile overload
+
+ _GLIBCXX_ALWAYS_INLINE void
+ notify_all() const noexcept
+ { __atomic_impl::notify_all(this->_M_ptr); }
+
+ // TODO add const volatile overload
+#endif // __glibcxx_atomic_wait
+
+ value_type
+ operator=(value_type __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ void
+ store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(this->_M_ptr, __t, __m); }
+
+ value_type
+ exchange(value_type __desired,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+ { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
bool
- compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_weak<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __success,
memory_order __failure) const noexcept
{
return __atomic_impl::compare_exchange_strong<true>(
- _M_ptr, __expected, __desired, __success, __failure);
+ this->_M_ptr, __expected, __desired, __success, __failure);
}
bool
- compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+ compare_exchange_weak(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1981,7 +2116,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
bool
- compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+ compare_exchange_strong(value_type& __expected, value_type __desired,
memory_order __order = memory_order_seq_cst)
const noexcept
{
@@ -1989,35 +2124,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__cmpexch_failure_order(__order));
}
-#if __glibcxx_atomic_wait
- _GLIBCXX_ALWAYS_INLINE void
- wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
- { __atomic_impl::wait(_M_ptr, __old, __m); }
-
- // TODO add const volatile overload
-
- _GLIBCXX_ALWAYS_INLINE void
- notify_one() const noexcept
- { __atomic_impl::notify_one(_M_ptr); }
-
- // TODO add const volatile overload
-
- _GLIBCXX_ALWAYS_INLINE void
- notify_all() const noexcept
- { __atomic_impl::notify_all(_M_ptr); }
-
- // TODO add const volatile overload
-#endif // __glibcxx_atomic_wait
-
_GLIBCXX_ALWAYS_INLINE value_type
fetch_add(difference_type __d,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
+ { return __atomic_impl::fetch_add(this->_M_ptr, this->_S_type_size(__d), __m); }
_GLIBCXX_ALWAYS_INLINE value_type
fetch_sub(difference_type __d,
memory_order __m = memory_order_seq_cst) const noexcept
- { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
+ { return __atomic_impl::fetch_sub(this->_M_ptr, this->_S_type_size(__d), __m); }
value_type
operator++(int) const noexcept
@@ -2030,36 +2145,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
value_type
operator++() const noexcept
{
- return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
+ return __atomic_impl::__add_fetch(this->_M_ptr, this->_S_type_size(1));
}
value_type
operator--() const noexcept
{
- return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
+ return __atomic_impl::__sub_fetch(this->_M_ptr, this->_S_type_size(1));
}
value_type
operator+=(difference_type __d) const noexcept
{
- return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
+ return __atomic_impl::__add_fetch(this->_M_ptr, this->_S_type_size(__d));
}
value_type
operator-=(difference_type __d) const noexcept
{
- return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
+ return __atomic_impl::__sub_fetch(this->_M_ptr, this->_S_type_size(__d));
}
-
- private:
- static constexpr ptrdiff_t
- _S_type_size(ptrdiff_t __d) noexcept
- {
- static_assert(is_object_v<_Tp>);
- return __d * sizeof(_Tp);
- }
-
- _Tp** _M_ptr;
};
#endif // C++2a

diff --git a/libstdc++-v3/include/std/atomic b/libstdc++-v3/include/std/atomic
--- a/libstdc++-v3/include/std/atomic
+++ b/libstdc++-v3/include/std/atomic
@@ -222,6 +222,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_assert(is_move_constructible_v<_Tp>);
static_assert(is_copy_assignable_v<_Tp>);
static_assert(is_move_assignable_v<_Tp>);
+ static_assert(is_same_v<_Tp, remove_cv_t<_Tp>>);
#endif
public:

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc
@@ -13,3 +13,21 @@ static_assert( not has_or<std::atomic_ref<bool>> );
static_assert( not has_xor<std::atomic_ref<bool>> );
static_assert( not has_fetch_add<std::atomic_ref<bool>> );
static_assert( not has_fetch_sub<std::atomic_ref<bool>> );
+
+static_assert( not has_and<std::atomic_ref<const bool>> );
+static_assert( not has_or<std::atomic_ref<const bool>> );
+static_assert( not has_xor<std::atomic_ref<const bool>> );
+static_assert( not has_fetch_add<std::atomic_ref<const bool>> );
+static_assert( not has_fetch_sub<std::atomic_ref<const bool>> );
+
+static_assert( not has_and<std::atomic_ref<volatile bool>> );
+static_assert( not has_or<std::atomic_ref<volatile bool>> );
+static_assert( not has_xor<std::atomic_ref<volatile bool>> );
+static_assert( not has_fetch_add<std::atomic_ref<volatile bool>> );
+static_assert( not has_fetch_sub<std::atomic_ref<volatile bool>> );
+
+static_assert( not has_and<std::atomic_ref<const volatile bool>> );
+static_assert( not has_or<std::atomic_ref<const volatile bool>> );
+static_assert( not has_xor<std::atomic_ref<const volatile bool>> );
+static_assert( not has_fetch_add<std::atomic_ref<const volatile bool>> );
+static_assert( not has_fetch_sub<std::atomic_ref<const volatile bool>> );

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc
@@ -19,22 +19,29 @@
#include <atomic>
+template <typename T>
void
-test01()
+test_impl(T v)
{
- int i = 0;
- std::atomic_ref a0(i);
- static_assert(std::is_same_v<decltype(a0), std::atomic_ref<int>>);
-
- float f = 1.0f;
- std::atomic_ref a1(f);
- static_assert(std::is_same_v<decltype(a1), std::atomic_ref<float>>);
+ std::atomic_ref a(v);
+ static_assert(std::is_same_v<decltype(a), std::atomic_ref<T>>);
+}
- int* p = &i;
- std::atomic_ref a2(p);
- static_assert(std::is_same_v<decltype(a2), std::atomic_ref<int*>>);
+template <typename T>
+void
+test(T v)
+{
+ test_impl<T>(v);
+ test_impl<const T>(v);
+ test_impl<volatile T>(v);
+ test_impl<const volatile T>(v);
+}
+int main()
+{
+ test<int>(0);
+ test<float>(1.0f);
+ test<int*>(nullptr);
struct X { } x;
- std::atomic_ref a3(x);
- static_assert(std::is_same_v<decltype(a3), std::atomic_ref<X>>);
+ test<X>(x);
}

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc
@@ -299,14 +299,19 @@ test04()
{
if constexpr (std::atomic_ref<float>::is_always_lock_free)
{
- float i = 0;
- float* ptr = 0;
- std::atomic_ref<float*> a0(ptr);
- std::atomic_ref<float*> a1(ptr);
- std::atomic_ref<float*> a2(a0);
- a0 = &i;
- VERIFY( a1 == &i );
- VERIFY( a2 == &i );
+ float i = 0.0f;
+ std::atomic_ref<float> a0(i);
+ std::atomic_ref<float> a1(i);
+ std::atomic_ref<const float> a1c(i);
+ std::atomic_ref<volatile float> a1v(i);
+ std::atomic_ref<const volatile float> a1cv(i);
+ std::atomic_ref<float> a2(a0);
+ a0 = 1.0f;
+ VERIFY( a1 == 1.0f );
+ VERIFY( a1c == 1.0f );
+ VERIFY( a1v == 1.0f );
+ VERIFY( a1cv == 1.0f );
+ VERIFY( a2 == 1.0f );
}
}

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc
@@ -108,9 +108,15 @@ test02()
X i;
std::atomic_ref<X> a0(i);
std::atomic_ref<X> a1(i);
+ std::atomic_ref<const X> a1c(i);
+ std::atomic_ref<volatile X> a1v(i);
+ std::atomic_ref<const volatile X> a1cv(i);
std::atomic_ref<X> a2(a0);
a0 = 42;
VERIFY( a1.load() == 42 );
+ VERIFY( a1c.load() == 42 );
+ VERIFY( a1v.load() == 42 );
+ VERIFY( a1cv.load() == 42 );
VERIFY( a2.load() == 42 );
}

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc
@@ -302,9 +302,15 @@ test03()
int i = 0;
std::atomic_ref<int> a0(i);
std::atomic_ref<int> a1(i);
+ std::atomic_ref<const int> a1c(i);
+ std::atomic_ref<volatile int> a1v(i);
+ std::atomic_ref<const volatile int> a1cv(i);
std::atomic_ref<int> a2(a0);
a0 = 42;
VERIFY( a1 == 42 );
+ VERIFY( a1c == 42 );
+ VERIFY( a1v == 42 );
+ VERIFY( a1cv == 42 );
VERIFY( a2 == 42 );
}

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc
@@ -210,9 +210,15 @@ test03()
int* ptr = 0;
std::atomic_ref<int*> a0(ptr);
std::atomic_ref<int*> a1(ptr);
+ std::atomic_ref<int* const> a1c(ptr);
+ std::atomic_ref<int* volatile> a1v(ptr);
+ std::atomic_ref<int* const volatile> a1cv(ptr);
std::atomic_ref<int*> a2(a0);
a0 = &i;
VERIFY( a1 == &i );
+ VERIFY( a1c == &i );
+ VERIFY( a1v == &i );
+ VERIFY( a1cv == &i );
VERIFY( a2 == &i );
}

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc
@@ -18,56 +18,94 @@
// { dg-do compile { target c++20 } }
#include <atomic>
+#include <type_traits>
+template <class T>
void
-test01()
+test_generic()
{
- struct X { int c; };
- using A = std::atomic_ref<X>;
+ using A = std::atomic_ref<T>;
static_assert( std::is_standard_layout_v<A> );
static_assert( std::is_nothrow_copy_constructible_v<A> );
static_assert( std::is_trivially_destructible_v<A> );
- static_assert( std::is_same_v<A::value_type, X> );
+ static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );
static_assert( !std::is_copy_assignable_v<A> );
static_assert( !std::is_move_assignable_v<A> );
}
+template <class T>
void
-test02()
+test_integral()
{
- using A = std::atomic_ref<int>;
+ static_assert( std::is_integral_v<T> );
+ using A = std::atomic_ref<T>;
static_assert( std::is_standard_layout_v<A> );
static_assert( std::is_nothrow_copy_constructible_v<A> );
static_assert( std::is_trivially_destructible_v<A> );
- static_assert( std::is_same_v<A::value_type, int> );
- static_assert( std::is_same_v<A::difference_type, A::value_type> );
+ static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );
+ static_assert( std::is_same_v<typename A::difference_type, typename A::value_type> );
static_assert( !std::is_copy_assignable_v<A> );
static_assert( !std::is_move_assignable_v<A> );
}
+template <class T>
void
-test03()
+test_floating_point()
{
- using A = std::atomic_ref<double>;
+ static_assert( std::is_floating_point_v<T> );
+ using A = std::atomic_ref<T>;
static_assert( std::is_standard_layout_v<A> );
static_assert( std::is_nothrow_copy_constructible_v<A> );
static_assert( std::is_trivially_destructible_v<A> );
- static_assert( std::is_same_v<A::value_type, double> );
- static_assert( std::is_same_v<A::difference_type, A::value_type> );
+ static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );
+ static_assert( std::is_same_v<typename A::difference_type, typename A::value_type> );
static_assert( !std::is_copy_assignable_v<A> );
static_assert( !std::is_move_assignable_v<A> );
}
+template <class T>
void
-test04()
+test_pointer()
{
- using A = std::atomic_ref<int*>;
+ static_assert( std::is_pointer_v<T> );
+ using A = std::atomic_ref<T>;
static_assert( std::is_standard_layout_v<A> );
static_assert( std::is_nothrow_copy_constructible_v<A> );
static_assert( std::is_trivially_destructible_v<A> );
- static_assert( std::is_same_v<A::value_type, int*> );
- static_assert( std::is_same_v<A::difference_type, std::ptrdiff_t> );
+ static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );
+ static_assert( std::is_same_v<typename A::difference_type, std::ptrdiff_t> );
static_assert( std::is_nothrow_copy_constructible_v<A> );
static_assert( !std::is_copy_assignable_v<A> );
static_assert( !std::is_move_assignable_v<A> );
}
+
+int
+main()
+{
+ struct X { int c; };
+ test_generic<X>();
+ test_generic<const X>();
+ test_generic<volatile X>();
+ test_generic<const volatile X>();
+
+ // atomic_ref excludes (cv) `bool` from the set of integral types
+ test_generic<bool>();
+ test_generic<const bool>();
+ test_generic<volatile bool>();
+ test_generic<const volatile bool>();
+
+ test_integral<int>();
+ test_integral<const int>();
+ test_integral<volatile int>();
+ test_integral<const volatile int>();
+
+ test_floating_point<double>();
+ test_floating_point<const double>();
+ test_floating_point<volatile double>();
+ test_floating_point<const volatile double>();
+
+ test_pointer<int*>();
+ test_pointer<int* const>();
+ test_pointer<int* volatile>();
+ test_pointer<int* const volatile>();
+}
\ No newline at end of file

diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc
--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc
@@ -41,6 +41,16 @@ template<typename S>
});
a.wait(va);
t.join();
+
+ std::atomic_ref<const S> b{ aa };
+ b.wait(va);
+ std::thread t2([&]
+ {
+ a.store(va);
+ a.notify_one();
+ });
+ b.wait(vb);
+ t2.join();
}
}
--
2.34.1