#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#endif // C++20
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
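
  // Added note (not in the original header): __cmpexch_failure_order maps a
  // compare-exchange success order to a legal failure order by stripping any
  // release component, e.g. memory_order_acq_rel fails as acquire and
  // memory_order_release fails as relaxed, while any HLE modifier bits are
  // carried over unchanged.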
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
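
  // Illustrative sketch (not part of this header, using a hypothetical
  // std::atomic<int> named flag): a release fence before a relaxed store
  // pairs with an acquire fence after a relaxed load that observes it,
  // giving the same synchronizes-with edge as a release store / acquire load:
  //
  //   std::atomic_thread_fence(std::memory_order_release);
  //   flag.store(1, std::memory_order_relaxed);
  //   ...
  //   while (flag.load(std::memory_order_relaxed) != 1) { }
  //   std::atomic_thread_fence(std::memory_order_acquire);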
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;
  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
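
  // Illustrative sketch (not part of this header): atomic_flag as a minimal
  // spin lock, the canonical use of test_and_set/clear:
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //   while (lock.test_and_set(std::memory_order_acquire))
  //     ; // spin until the previous value was false
  //   /* ... critical section ... */
  //   lock.clear(std::memory_order_release);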
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
				     reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	return __atomic_is_lock_free(sizeof(_M_i),
				     reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
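
      // Illustrative sketch (not part of this header): the usual retry loop
      // around compare_exchange_weak, shown for a hypothetical
      // std::atomic<int> counter that is incremented but clamped at limit:
      //
      //   int old = counter.load(std::memory_order_relaxed);
      //   do {
      //     if (old >= limit)
      //       break;                                  // nothing to do
      //   } while (!counter.compare_exchange_weak(old, old + 1,
      //                                           std::memory_order_acq_rel,
      //                                           std::memory_order_relaxed));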
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
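
  // Added note (not in the original header): the fetch_* members take an
  // explicit memory_order and return the old value, while the overloaded
  // compound-assignment and increment/decrement operators always use
  // memory_order_seq_cst and return the new value (they map to the GCC
  // __atomic_*_fetch builtins rather than __atomic_fetch_*).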
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
	return __atomic_is_lock_free(sizeof(_M_p),
				     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	return __atomic_is_lock_free(sizeof(_M_p),
				     reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
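
  // Added note (not in the original header): _M_type_size scales the
  // ptrdiff_t argument by sizeof(_PTp), so fetch_add/fetch_sub on the pointer
  // specialization advance by whole elements, matching ordinary pointer
  // arithmetic.  For instance (illustrative only):
  //
  //   int buf[4] = { };
  //   std::atomic<int*> p(buf);
  //   p.fetch_add(2);            // p now points at buf + 2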
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      load(_Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __dest = reinterpret_cast<_Tp*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __dest = reinterpret_cast<_Tp*>(__buf);
	__atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
	return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
					 std::__addressof(__desired), true,
					 int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
					 std::__addressof(__desired), false,
					 int(__success), int(__failure));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __newval;
      }
  } // namespace __atomic_impl
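
  // Added note (not in the original header): there are no GCC builtins for
  // atomic floating-point arithmetic, so the *_flt helpers above emulate
  // fetch_add/fetch_sub with a compare_exchange_weak retry loop; the relaxed
  // failure order is safe because compare_exchange_weak refreshes __oldval
  // with the currently stored value before the next iteration.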
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      { return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      { return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      { return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      { return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp;
    };
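
  // Illustrative sketch (not part of this header): std::atomic<double>, which
  // libstdc++ implements in terms of __atomic_float, exposes the members
  // above as atomic floating-point accumulation:
  //
  //   std::atomic<double> sum{0.0};
  //   sum.fetch_add(1.5);        // CAS loop under the hood, returns old value
  //   sum += 2.5;                // seq_cst, returns the new value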
  template<typename _Tp,
	   bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
	? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }

    private:
      _Tp* _M_ptr;
    };
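
  // Illustrative sketch (not part of this header): __atomic_ref is the
  // implementation behind std::atomic_ref, which performs atomic operations
  // on an ordinary, suitably aligned object (Counters is a hypothetical type):
  //
  //   struct Counters { long hits; long misses; };
  //   Counters c{};
  //   std::atomic_ref<long> hits(c.hits);
  //   hits.fetch_add(1, std::memory_order_relaxed);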
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      value_type
      exchange(_Tp* __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure); }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order)); }
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	static_assert(is_object_v<_Tp>);
	return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
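
  // Added note (not in the original header): _S_type_size mirrors the scaling
  // done by the pointer __atomic_base specialization, and its
  // static_assert(is_object_v<_Tp>) rejects atomic pointer arithmetic when
  // _Tp is a function type, reference type or cv void, for which element
  // scaling is not meaningful.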
#endif // C++2a

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H