libstdc++
atomic_base.h
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2008-2019 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_base.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_BASE_H
31 #define _GLIBCXX_ATOMIC_BASE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #include <stdint.h>
37 #include <bits/atomic_lockfree_defines.h>
38 #include <bits/move.h>
39 
40 #ifndef _GLIBCXX_ALWAYS_INLINE
41 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
42 #endif
43 
44 namespace std _GLIBCXX_VISIBILITY(default)
45 {
46 _GLIBCXX_BEGIN_NAMESPACE_VERSION
47 
48  /**
49  * @defgroup atomics Atomics
50  *
51  * Components for performing atomic operations.
52  * @{
53  */
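 // Illustrative note (not part of the original header; names are made up for
 // the example): user code reaches the machinery defined below by including
 // <atomic> rather than this internal file, e.g.
 //
 //   #include <atomic>
 //
 //   std::atomic<int> counter{0};
 //
 //   void hit() noexcept
 //   { counter.fetch_add(1, std::memory_order_relaxed); }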
54 
55  /// Enumeration for memory_order
56 #if __cplusplus > 201703L
57  enum class memory_order : int
58  {
59  relaxed,
60  consume,
61  acquire,
62  release,
63  acq_rel,
64  seq_cst
65  };
66 
67  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
68  inline constexpr memory_order memory_order_consume = memory_order::consume;
69  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
70  inline constexpr memory_order memory_order_release = memory_order::release;
71  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
72  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
73 #else
74  typedef enum memory_order
75  {
76  memory_order_relaxed,
77  memory_order_consume,
78  memory_order_acquire,
79  memory_order_release,
80  memory_order_acq_rel,
81  memory_order_seq_cst
82  } memory_order;
83 #endif
84 
85  enum __memory_order_modifier
86  {
87  __memory_order_mask = 0x0ffff,
88  __memory_order_modifier_mask = 0xffff0000,
89  __memory_order_hle_acquire = 0x10000,
90  __memory_order_hle_release = 0x20000
91  };
92 
93  constexpr memory_order
94  operator|(memory_order __m, __memory_order_modifier __mod)
95  {
96  return memory_order(int(__m) | int(__mod));
97  }
98 
99  constexpr memory_order
100  operator&(memory_order __m, __memory_order_modifier __mod)
101  {
102  return memory_order(int(__m) & int(__mod));
103  }
104 
105  // Drop release ordering as per [atomics.types.operations.req]/21
106  constexpr memory_order
107  __cmpexch_failure_order2(memory_order __m) noexcept
108  {
109  return __m == memory_order_acq_rel ? memory_order_acquire
110  : __m == memory_order_release ? memory_order_relaxed : __m;
111  }
112 
113  constexpr memory_order
114  __cmpexch_failure_order(memory_order __m) noexcept
115  {
116  return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
117  | __memory_order_modifier(__m & __memory_order_modifier_mask));
118  }
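 // For illustration (not part of the original header), the mapping performed
 // above is:
 //
 //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
 //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
 //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
 //   __cmpexch_failure_order(memory_order_acquire) == memory_order_acquire
 //
 // so the failure order used by the single-order compare_exchange overloads
 // below never contains a release component.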
119 
120  _GLIBCXX_ALWAYS_INLINE void
121  atomic_thread_fence(memory_order __m) noexcept
122  { __atomic_thread_fence(int(__m)); }
123 
124  _GLIBCXX_ALWAYS_INLINE void
125  atomic_signal_fence(memory_order __m) noexcept
126  { __atomic_signal_fence(int(__m)); }
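 // Usage sketch (illustrative, not part of the original header; assumes user
 // code includes <atomic>): a release fence paired with an acquire fence lets
 // relaxed accesses carry the synchronization.
 //
 //   std::atomic<bool> ready{false};
 //   int payload;
 //
 //   void producer()
 //   {
 //     payload = 42;
 //     std::atomic_thread_fence(std::memory_order_release);
 //     ready.store(true, std::memory_order_relaxed);
 //   }
 //
 //   void consumer()
 //   {
 //     while (!ready.load(std::memory_order_relaxed))
 //       ;                                          // spin until published
 //     std::atomic_thread_fence(std::memory_order_acquire);
 //     // payload == 42 is now guaranteed to be visible here
 //   }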
127 
128  /// kill_dependency
129  template<typename _Tp>
130  inline _Tp
131  kill_dependency(_Tp __y) noexcept
132  {
133  _Tp __ret(__y);
134  return __ret;
135  }
136 
137 
138  // Base types for atomics.
139  template<typename _IntTp>
140  struct __atomic_base;
141 
142 
143 #define ATOMIC_VAR_INIT(_VI) { _VI }
144 
145  template<typename _Tp>
146  struct atomic;
147 
148  template<typename _Tp>
149  struct atomic<_Tp*>;
150 
151  /* The target's "set" value for test-and-set may not be exactly 1. */
152 #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
153  typedef bool __atomic_flag_data_type;
154 #else
155  typedef unsigned char __atomic_flag_data_type;
156 #endif
157 
158  /**
159  * @brief Base type for atomic_flag.
160  *
161  * Base type is POD with data, allowing atomic_flag to derive from
162  * it and meet the standard layout type requirement. In addition to
163  * compatibility with a C interface, this allows different
164  * implementations of atomic_flag to use the same atomic operation
165  * functions, via a standard conversion to the __atomic_flag_base
166  * argument.
167  */
168  _GLIBCXX_BEGIN_EXTERN_C
169 
170  struct __atomic_flag_base
171  {
172  __atomic_flag_data_type _M_i;
173  };
174 
175  _GLIBCXX_END_EXTERN_C
176 
177 #define ATOMIC_FLAG_INIT { 0 }
178 
179  /// atomic_flag
180  struct atomic_flag : public __atomic_flag_base
181  {
182  atomic_flag() noexcept = default;
183  ~atomic_flag() noexcept = default;
184  atomic_flag(const atomic_flag&) = delete;
185  atomic_flag& operator=(const atomic_flag&) = delete;
186  atomic_flag& operator=(const atomic_flag&) volatile = delete;
187 
188  // Conversion to ATOMIC_FLAG_INIT.
189  constexpr atomic_flag(bool __i) noexcept
190  : __atomic_flag_base{ _S_init(__i) }
191  { }
192 
193  _GLIBCXX_ALWAYS_INLINE bool
194  test_and_set(memory_order __m = memory_order_seq_cst) noexcept
195  {
196  return __atomic_test_and_set (&_M_i, int(__m));
197  }
198 
199  _GLIBCXX_ALWAYS_INLINE bool
200  test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
201  {
202  return __atomic_test_and_set (&_M_i, int(__m));
203  }
204 
205  _GLIBCXX_ALWAYS_INLINE void
206  clear(memory_order __m = memory_order_seq_cst) noexcept
207  {
208  memory_order __b = __m & __memory_order_mask;
209  __glibcxx_assert(__b != memory_order_consume);
210  __glibcxx_assert(__b != memory_order_acquire);
211  __glibcxx_assert(__b != memory_order_acq_rel);
212 
213  __atomic_clear (&_M_i, int(__m));
214  }
215 
216  _GLIBCXX_ALWAYS_INLINE void
217  clear(memory_order __m = memory_order_seq_cst) volatile noexcept
218  {
219  memory_order __b = __m & __memory_order_mask;
220  __glibcxx_assert(__b != memory_order_consume);
221  __glibcxx_assert(__b != memory_order_acquire);
222  __glibcxx_assert(__b != memory_order_acq_rel);
223 
224  __atomic_clear (&_M_i, int(__m));
225  }
226 
227  private:
228  static constexpr __atomic_flag_data_type
229  _S_init(bool __i)
230  { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
231  };
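 // Usage sketch (illustrative, not part of the original header; names are
 // made up): atomic_flag as a minimal spinlock built on test_and_set()/clear().
 //
 //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
 //
 //   void lock() noexcept
 //   {
 //     while (lock_flag.test_and_set(std::memory_order_acquire))
 //       ;                        // spin until the previous value was clear
 //   }
 //
 //   void unlock() noexcept
 //   { lock_flag.clear(std::memory_order_release); }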
232 
233 
234  /// Base class for atomic integrals.
235  //
236  // For each of the integral types, define atomic_[integral type] struct
237  //
238  // atomic_bool bool
239  // atomic_char char
240  // atomic_schar signed char
241  // atomic_uchar unsigned char
242  // atomic_short short
243  // atomic_ushort unsigned short
244  // atomic_int int
245  // atomic_uint unsigned int
246  // atomic_long long
247  // atomic_ulong unsigned long
248  // atomic_llong long long
249  // atomic_ullong unsigned long long
250  // atomic_char8_t char8_t
251  // atomic_char16_t char16_t
252  // atomic_char32_t char32_t
253  // atomic_wchar_t wchar_t
254  //
255  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
256  // 8 bytes, since that is what GCC built-in functions for atomic
257  // memory access expect.
258  template<typename _ITp>
259  struct __atomic_base
260  {
261  using value_type = _ITp;
262  using difference_type = value_type;
263 
264  private:
265  typedef _ITp __int_type;
266 
267  static constexpr int _S_alignment =
268  sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
269 
270  alignas(_S_alignment) __int_type _M_i;
271 
272  public:
273  __atomic_base() noexcept = default;
274  ~__atomic_base() noexcept = default;
275  __atomic_base(const __atomic_base&) = delete;
276  __atomic_base& operator=(const __atomic_base&) = delete;
277  __atomic_base& operator=(const __atomic_base&) volatile = delete;
278 
279  // Requires __int_type convertible to _M_i.
280  constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
281 
282  operator __int_type() const noexcept
283  { return load(); }
284 
285  operator __int_type() const volatile noexcept
286  { return load(); }
287 
288  __int_type
289  operator=(__int_type __i) noexcept
290  {
291  store(__i);
292  return __i;
293  }
294 
295  __int_type
296  operator=(__int_type __i) volatile noexcept
297  {
298  store(__i);
299  return __i;
300  }
301 
302  __int_type
303  operator++(int) noexcept
304  { return fetch_add(1); }
305 
306  __int_type
307  operator++(int) volatile noexcept
308  { return fetch_add(1); }
309 
310  __int_type
311  operator--(int) noexcept
312  { return fetch_sub(1); }
313 
314  __int_type
315  operator--(int) volatile noexcept
316  { return fetch_sub(1); }
317 
318  __int_type
319  operator++() noexcept
320  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
321 
322  __int_type
323  operator++() volatile noexcept
324  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
325 
326  __int_type
327  operator--() noexcept
328  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
329 
330  __int_type
331  operator--() volatile noexcept
332  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
333 
334  __int_type
335  operator+=(__int_type __i) noexcept
336  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
337 
338  __int_type
339  operator+=(__int_type __i) volatile noexcept
340  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
341 
342  __int_type
343  operator-=(__int_type __i) noexcept
344  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
345 
346  __int_type
347  operator-=(__int_type __i) volatile noexcept
348  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
349 
350  __int_type
351  operator&=(__int_type __i) noexcept
352  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
353 
354  __int_type
355  operator&=(__int_type __i) volatile noexcept
356  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
357 
358  __int_type
359  operator|=(__int_type __i) noexcept
360  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
361 
362  __int_type
363  operator|=(__int_type __i) volatile noexcept
364  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
365 
366  __int_type
367  operator^=(__int_type __i) noexcept
368  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
369 
370  __int_type
371  operator^=(__int_type __i) volatile noexcept
372  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
373 
374  bool
375  is_lock_free() const noexcept
376  {
377  // Use a fake, minimally aligned pointer.
378  return __atomic_is_lock_free(sizeof(_M_i),
379  reinterpret_cast<void *>(-_S_alignment));
380  }
381 
382  bool
383  is_lock_free() const volatile noexcept
384  {
385  // Use a fake, minimally aligned pointer.
386  return __atomic_is_lock_free(sizeof(_M_i),
387  reinterpret_cast<void *>(-_S_alignment));
388  }
389 
390  _GLIBCXX_ALWAYS_INLINE void
391  store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
392  {
393  memory_order __b = __m & __memory_order_mask;
394  __glibcxx_assert(__b != memory_order_acquire);
395  __glibcxx_assert(__b != memory_order_acq_rel);
396  __glibcxx_assert(__b != memory_order_consume);
397 
398  __atomic_store_n(&_M_i, __i, int(__m));
399  }
400 
401  _GLIBCXX_ALWAYS_INLINE void
402  store(__int_type __i,
403  memory_order __m = memory_order_seq_cst) volatile noexcept
404  {
405  memory_order __b = __m & __memory_order_mask;
406  __glibcxx_assert(__b != memory_order_acquire);
407  __glibcxx_assert(__b != memory_order_acq_rel);
408  __glibcxx_assert(__b != memory_order_consume);
409 
410  __atomic_store_n(&_M_i, __i, int(__m));
411  }
412 
413  _GLIBCXX_ALWAYS_INLINE __int_type
414  load(memory_order __m = memory_order_seq_cst) const noexcept
415  {
416  memory_order __b = __m & __memory_order_mask;
417  __glibcxx_assert(__b != memory_order_release);
418  __glibcxx_assert(__b != memory_order_acq_rel);
419 
420  return __atomic_load_n(&_M_i, int(__m));
421  }
422 
423  _GLIBCXX_ALWAYS_INLINE __int_type
424  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
425  {
426  memory_order __b = __m & __memory_order_mask;
427  __glibcxx_assert(__b != memory_order_release);
428  __glibcxx_assert(__b != memory_order_acq_rel);
429 
430  return __atomic_load_n(&_M_i, int(__m));
431  }
432 
433  _GLIBCXX_ALWAYS_INLINE __int_type
434  exchange(__int_type __i,
435  memory_order __m = memory_order_seq_cst) noexcept
436  {
437  return __atomic_exchange_n(&_M_i, __i, int(__m));
438  }
439 
440 
441  _GLIBCXX_ALWAYS_INLINE __int_type
442  exchange(__int_type __i,
443  memory_order __m = memory_order_seq_cst) volatile noexcept
444  {
445  return __atomic_exchange_n(&_M_i, __i, int(__m));
446  }
447 
448  _GLIBCXX_ALWAYS_INLINE bool
449  compare_exchange_weak(__int_type& __i1, __int_type __i2,
450  memory_order __m1, memory_order __m2) noexcept
451  {
452  memory_order __b2 = __m2 & __memory_order_mask;
453  memory_order __b1 = __m1 & __memory_order_mask;
454  __glibcxx_assert(__b2 != memory_order_release);
455  __glibcxx_assert(__b2 != memory_order_acq_rel);
456  __glibcxx_assert(__b2 <= __b1);
457 
458  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
459  int(__m1), int(__m2));
460  }
461 
462  _GLIBCXX_ALWAYS_INLINE bool
463  compare_exchange_weak(__int_type& __i1, __int_type __i2,
464  memory_order __m1,
465  memory_order __m2) volatile noexcept
466  {
467  memory_order __b2 = __m2 & __memory_order_mask;
468  memory_order __b1 = __m1 & __memory_order_mask;
469  __glibcxx_assert(__b2 != memory_order_release);
470  __glibcxx_assert(__b2 != memory_order_acq_rel);
471  __glibcxx_assert(__b2 <= __b1);
472 
473  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
474  int(__m1), int(__m2));
475  }
476 
477  _GLIBCXX_ALWAYS_INLINE bool
478  compare_exchange_weak(__int_type& __i1, __int_type __i2,
479  memory_order __m = memory_order_seq_cst) noexcept
480  {
481  return compare_exchange_weak(__i1, __i2, __m,
482  __cmpexch_failure_order(__m));
483  }
484 
485  _GLIBCXX_ALWAYS_INLINE bool
486  compare_exchange_weak(__int_type& __i1, __int_type __i2,
487  memory_order __m = memory_order_seq_cst) volatile noexcept
488  {
489  return compare_exchange_weak(__i1, __i2, __m,
490  __cmpexch_failure_order(__m));
491  }
492 
493  _GLIBCXX_ALWAYS_INLINE bool
494  compare_exchange_strong(__int_type& __i1, __int_type __i2,
495  memory_order __m1, memory_order __m2) noexcept
496  {
497  memory_order __b2 = __m2 & __memory_order_mask;
498  memory_order __b1 = __m1 & __memory_order_mask;
499  __glibcxx_assert(__b2 != memory_order_release);
500  __glibcxx_assert(__b2 != memory_order_acq_rel);
501  __glibcxx_assert(__b2 <= __b1);
502 
503  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
504  int(__m1), int(__m2));
505  }
506 
507  _GLIBCXX_ALWAYS_INLINE bool
508  compare_exchange_strong(__int_type& __i1, __int_type __i2,
509  memory_order __m1,
510  memory_order __m2) volatile noexcept
511  {
512  memory_order __b2 = __m2 & __memory_order_mask;
513  memory_order __b1 = __m1 & __memory_order_mask;
514 
515  __glibcxx_assert(__b2 != memory_order_release);
516  __glibcxx_assert(__b2 != memory_order_acq_rel);
517  __glibcxx_assert(__b2 <= __b1);
518 
519  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
520  int(__m1), int(__m2));
521  }
522 
523  _GLIBCXX_ALWAYS_INLINE bool
524  compare_exchange_strong(__int_type& __i1, __int_type __i2,
525  memory_order __m = memory_order_seq_cst) noexcept
526  {
527  return compare_exchange_strong(__i1, __i2, __m,
528  __cmpexch_failure_order(__m));
529  }
530 
531  _GLIBCXX_ALWAYS_INLINE bool
532  compare_exchange_strong(__int_type& __i1, __int_type __i2,
533  memory_order __m = memory_order_seq_cst) volatile noexcept
534  {
535  return compare_exchange_strong(__i1, __i2, __m,
536  __cmpexch_failure_order(__m));
537  }
538 
539  _GLIBCXX_ALWAYS_INLINE __int_type
540  fetch_add(__int_type __i,
541  memory_order __m = memory_order_seq_cst) noexcept
542  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
543 
544  _GLIBCXX_ALWAYS_INLINE __int_type
545  fetch_add(__int_type __i,
546  memory_order __m = memory_order_seq_cst) volatile noexcept
547  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
548 
549  _GLIBCXX_ALWAYS_INLINE __int_type
550  fetch_sub(__int_type __i,
551  memory_order __m = memory_order_seq_cst) noexcept
552  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
553 
554  _GLIBCXX_ALWAYS_INLINE __int_type
555  fetch_sub(__int_type __i,
556  memory_order __m = memory_order_seq_cst) volatile noexcept
557  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
558 
559  _GLIBCXX_ALWAYS_INLINE __int_type
560  fetch_and(__int_type __i,
561  memory_order __m = memory_order_seq_cst) noexcept
562  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
563 
564  _GLIBCXX_ALWAYS_INLINE __int_type
565  fetch_and(__int_type __i,
566  memory_order __m = memory_order_seq_cst) volatile noexcept
567  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
568 
569  _GLIBCXX_ALWAYS_INLINE __int_type
570  fetch_or(__int_type __i,
571  memory_order __m = memory_order_seq_cst) noexcept
572  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
573 
574  _GLIBCXX_ALWAYS_INLINE __int_type
575  fetch_or(__int_type __i,
576  memory_order __m = memory_order_seq_cst) volatile noexcept
577  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
578 
579  _GLIBCXX_ALWAYS_INLINE __int_type
580  fetch_xor(__int_type __i,
581  memory_order __m = memory_order_seq_cst) noexcept
582  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
583 
584  _GLIBCXX_ALWAYS_INLINE __int_type
585  fetch_xor(__int_type __i,
586  memory_order __m = memory_order_seq_cst) volatile noexcept
587  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
588  };
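 // Usage sketch (illustrative, not part of the original header; fetch_max is
 // a made-up helper): the compare_exchange_weak members above are typically
 // driven in a retry loop, e.g. an atomic fetch-max on std::atomic<int>.
 //
 //   int fetch_max(std::atomic<int>& a, int v) noexcept
 //   {
 //     int old = a.load(std::memory_order_relaxed);
 //     while (old < v
 //            && !a.compare_exchange_weak(old, v,
 //                                        std::memory_order_seq_cst,
 //                                        std::memory_order_relaxed))
 //       { }       // on failure 'old' is reloaded with the current value
 //     return old;
 //   }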
589 
590 
591  /// Partial specialization for pointer types.
592  template<typename _PTp>
593  struct __atomic_base<_PTp*>
594  {
595  private:
596  typedef _PTp* __pointer_type;
597 
598  __pointer_type _M_p;
599 
600  // Factored out to facilitate explicit specialization.
601  constexpr ptrdiff_t
602  _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
603 
604  constexpr ptrdiff_t
605  _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
606 
607  public:
608  __atomic_base() noexcept = default;
609  ~__atomic_base() noexcept = default;
610  __atomic_base(const __atomic_base&) = delete;
611  __atomic_base& operator=(const __atomic_base&) = delete;
612  __atomic_base& operator=(const __atomic_base&) volatile = delete;
613 
614  // Requires __pointer_type convertible to _M_p.
615  constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
616 
617  operator __pointer_type() const noexcept
618  { return load(); }
619 
620  operator __pointer_type() const volatile noexcept
621  { return load(); }
622 
623  __pointer_type
624  operator=(__pointer_type __p) noexcept
625  {
626  store(__p);
627  return __p;
628  }
629 
630  __pointer_type
631  operator=(__pointer_type __p) volatile noexcept
632  {
633  store(__p);
634  return __p;
635  }
636 
637  __pointer_type
638  operator++(int) noexcept
639  { return fetch_add(1); }
640 
641  __pointer_type
642  operator++(int) volatile noexcept
643  { return fetch_add(1); }
644 
645  __pointer_type
646  operator--(int) noexcept
647  { return fetch_sub(1); }
648 
649  __pointer_type
650  operator--(int) volatile noexcept
651  { return fetch_sub(1); }
652 
653  __pointer_type
654  operator++() noexcept
655  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
656  int(memory_order_seq_cst)); }
657 
658  __pointer_type
659  operator++() volatile noexcept
660  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
661  int(memory_order_seq_cst)); }
662 
663  __pointer_type
664  operator--() noexcept
665  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
666  int(memory_order_seq_cst)); }
667 
668  __pointer_type
669  operator--() volatile noexcept
670  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
671  int(memory_order_seq_cst)); }
672 
673  __pointer_type
674  operator+=(ptrdiff_t __d) noexcept
675  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
676  int(memory_order_seq_cst)); }
677 
678  __pointer_type
679  operator+=(ptrdiff_t __d) volatile noexcept
680  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
681  int(memory_order_seq_cst)); }
682 
683  __pointer_type
684  operator-=(ptrdiff_t __d) noexcept
685  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
686  int(memory_order_seq_cst)); }
687 
688  __pointer_type
689  operator-=(ptrdiff_t __d) volatile noexcept
690  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
691  int(memory_order_seq_cst)); }
692 
693  bool
694  is_lock_free() const noexcept
695  {
696  // Produce a fake, minimally aligned pointer.
697  return __atomic_is_lock_free(sizeof(_M_p),
698  reinterpret_cast<void *>(-__alignof(_M_p)));
699  }
700 
701  bool
702  is_lock_free() const volatile noexcept
703  {
704  // Produce a fake, minimally aligned pointer.
705  return __atomic_is_lock_free(sizeof(_M_p),
706  reinterpret_cast<void *>(-__alignof(_M_p)));
707  }
708 
709  _GLIBCXX_ALWAYS_INLINE void
710  store(__pointer_type __p,
711  memory_order __m = memory_order_seq_cst) noexcept
712  {
713  memory_order __b = __m & __memory_order_mask;
714 
715  __glibcxx_assert(__b != memory_order_acquire);
716  __glibcxx_assert(__b != memory_order_acq_rel);
717  __glibcxx_assert(__b != memory_order_consume);
718 
719  __atomic_store_n(&_M_p, __p, int(__m));
720  }
721 
722  _GLIBCXX_ALWAYS_INLINE void
723  store(__pointer_type __p,
724  memory_order __m = memory_order_seq_cst) volatile noexcept
725  {
726  memory_order __b = __m & __memory_order_mask;
727  __glibcxx_assert(__b != memory_order_acquire);
728  __glibcxx_assert(__b != memory_order_acq_rel);
729  __glibcxx_assert(__b != memory_order_consume);
730 
731  __atomic_store_n(&_M_p, __p, int(__m));
732  }
733 
734  _GLIBCXX_ALWAYS_INLINE __pointer_type
735  load(memory_order __m = memory_order_seq_cst) const noexcept
736  {
737  memory_order __b = __m & __memory_order_mask;
738  __glibcxx_assert(__b != memory_order_release);
739  __glibcxx_assert(__b != memory_order_acq_rel);
740 
741  return __atomic_load_n(&_M_p, int(__m));
742  }
743 
744  _GLIBCXX_ALWAYS_INLINE __pointer_type
745  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
746  {
747  memory_order __b = __m & __memory_order_mask;
748  __glibcxx_assert(__b != memory_order_release);
749  __glibcxx_assert(__b != memory_order_acq_rel);
750 
751  return __atomic_load_n(&_M_p, int(__m));
752  }
753 
754  _GLIBCXX_ALWAYS_INLINE __pointer_type
755  exchange(__pointer_type __p,
756  memory_order __m = memory_order_seq_cst) noexcept
757  {
758  return __atomic_exchange_n(&_M_p, __p, int(__m));
759  }
760 
761 
762  _GLIBCXX_ALWAYS_INLINE __pointer_type
763  exchange(__pointer_type __p,
764  memory_order __m = memory_order_seq_cst) volatile noexcept
765  {
766  return __atomic_exchange_n(&_M_p, __p, int(__m));
767  }
768 
769  _GLIBCXX_ALWAYS_INLINE bool
770  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
771  memory_order __m1,
772  memory_order __m2) noexcept
773  {
774  memory_order __b2 = __m2 & __memory_order_mask;
775  memory_order __b1 = __m1 & __memory_order_mask;
776  __glibcxx_assert(__b2 != memory_order_release);
777  __glibcxx_assert(__b2 != memory_order_acq_rel);
778  __glibcxx_assert(__b2 <= __b1);
779 
780  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
781  int(__m1), int(__m2));
782  }
783 
784  _GLIBCXX_ALWAYS_INLINE bool
785  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
786  memory_order __m1,
787  memory_order __m2) volatile noexcept
788  {
789  memory_order __b2 = __m2 & __memory_order_mask;
790  memory_order __b1 = __m1 & __memory_order_mask;
791 
792  __glibcxx_assert(__b2 != memory_order_release);
793  __glibcxx_assert(__b2 != memory_order_acq_rel);
794  __glibcxx_assert(__b2 <= __b1);
795 
796  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
797  int(__m1), int(__m2));
798  }
799 
800  _GLIBCXX_ALWAYS_INLINE __pointer_type
801  fetch_add(ptrdiff_t __d,
802  memory_order __m = memory_order_seq_cst) noexcept
803  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
804 
805  _GLIBCXX_ALWAYS_INLINE __pointer_type
806  fetch_add(ptrdiff_t __d,
807  memory_order __m = memory_order_seq_cst) volatile noexcept
808  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
809 
810  _GLIBCXX_ALWAYS_INLINE __pointer_type
811  fetch_sub(ptrdiff_t __d,
812  memory_order __m = memory_order_seq_cst) noexcept
813  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
814 
815  _GLIBCXX_ALWAYS_INLINE __pointer_type
816  fetch_sub(ptrdiff_t __d,
817  memory_order __m = memory_order_seq_cst) volatile noexcept
818  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
819  };
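 // Usage sketch (illustrative, not part of the original header): arithmetic
 // on std::atomic<T*> is scaled by sizeof(T) via _M_type_size above, exactly
 // like raw pointer arithmetic.
 //
 //   int buf[8];
 //   std::atomic<int*> p{buf};
 //
 //   int* q = p.fetch_add(2);   // returns buf, stores buf + 2
 //   p += 1;                    // now buf + 3 (each step scaled by sizeof(int))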
820 
821 #if __cplusplus > 201703L
822  // Implementation details of atomic_ref and atomic<floating-point>.
823  namespace __atomic_impl
824  {
825  // Remove volatile and create a non-deduced context for value arguments.
826  template<typename _Tp>
827  using _Val = remove_volatile_t<_Tp>;
828 
829  // As above, but for difference_type arguments.
830  template<typename _Tp>
831  using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
832 
833  template<size_t _Size, size_t _Align>
834  _GLIBCXX_ALWAYS_INLINE bool
835  is_lock_free() noexcept
836  {
837  // Produce a fake, minimally aligned pointer.
838  return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
839  }
840 
841  template<typename _Tp>
842  _GLIBCXX_ALWAYS_INLINE void
843  store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
844  { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
845 
846  template<typename _Tp>
847  _GLIBCXX_ALWAYS_INLINE _Tp
848  load(_Tp* __ptr, memory_order __m) noexcept
849  {
850  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
851  _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
852  __atomic_load(__ptr, __dest, int(__m));
853  return *__dest;
854  }
855 
856  template<typename _Tp>
857  _GLIBCXX_ALWAYS_INLINE _Tp
858  exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
859  {
860  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
861  _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
862  __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
863  return *__dest;
864  }
865 
866  template<typename _Tp>
867  _GLIBCXX_ALWAYS_INLINE bool
868  compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
869  _Val<_Tp> __desired, memory_order __success,
870  memory_order __failure) noexcept
871  {
872  return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
873  std::__addressof(__desired), true,
874  int(__success), int(__failure));
875  }
876 
877  template<typename _Tp>
878  _GLIBCXX_ALWAYS_INLINE bool
879  compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
880  _Val<_Tp> __desired, memory_order __success,
881  memory_order __failure) noexcept
882  {
883  return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
884  std::__addressof(__desired), false,
885  int(__success), int(__failure));
886  }
887 
888  template<typename _Tp>
889  _GLIBCXX_ALWAYS_INLINE _Tp
890  fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
891  { return __atomic_fetch_add(__ptr, __i, int(__m)); }
892 
893  template<typename _Tp>
894  _GLIBCXX_ALWAYS_INLINE _Tp
895  fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
896  { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
897 
898  template<typename _Tp>
899  _GLIBCXX_ALWAYS_INLINE _Tp
900  fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
901  { return __atomic_fetch_and(__ptr, __i, int(__m)); }
902 
903  template<typename _Tp>
904  _GLIBCXX_ALWAYS_INLINE _Tp
905  fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
906  { return __atomic_fetch_or(__ptr, __i, int(__m)); }
907 
908  template<typename _Tp>
909  _GLIBCXX_ALWAYS_INLINE _Tp
910  fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
911  { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
912 
913  template<typename _Tp>
914  _GLIBCXX_ALWAYS_INLINE _Tp
915  __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
916  { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
917 
918  template<typename _Tp>
919  _GLIBCXX_ALWAYS_INLINE _Tp
920  __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
921  { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
922 
923  template<typename _Tp>
924  _GLIBCXX_ALWAYS_INLINE _Tp
925  __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
926  { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
927 
928  template<typename _Tp>
929  _GLIBCXX_ALWAYS_INLINE _Tp
930  __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
931  { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
932 
933  template<typename _Tp>
934  _GLIBCXX_ALWAYS_INLINE _Tp
935  __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
936  { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
937 
938  template<typename _Tp>
939  _Tp
940  __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
941  {
942  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
943  _Val<_Tp> __newval = __oldval + __i;
944  while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
945  memory_order_relaxed))
946  __newval = __oldval + __i;
947  return __oldval;
948  }
949 
950  template<typename _Tp>
951  _Tp
952  __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
953  {
954  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
955  _Val<_Tp> __newval = __oldval - __i;
956  while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
957  memory_order_relaxed))
958  __newval = __oldval - __i;
959  return __oldval;
960  }
961 
962  template<typename _Tp>
963  _Tp
964  __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
965  {
966  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
967  _Val<_Tp> __newval = __oldval + __i;
968  while (!compare_exchange_weak(__ptr, __oldval, __newval,
969  memory_order_seq_cst,
970  memory_order_relaxed))
971  __newval = __oldval + __i;
972  return __newval;
973  }
974 
975  template<typename _Tp>
976  _Tp
977  __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
978  {
979  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
980  _Val<_Tp> __newval = __oldval - __i;
981  while (!compare_exchange_weak(__ptr, __oldval, __newval,
982  memory_order_seq_cst,
983  memory_order_relaxed))
984  __newval = __oldval - __i;
985  return __newval;
986  }
987  } // namespace __atomic_impl
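 // Note (not part of the original header): __fetch_add_flt and friends above
 // emulate floating-point read-modify-write with a compare-exchange loop,
 // since the __atomic_fetch_* built-ins handle only integral and pointer
 // types. The equivalent pattern written as user code would be:
 //
 //   double fetch_add(std::atomic<double>& a, double d) noexcept
 //   {
 //     double old = a.load(std::memory_order_relaxed);
 //     while (!a.compare_exchange_weak(old, old + d,
 //                                     std::memory_order_seq_cst,
 //                                     std::memory_order_relaxed))
 //       { }       // 'old' is refreshed on failure; retry with the new sum
 //     return old;
 //   }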
988 
989  // base class for atomic<floating-point-type>
990  template<typename _Fp>
991  struct __atomic_float
992  {
993  static_assert(is_floating_point_v<_Fp>);
994 
995  static constexpr size_t _S_alignment = __alignof__(_Fp);
996 
997  public:
998  using value_type = _Fp;
999  using difference_type = value_type;
1000 
1001  static constexpr bool is_always_lock_free
1002  = __atomic_always_lock_free(sizeof(_Fp), 0);
1003 
1004  __atomic_float() = default;
1005 
1006  constexpr
1007  __atomic_float(_Fp __t) : _M_fp(__t)
1008  { }
1009 
1010  __atomic_float(const __atomic_float&) = delete;
1011  __atomic_float& operator=(const __atomic_float&) = delete;
1012  __atomic_float& operator=(const __atomic_float&) volatile = delete;
1013 
1014  _Fp
1015  operator=(_Fp __t) volatile noexcept
1016  {
1017  this->store(__t);
1018  return __t;
1019  }
1020 
1021  _Fp
1022  operator=(_Fp __t) noexcept
1023  {
1024  this->store(__t);
1025  return __t;
1026  }
1027 
1028  bool
1029  is_lock_free() const volatile noexcept
1030  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1031 
1032  bool
1033  is_lock_free() const noexcept
1034  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1035 
1036  void
1037  store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1038  { __atomic_impl::store(&_M_fp, __t, __m); }
1039 
1040  void
1041  store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1042  { __atomic_impl::store(&_M_fp, __t, __m); }
1043 
1044  _Fp
1045  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1046  { return __atomic_impl::load(&_M_fp, __m); }
1047 
1048  _Fp
1049  load(memory_order __m = memory_order_seq_cst) const noexcept
1050  { return __atomic_impl::load(&_M_fp, __m); }
1051 
1052  operator _Fp() const volatile noexcept { return this->load(); }
1053  operator _Fp() const noexcept { return this->load(); }
1054 
1055  _Fp
1056  exchange(_Fp __desired,
1057  memory_order __m = memory_order_seq_cst) volatile noexcept
1058  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1059 
1060  _Fp
1061  exchange(_Fp __desired,
1062  memory_order __m = memory_order_seq_cst) noexcept
1063  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1064 
1065  bool
1066  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1067  memory_order __success,
1068  memory_order __failure) noexcept
1069  {
1070  return __atomic_impl::compare_exchange_weak(&_M_fp,
1071  __expected, __desired,
1072  __success, __failure);
1073  }
1074 
1075  bool
1076  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1077  memory_order __success,
1078  memory_order __failure) volatile noexcept
1079  {
1080  return __atomic_impl::compare_exchange_weak(&_M_fp,
1081  __expected, __desired,
1082  __success, __failure);
1083  }
1084 
1085  bool
1086  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1087  memory_order __success,
1088  memory_order __failure) noexcept
1089  {
1090  return __atomic_impl::compare_exchange_strong(&_M_fp,
1091  __expected, __desired,
1092  __success, __failure);
1093  }
1094 
1095  bool
1096  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1097  memory_order __success,
1098  memory_order __failure) volatile noexcept
1099  {
1100  return __atomic_impl::compare_exchange_strong(&_M_fp,
1101  __expected, __desired,
1102  __success, __failure);
1103  }
1104 
1105  bool
1106  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1107  memory_order __order = memory_order_seq_cst)
1108  noexcept
1109  {
1110  return compare_exchange_weak(__expected, __desired, __order,
1111  __cmpexch_failure_order(__order));
1112  }
1113 
1114  bool
1115  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1116  memory_order __order = memory_order_seq_cst)
1117  volatile noexcept
1118  {
1119  return compare_exchange_weak(__expected, __desired, __order,
1120  __cmpexch_failure_order(__order));
1121  }
1122 
1123  bool
1124  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1125  memory_order __order = memory_order_seq_cst)
1126  noexcept
1127  {
1128  return compare_exchange_strong(__expected, __desired, __order,
1129  __cmpexch_failure_order(__order));
1130  }
1131 
1132  bool
1133  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1134  memory_order __order = memory_order_seq_cst)
1135  volatile noexcept
1136  {
1137  return compare_exchange_strong(__expected, __desired, __order,
1138  __cmpexch_failure_order(__order));
1139  }
1140 
1141  value_type
1142  fetch_add(value_type __i,
1143  memory_order __m = memory_order_seq_cst) noexcept
1144  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1145 
1146  value_type
1147  fetch_add(value_type __i,
1148  memory_order __m = memory_order_seq_cst) volatile noexcept
1149  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1150 
1151  value_type
1152  fetch_sub(value_type __i,
1153  memory_order __m = memory_order_seq_cst) noexcept
1154  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1155 
1156  value_type
1157  fetch_sub(value_type __i,
1158  memory_order __m = memory_order_seq_cst) volatile noexcept
1159  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1160 
1161  value_type
1162  operator+=(value_type __i) noexcept
1163  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1164 
1165  value_type
1166  operator+=(value_type __i) volatile noexcept
1167  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1168 
1169  value_type
1170  operator-=(value_type __i) noexcept
1171  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1172 
1173  value_type
1174  operator-=(value_type __i) volatile noexcept
1175  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1176 
1177  private:
1178  alignas(_S_alignment) _Fp _M_fp;
1179  };
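 // Usage sketch (illustrative, not part of the original header): in C++20
 // this base class gives std::atomic<float/double/long double> fetch_add,
 // fetch_sub and the += / -= operators.
 //
 //   std::atomic<double> total{0.0};
 //
 //   void add_sample(double x) noexcept
 //   { total.fetch_add(x, std::memory_order_relaxed); }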
1180 
1181  template<typename _Tp,
1182  bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
1183  struct __atomic_ref;
1184 
1185  // base class for non-integral, non-floating-point, non-pointer types
1186  template<typename _Tp>
1187  struct __atomic_ref<_Tp, false, false>
1188  {
1189  static_assert(is_trivially_copyable_v<_Tp>);
1190 
1191  // 1/2/4/8/16-byte types must be aligned to at least their size.
1192  static constexpr int _S_min_alignment
1193  = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1194  ? 0 : sizeof(_Tp);
1195 
1196  public:
1197  using value_type = _Tp;
1198 
1199  static constexpr bool is_always_lock_free
1200  = __atomic_always_lock_free(sizeof(_Tp), 0);
1201 
1202  static constexpr size_t required_alignment
1203  = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1204 
1205  __atomic_ref& operator=(const __atomic_ref&) = delete;
1206 
1207  explicit
1208  __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1209  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1210 
1211  __atomic_ref(const __atomic_ref&) noexcept = default;
1212 
1213  _Tp
1214  operator=(_Tp __t) const noexcept
1215  {
1216  this->store(__t);
1217  return __t;
1218  }
1219 
1220  operator _Tp() const noexcept { return this->load(); }
1221 
1222  bool
1223  is_lock_free() const noexcept
1224  { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1225 
1226  void
1227  store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1228  { __atomic_impl::store(_M_ptr, __t, __m); }
1229 
1230  _Tp
1231  load(memory_order __m = memory_order_seq_cst) const noexcept
1232  { return __atomic_impl::load(_M_ptr, __m); }
1233 
1234  _Tp
1235  exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1236  const noexcept
1237  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1238 
1239  bool
1240  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1241  memory_order __success,
1242  memory_order __failure) const noexcept
1243  {
1244  return __atomic_impl::compare_exchange_weak(_M_ptr,
1245  __expected, __desired,
1246  __success, __failure);
1247  }
1248 
1249  bool
1250  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1251  memory_order __success,
1252  memory_order __failure) const noexcept
1253  {
1254  return __atomic_impl::compare_exchange_strong(_M_ptr,
1255  __expected, __desired,
1256  __success, __failure);
1257  }
1258 
1259  bool
1260  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1261  memory_order __order = memory_order_seq_cst)
1262  const noexcept
1263  {
1264  return compare_exchange_weak(__expected, __desired, __order,
1265  __cmpexch_failure_order(__order));
1266  }
1267 
1268  bool
1269  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1270  memory_order __order = memory_order_seq_cst)
1271  const noexcept
1272  {
1273  return compare_exchange_strong(__expected, __desired, __order,
1274  __cmpexch_failure_order(__order));
1275  }
1276 
1277  private:
1278  _Tp* _M_ptr;
1279  };
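 // Usage sketch (illustrative, not part of the original header; Point and
 // move_right are made-up names): the C++20 std::atomic_ref built on this
 // base performs atomic operations on an ordinary object, which must remain
 // suitably aligned and free of concurrent non-atomic access while referenced.
 //
 //   struct Point { int x, y; };                    // trivially copyable
 //   alignas(std::atomic_ref<Point>::required_alignment) Point pt{0, 0};
 //
 //   void move_right(Point& p) noexcept
 //   {
 //     std::atomic_ref<Point> r(p);
 //     Point old = r.load();
 //     while (!r.compare_exchange_weak(old, Point{old.x + 1, old.y}))
 //       { }
 //   }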
1280 
1281  // base class for atomic_ref<integral-type>
1282  template<typename _Tp>
1283  struct __atomic_ref<_Tp, true, false>
1284  {
1285  static_assert(is_integral_v<_Tp>);
1286 
1287  public:
1288  using value_type = _Tp;
1289  using difference_type = value_type;
1290 
1291  static constexpr bool is_always_lock_free
1292  = __atomic_always_lock_free(sizeof(_Tp), 0);
1293 
1294  static constexpr size_t required_alignment
1295  = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1296 
1297  __atomic_ref() = delete;
1298  __atomic_ref& operator=(const __atomic_ref&) = delete;
1299 
1300  explicit
1301  __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1302  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1303 
1304  __atomic_ref(const __atomic_ref&) noexcept = default;
1305 
1306  _Tp
1307  operator=(_Tp __t) const noexcept
1308  {
1309  this->store(__t);
1310  return __t;
1311  }
1312 
1313  operator _Tp() const noexcept { return this->load(); }
1314 
1315  bool
1316  is_lock_free() const noexcept
1317  {
1318  return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1319  }
1320 
1321  void
1322  store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1323  { __atomic_impl::store(_M_ptr, __t, __m); }
1324 
1325  _Tp
1326  load(memory_order __m = memory_order_seq_cst) const noexcept
1327  { return __atomic_impl::load(_M_ptr, __m); }
1328 
1329  _Tp
1330  exchange(_Tp __desired,
1331  memory_order __m = memory_order_seq_cst) const noexcept
1332  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1333 
1334  bool
1335  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1336  memory_order __success,
1337  memory_order __failure) const noexcept
1338  {
1339  return __atomic_impl::compare_exchange_weak(_M_ptr,
1340  __expected, __desired,
1341  __success, __failure);
1342  }
1343 
1344  bool
1345  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1346  memory_order __success,
1347  memory_order __failure) const noexcept
1348  {
1349  return __atomic_impl::compare_exchange_strong(_M_ptr,
1350  __expected, __desired,
1351  __success, __failure);
1352  }
1353 
1354  bool
1355  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1356  memory_order __order = memory_order_seq_cst)
1357  const noexcept
1358  {
1359  return compare_exchange_weak(__expected, __desired, __order,
1360  __cmpexch_failure_order(__order));
1361  }
1362 
1363  bool
1364  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1365  memory_order __order = memory_order_seq_cst)
1366  const noexcept
1367  {
1368  return compare_exchange_strong(__expected, __desired, __order,
1369  __cmpexch_failure_order(__order));
1370  }
1371 
1372  value_type
1373  fetch_add(value_type __i,
1374  memory_order __m = memory_order_seq_cst) const noexcept
1375  { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1376 
1377  value_type
1378  fetch_sub(value_type __i,
1379  memory_order __m = memory_order_seq_cst) const noexcept
1380  { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1381 
1382  value_type
1383  fetch_and(value_type __i,
1384  memory_order __m = memory_order_seq_cst) const noexcept
1385  { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1386 
1387  value_type
1388  fetch_or(value_type __i,
1389  memory_order __m = memory_order_seq_cst) const noexcept
1390  { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1391 
1392  value_type
1393  fetch_xor(value_type __i,
1394  memory_order __m = memory_order_seq_cst) const noexcept
1395  { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1396 
1397  _GLIBCXX_ALWAYS_INLINE value_type
1398  operator++(int) const noexcept
1399  { return fetch_add(1); }
1400 
1401  _GLIBCXX_ALWAYS_INLINE value_type
1402  operator--(int) const noexcept
1403  { return fetch_sub(1); }
1404 
1405  value_type
1406  operator++() const noexcept
1407  { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1408 
1409  value_type
1410  operator--() const noexcept
1411  { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1412 
1413  value_type
1414  operator+=(value_type __i) const noexcept
1415  { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1416 
1417  value_type
1418  operator-=(value_type __i) const noexcept
1419  { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1420 
1421  value_type
1422  operator&=(value_type __i) const noexcept
1423  { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1424 
1425  value_type
1426  operator|=(value_type __i) const noexcept
1427  { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1428 
1429  value_type
1430  operator^=(value_type __i) const noexcept
1431  { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1432 
1433  private:
1434  _Tp* _M_ptr;
1435  };
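 // Usage sketch (illustrative, not part of the original header): the integral
 // fetch operations above, reached through std::atomic_ref over a plain int.
 //
 //   void count_hits(int& counter) noexcept
 //   {
 //     std::atomic_ref<int> r(counter);
 //     r.fetch_add(1, std::memory_order_relaxed);
 //   }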
1436 
1437  // base class for atomic_ref<floating-point-type>
1438  template<typename _Fp>
1439  struct __atomic_ref<_Fp, false, true>
1440  {
1441  static_assert(is_floating_point_v<_Fp>);
1442 
1443  public:
1444  using value_type = _Fp;
1445  using difference_type = value_type;
1446 
1447  static constexpr bool is_always_lock_free
1448  = __atomic_always_lock_free(sizeof(_Fp), 0);
1449 
1450  static constexpr size_t required_alignment = __alignof__(_Fp);
1451 
1452  __atomic_ref() = delete;
1453  __atomic_ref& operator=(const __atomic_ref&) = delete;
1454 
1455  explicit
1456  __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1457  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1458 
1459  __atomic_ref(const __atomic_ref&) noexcept = default;
1460 
1461  _Fp
1462  operator=(_Fp __t) const noexcept
1463  {
1464  this->store(__t);
1465  return __t;
1466  }
1467 
1468  operator _Fp() const noexcept { return this->load(); }
1469 
1470  bool
1471  is_lock_free() const noexcept
1472  {
1473  return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1474  }
1475 
1476  void
1477  store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1478  { __atomic_impl::store(_M_ptr, __t, __m); }
1479 
1480  _Fp
1481  load(memory_order __m = memory_order_seq_cst) const noexcept
1482  { return __atomic_impl::load(_M_ptr, __m); }
1483 
1484  _Fp
1485  exchange(_Fp __desired,
1486  memory_order __m = memory_order_seq_cst) const noexcept
1487  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1488 
1489  bool
1490  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1491  memory_order __success,
1492  memory_order __failure) const noexcept
1493  {
1494  return __atomic_impl::compare_exchange_weak(_M_ptr,
1495  __expected, __desired,
1496  __success, __failure);
1497  }
1498 
1499  bool
1500  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1501  memory_order __success,
1502  memory_order __failure) const noexcept
1503  {
1504  return __atomic_impl::compare_exchange_strong(_M_ptr,
1505  __expected, __desired,
1506  __success, __failure);
1507  }
1508 
1509  bool
1510  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1511  memory_order __order = memory_order_seq_cst)
1512  const noexcept
1513  {
1514  return compare_exchange_weak(__expected, __desired, __order,
1515  __cmpexch_failure_order(__order));
1516  }
1517 
1518  bool
1519  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1520  memory_order __order = memory_order_seq_cst)
1521  const noexcept
1522  {
1523  return compare_exchange_strong(__expected, __desired, __order,
1524  __cmpexch_failure_order(__order));
1525  }
1526 
1527  value_type
1528  fetch_add(value_type __i,
1529  memory_order __m = memory_order_seq_cst) const noexcept
1530  { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1531 
1532  value_type
1533  fetch_sub(value_type __i,
1534  memory_order __m = memory_order_seq_cst) const noexcept
1535  { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1536 
1537  value_type
1538  operator+=(value_type __i) const noexcept
1539  { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1540 
1541  value_type
1542  operator-=(value_type __i) const noexcept
1543  { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1544 
1545  private:
1546  _Fp* _M_ptr;
1547  };
1548 
1549  // base class for atomic_ref<pointer-type>
1550  template<typename _Tp>
1551  struct __atomic_ref<_Tp*, false, false>
1552  {
1553  public:
1554  using value_type = _Tp*;
1555  using difference_type = ptrdiff_t;
1556 
1557  static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1558 
1559  static constexpr size_t required_alignment = __alignof__(_Tp*);
1560 
1561  __atomic_ref() = delete;
1562  __atomic_ref& operator=(const __atomic_ref&) = delete;
1563 
1564  explicit
1565  __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1566  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1567 
1568  __atomic_ref(const __atomic_ref&) noexcept = default;
1569 
1570  _Tp*
1571  operator=(_Tp* __t) const noexcept
1572  {
1573  this->store(__t);
1574  return __t;
1575  }
1576 
1577  operator _Tp*() const noexcept { return this->load(); }
1578 
1579  bool
1580  is_lock_free() const noexcept
1581  {
1582  return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1583  }
1584 
1585  void
1586  store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1587  { __atomic_impl::store(_M_ptr, __t, __m); }
1588 
1589  _Tp*
1590  load(memory_order __m = memory_order_seq_cst) const noexcept
1591  { return __atomic_impl::load(_M_ptr, __m); }
1592 
1593  _Tp*
1594  exchange(_Tp* __desired,
1595  memory_order __m = memory_order_seq_cst) const noexcept
1596  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1597 
1598  bool
1599  compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1600  memory_order __success,
1601  memory_order __failure) const noexcept
1602  {
1603  return __atomic_impl::compare_exchange_weak(_M_ptr,
1604  __expected, __desired,
1605  __success, __failure);
1606  }
1607 
1608  bool
1609  compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1610  memory_order __success,
1611  memory_order __failure) const noexcept
1612  {
1613  return __atomic_impl::compare_exchange_strong(_M_ptr,
1614  __expected, __desired,
1615  __success, __failure);
1616  }
1617 
1618  bool
1619  compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1620  memory_order __order = memory_order_seq_cst)
1621  const noexcept
1622  {
1623  return compare_exchange_weak(__expected, __desired, __order,
1624  __cmpexch_failure_order(__order));
1625  }
1626 
1627  bool
1628  compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1629  memory_order __order = memory_order_seq_cst)
1630  const noexcept
1631  {
1632  return compare_exchange_strong(__expected, __desired, __order,
1633  __cmpexch_failure_order(__order));
1634  }
1635 
1636  _GLIBCXX_ALWAYS_INLINE value_type
1637  fetch_add(difference_type __d,
1638  memory_order __m = memory_order_seq_cst) const noexcept
1639  { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
1640 
1641  _GLIBCXX_ALWAYS_INLINE value_type
1642  fetch_sub(difference_type __d,
1643  memory_order __m = memory_order_seq_cst) const noexcept
1644  { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
1645 
1646  value_type
1647  operator++(int) const noexcept
1648  { return fetch_add(1); }
1649 
1650  value_type
1651  operator--(int) const noexcept
1652  { return fetch_sub(1); }
1653 
1654  value_type
1655  operator++() const noexcept
1656  {
1657  return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
1658  }
1659 
1660  value_type
1661  operator--() const noexcept
1662  {
1663  return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
1664  }
1665 
1666  value_type
1667  operator+=(difference_type __d) const noexcept
1668  {
1669  return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
1670  }
1671 
1672  value_type
1673  operator-=(difference_type __d) const noexcept
1674  {
1675  return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
1676  }
1677 
1678  private:
1679  static constexpr ptrdiff_t
1680  _S_type_size(ptrdiff_t __d) noexcept
1681  {
1682  static_assert(is_object_v<_Tp>);
1683  return __d * sizeof(_Tp);
1684  }
1685 
1686  _Tp** _M_ptr;
1687  };
1688 
1689 #endif // C++2a
1690 
1691  // @} group atomics
1692 
1693 _GLIBCXX_END_NAMESPACE_VERSION
1694 } // namespace std
1695 
1696 #endif