1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2024 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#pragma GCC system_header
34
35#include <bits/c++config.h>
36#include <new> // For placement new
37#include <stdint.h>
38#include <bits/atomic_lockfree_defines.h>
39#include <bits/move.h>
40
41#if __cplusplus > 201703L && _GLIBCXX_HOSTED
42#include <bits/atomic_wait.h>
43#endif
44
45#ifndef _GLIBCXX_ALWAYS_INLINE
46#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
47#endif
48
49#include <bits/version.h>
50
51namespace std _GLIBCXX_VISIBILITY(default)
52{
53_GLIBCXX_BEGIN_NAMESPACE_VERSION
54
55 /**
56 * @defgroup atomics Atomics
57 *
58 * Components for performing atomic operations.
59 * @{
60 */
61
62 /// Enumeration for memory_order
63#if __cplusplus > 201703L
64 enum class memory_order : int
65 {
66 relaxed,
67 consume,
68 acquire,
69 release,
70 acq_rel,
71 seq_cst
72 };
73
74 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
75 inline constexpr memory_order memory_order_consume = memory_order::consume;
76 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
77 inline constexpr memory_order memory_order_release = memory_order::release;
78 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
79 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
80#else
81 typedef enum memory_order
82 {
83 memory_order_relaxed,
84 memory_order_consume,
85 memory_order_acquire,
86 memory_order_release,
87 memory_order_acq_rel,
88 memory_order_seq_cst
89 } memory_order;
90#endif
91
92 /// @cond undocumented
93 enum __memory_order_modifier
94 {
95 __memory_order_mask = 0x0ffff,
96 __memory_order_modifier_mask = 0xffff0000,
97 __memory_order_hle_acquire = 0x10000,
98 __memory_order_hle_release = 0x20000
99 };
100 /// @endcond
101
102 constexpr memory_order
103 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
104 {
105 return memory_order(int(__m) | int(__mod));
106 }
107
108 constexpr memory_order
109 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
110 {
111 return memory_order(int(__m) & int(__mod));
112 }
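  // Illustrative sketch only (internal detail): the HLE modifier bits sit
  // above __memory_order_mask, so a combined value still yields the plain
  // ordering when masked, e.g.
  //
  //   memory_order m = memory_order_acquire | __memory_order_hle_acquire;
  //   memory_order plain = m & __memory_order_mask;   // memory_order_acquire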
113
114 /// @cond undocumented
115
116 // Drop release ordering as per [atomics.types.operations.req]/21
117 constexpr memory_order
118 __cmpexch_failure_order2(memory_order __m) noexcept
119 {
120 return __m == memory_order_acq_rel ? memory_order_acquire
121 : __m == memory_order_release ? memory_order_relaxed : __m;
122 }
123
124 constexpr memory_order
125 __cmpexch_failure_order(memory_order __m) noexcept
126 {
127 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
128 | __memory_order_modifier(__m & __memory_order_modifier_mask));
129 }
130
131 constexpr bool
132 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
133 {
134 return (__m & __memory_order_mask) != memory_order_release
135 && (__m & __memory_order_mask) != memory_order_acq_rel;
136 }
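  // For illustration: the mapping above only ever weakens the ordering, e.g.
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  // and any other ordering is passed through unchanged.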
137
138 // Base types for atomics.
139 template<typename _IntTp>
140 struct __atomic_base;
141
142 /// @endcond
143
144 _GLIBCXX_ALWAYS_INLINE void
145 atomic_thread_fence(memory_order __m) noexcept
146 { __atomic_thread_fence(int(__m)); }
147
148 _GLIBCXX_ALWAYS_INLINE void
149 atomic_signal_fence(memory_order __m) noexcept
150 { __atomic_signal_fence(int(__m)); }
151
152 /// kill_dependency
153 template<typename _Tp>
154 inline _Tp
155 kill_dependency(_Tp __y) noexcept
156 {
157 _Tp __ret(__y);
158 return __ret;
159 }
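  // A minimal usage sketch (illustrative only): kill_dependency returns its
  // argument while ending the dependency chain started by a consume load.
  //
  //   extern std::atomic<int*> p;                    // written elsewhere
  //   int* q = p.load(std::memory_order_consume);
  //   int r = std::kill_dependency(*q);  // r no longer carries a dependency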
160
161/// @cond undocumented
162#if __glibcxx_atomic_value_initialization
163# define _GLIBCXX20_INIT(I) = I
164#else
165# define _GLIBCXX20_INIT(I)
166#endif
167/// @endcond
168
169#define ATOMIC_VAR_INIT(_VI) { _VI }
170
171 template<typename _Tp>
172 struct atomic;
173
174 template<typename _Tp>
175 struct atomic<_Tp*>;
176
177 /* The target's "set" value for test-and-set may not be exactly 1. */
178#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
179 typedef bool __atomic_flag_data_type;
180#else
181 typedef unsigned char __atomic_flag_data_type;
182#endif
183
184 /// @cond undocumented
185
186 /*
187 * Base type for atomic_flag.
188 *
189 * Base type is POD with data, allowing atomic_flag to derive from
190 * it and meet the standard layout type requirement. In addition to
191 * compatibility with a C interface, this allows different
192 * implementations of atomic_flag to use the same atomic operation
193 * functions, via a standard conversion to the __atomic_flag_base
194 * argument.
195 */
196 _GLIBCXX_BEGIN_EXTERN_C
197
198 struct __atomic_flag_base
199 {
200 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
201 };
202
203 _GLIBCXX_END_EXTERN_C
204
205 /// @endcond
206
207#define ATOMIC_FLAG_INIT { 0 }
208
209 /// atomic_flag
210 struct atomic_flag : public __atomic_flag_base
211 {
212 atomic_flag() noexcept = default;
213 ~atomic_flag() noexcept = default;
214 atomic_flag(const atomic_flag&) = delete;
215 atomic_flag& operator=(const atomic_flag&) = delete;
216 atomic_flag& operator=(const atomic_flag&) volatile = delete;
217
218 // Conversion to ATOMIC_FLAG_INIT.
219 constexpr atomic_flag(bool __i) noexcept
220 : __atomic_flag_base{ _S_init(__i) }
221 { }
222
223 _GLIBCXX_ALWAYS_INLINE bool
224 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
225 {
226 return __atomic_test_and_set (&_M_i, int(__m));
227 }
228
229 _GLIBCXX_ALWAYS_INLINE bool
230 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
231 {
232 return __atomic_test_and_set (&_M_i, int(__m));
233 }
234
235#ifdef __glibcxx_atomic_flag_test // C++ >= 20
236 _GLIBCXX_ALWAYS_INLINE bool
237 test(memory_order __m = memory_order_seq_cst) const noexcept
238 {
239 __atomic_flag_data_type __v;
240 __atomic_load(&_M_i, &__v, int(__m));
241 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
242 }
243
244 _GLIBCXX_ALWAYS_INLINE bool
245 test(memory_order __m = memory_order_seq_cst) const volatile noexcept
246 {
247 __atomic_flag_data_type __v;
248 __atomic_load(&_M_i, &__v, int(__m));
249 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
250 }
251#endif
252
253#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
254 _GLIBCXX_ALWAYS_INLINE void
255 wait(bool __old,
256 memory_order __m = memory_order_seq_cst) const noexcept
257 {
258 const __atomic_flag_data_type __v
259 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
260
261 std::__atomic_wait_address_v(&_M_i, __v,
262 [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
263 }
264
265 // TODO add const volatile overload
266
267 _GLIBCXX_ALWAYS_INLINE void
268 notify_one() noexcept
269 { std::__atomic_notify_address(&_M_i, false); }
270
271 // TODO add const volatile overload
272
273 _GLIBCXX_ALWAYS_INLINE void
274 notify_all() noexcept
275 { std::__atomic_notify_address(&_M_i, true); }
276
277 // TODO add const volatile overload
278#endif // __glibcxx_atomic_wait
279
280 _GLIBCXX_ALWAYS_INLINE void
281 clear(memory_order __m = memory_order_seq_cst) noexcept
282 {
283 memory_order __b __attribute__ ((__unused__))
284 = __m & __memory_order_mask;
285 __glibcxx_assert(__b != memory_order_consume);
286 __glibcxx_assert(__b != memory_order_acquire);
287 __glibcxx_assert(__b != memory_order_acq_rel);
288
289 __atomic_clear (&_M_i, int(__m));
290 }
291
292 _GLIBCXX_ALWAYS_INLINE void
293 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
294 {
295 memory_order __b __attribute__ ((__unused__))
296 = __m & __memory_order_mask;
297 __glibcxx_assert(__b != memory_order_consume);
298 __glibcxx_assert(__b != memory_order_acquire);
299 __glibcxx_assert(__b != memory_order_acq_rel);
300
301 __atomic_clear (&_M_i, int(__m));
302 }
303
304 private:
305 static constexpr __atomic_flag_data_type
306 _S_init(bool __i)
307 { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
308 };
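  // A minimal spin-lock sketch using the public std::atomic_flag interface
  // (illustrative only; the names are hypothetical):
  //
  //   struct spin_lock
  //   {
  //     void lock() noexcept
  //     { while (_M_flag.test_and_set(std::memory_order_acquire)) { } }
  //
  //     void unlock() noexcept
  //     { _M_flag.clear(std::memory_order_release); }
  //
  //     std::atomic_flag _M_flag = ATOMIC_FLAG_INIT;
  //   };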
309
310 /// @cond undocumented
311
312 /// Base class for atomic integrals.
313 //
314 // For each of the integral types, define atomic_[integral type] struct
315 //
316 // atomic_bool bool
317 // atomic_char char
318 // atomic_schar signed char
319 // atomic_uchar unsigned char
320 // atomic_short short
321 // atomic_ushort unsigned short
322 // atomic_int int
323 // atomic_uint unsigned int
324 // atomic_long long
325 // atomic_ulong unsigned long
326 // atomic_llong long long
327 // atomic_ullong unsigned long long
328 // atomic_char8_t char8_t
329 // atomic_char16_t char16_t
330 // atomic_char32_t char32_t
331 // atomic_wchar_t wchar_t
332 //
333 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
334 // 8 bytes, since that is what GCC built-in functions for atomic
335 // memory access expect.
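  //
  // For reference, the public std::atomic typedefs built on this base behave
  // as in the following sketch (illustrative only):
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed); // returns the old value
  //   ++counter;                                       // seq_cst add-and-fetch
  //   int snapshot = counter.load(std::memory_order_acquire);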
336 template<typename _ITp>
337 struct __atomic_base
338 {
339 using value_type = _ITp;
340 using difference_type = value_type;
341
342 private:
343 typedef _ITp __int_type;
344
345 static constexpr int _S_alignment =
346 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
347
348 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
349
350 public:
351 __atomic_base() noexcept = default;
352 ~__atomic_base() noexcept = default;
353 __atomic_base(const __atomic_base&) = delete;
354 __atomic_base& operator=(const __atomic_base&) = delete;
355 __atomic_base& operator=(const __atomic_base&) volatile = delete;
356
357 // Requires __int_type convertible to _M_i.
358 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
359
360 operator __int_type() const noexcept
361 { return load(); }
362
363 operator __int_type() const volatile noexcept
364 { return load(); }
365
366 __int_type
367 operator=(__int_type __i) noexcept
368 {
369 store(__i);
370 return __i;
371 }
372
373 __int_type
374 operator=(__int_type __i) volatile noexcept
375 {
376 store(__i);
377 return __i;
378 }
379
380 __int_type
381 operator++(int) noexcept
382 { return fetch_add(1); }
383
384 __int_type
385 operator++(int) volatile noexcept
386 { return fetch_add(1); }
387
388 __int_type
389 operator--(int) noexcept
390 { return fetch_sub(1); }
391
392 __int_type
393 operator--(int) volatile noexcept
394 { return fetch_sub(1); }
395
396 __int_type
397 operator++() noexcept
398 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
399
400 __int_type
401 operator++() volatile noexcept
402 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
403
404 __int_type
405 operator--() noexcept
406 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
407
408 __int_type
409 operator--() volatile noexcept
410 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
411
412 __int_type
413 operator+=(__int_type __i) noexcept
414 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
415
416 __int_type
417 operator+=(__int_type __i) volatile noexcept
418 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
419
420 __int_type
421 operator-=(__int_type __i) noexcept
422 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
423
424 __int_type
425 operator-=(__int_type __i) volatile noexcept
426 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
427
428 __int_type
429 operator&=(__int_type __i) noexcept
430 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
431
432 __int_type
433 operator&=(__int_type __i) volatile noexcept
434 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
435
436 __int_type
437 operator|=(__int_type __i) noexcept
438 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
439
440 __int_type
441 operator|=(__int_type __i) volatile noexcept
442 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
443
444 __int_type
445 operator^=(__int_type __i) noexcept
446 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
447
448 __int_type
449 operator^=(__int_type __i) volatile noexcept
450 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
451
452 bool
453 is_lock_free() const noexcept
454 {
455 // Use a fake, minimally aligned pointer.
456 return __atomic_is_lock_free(sizeof(_M_i),
457 reinterpret_cast<void *>(-_S_alignment));
458 }
459
460 bool
461 is_lock_free() const volatile noexcept
462 {
463 // Use a fake, minimally aligned pointer.
464 return __atomic_is_lock_free(sizeof(_M_i),
465 reinterpret_cast<void *>(-_S_alignment));
466 }
467
468 _GLIBCXX_ALWAYS_INLINE void
469 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
470 {
471 memory_order __b __attribute__ ((__unused__))
472 = __m & __memory_order_mask;
473 __glibcxx_assert(__b != memory_order_acquire);
474 __glibcxx_assert(__b != memory_order_acq_rel);
475 __glibcxx_assert(__b != memory_order_consume);
476
477 __atomic_store_n(&_M_i, __i, int(__m));
478 }
479
480 _GLIBCXX_ALWAYS_INLINE void
481 store(__int_type __i,
482 memory_order __m = memory_order_seq_cst) volatile noexcept
483 {
484 memory_order __b __attribute__ ((__unused__))
485 = __m & __memory_order_mask;
486 __glibcxx_assert(__b != memory_order_acquire);
487 __glibcxx_assert(__b != memory_order_acq_rel);
488 __glibcxx_assert(__b != memory_order_consume);
489
490 __atomic_store_n(&_M_i, __i, int(__m));
491 }
492
493 _GLIBCXX_ALWAYS_INLINE __int_type
494 load(memory_order __m = memory_order_seq_cst) const noexcept
495 {
496 memory_order __b __attribute__ ((__unused__))
497 = __m & __memory_order_mask;
498 __glibcxx_assert(__b != memory_order_release);
499 __glibcxx_assert(__b != memory_order_acq_rel);
500
501 return __atomic_load_n(&_M_i, int(__m));
502 }
503
504 _GLIBCXX_ALWAYS_INLINE __int_type
505 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
506 {
507 memory_order __b __attribute__ ((__unused__))
508 = __m & __memory_order_mask;
509 __glibcxx_assert(__b != memory_order_release);
510 __glibcxx_assert(__b != memory_order_acq_rel);
511
512 return __atomic_load_n(&_M_i, int(__m));
513 }
514
515 _GLIBCXX_ALWAYS_INLINE __int_type
516 exchange(__int_type __i,
517 memory_order __m = memory_order_seq_cst) noexcept
518 {
519 return __atomic_exchange_n(&_M_i, __i, int(__m));
520 }
521
522
523 _GLIBCXX_ALWAYS_INLINE __int_type
524 exchange(__int_type __i,
525 memory_order __m = memory_order_seq_cst) volatile noexcept
526 {
527 return __atomic_exchange_n(&_M_i, __i, int(__m));
528 }
529
530 _GLIBCXX_ALWAYS_INLINE bool
531 compare_exchange_weak(__int_type& __i1, __int_type __i2,
532 memory_order __m1, memory_order __m2) noexcept
533 {
534 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
535
536 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
537 int(__m1), int(__m2));
538 }
539
540 _GLIBCXX_ALWAYS_INLINE bool
541 compare_exchange_weak(__int_type& __i1, __int_type __i2,
542 memory_order __m1,
543 memory_order __m2) volatile noexcept
544 {
545 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
546
547 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
548 int(__m1), int(__m2));
549 }
550
551 _GLIBCXX_ALWAYS_INLINE bool
552 compare_exchange_weak(__int_type& __i1, __int_type __i2,
553 memory_order __m = memory_order_seq_cst) noexcept
554 {
555 return compare_exchange_weak(__i1, __i2, __m,
556 __cmpexch_failure_order(__m));
557 }
558
559 _GLIBCXX_ALWAYS_INLINE bool
560 compare_exchange_weak(__int_type& __i1, __int_type __i2,
561 memory_order __m = memory_order_seq_cst) volatile noexcept
562 {
563 return compare_exchange_weak(__i1, __i2, __m,
564 __cmpexch_failure_order(__m));
565 }
566
567 _GLIBCXX_ALWAYS_INLINE bool
568 compare_exchange_strong(__int_type& __i1, __int_type __i2,
569 memory_order __m1, memory_order __m2) noexcept
570 {
571 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
572
573 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
574 int(__m1), int(__m2));
575 }
576
577 _GLIBCXX_ALWAYS_INLINE bool
578 compare_exchange_strong(__int_type& __i1, __int_type __i2,
579 memory_order __m1,
580 memory_order __m2) volatile noexcept
581 {
582 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
583
584 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
585 int(__m1), int(__m2));
586 }
587
588 _GLIBCXX_ALWAYS_INLINE bool
589 compare_exchange_strong(__int_type& __i1, __int_type __i2,
590 memory_order __m = memory_order_seq_cst) noexcept
591 {
592 return compare_exchange_strong(__i1, __i2, __m,
593 __cmpexch_failure_order(__m));
594 }
595
596 _GLIBCXX_ALWAYS_INLINE bool
597 compare_exchange_strong(__int_type& __i1, __int_type __i2,
598 memory_order __m = memory_order_seq_cst) volatile noexcept
599 {
600 return compare_exchange_strong(__i1, __i2, __m,
601 __cmpexch_failure_order(__m));
602 }
603
604#if __glibcxx_atomic_wait
605 _GLIBCXX_ALWAYS_INLINE void
606 wait(__int_type __old,
607 memory_order __m = memory_order_seq_cst) const noexcept
608 {
609 std::__atomic_wait_address_v(&_M_i, __old,
610 [__m, this] { return this->load(__m); });
611 }
612
613 // TODO add const volatile overload
614
615 _GLIBCXX_ALWAYS_INLINE void
616 notify_one() noexcept
617 { std::__atomic_notify_address(&_M_i, false); }
618
619 // TODO add const volatile overload
620
621 _GLIBCXX_ALWAYS_INLINE void
622 notify_all() noexcept
623 { std::__atomic_notify_address(&_M_i, true); }
624
625 // TODO add const volatile overload
626#endif // __glibcxx_atomic_wait
627
628 _GLIBCXX_ALWAYS_INLINE __int_type
629 fetch_add(__int_type __i,
630 memory_order __m = memory_order_seq_cst) noexcept
631 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
632
633 _GLIBCXX_ALWAYS_INLINE __int_type
634 fetch_add(__int_type __i,
635 memory_order __m = memory_order_seq_cst) volatile noexcept
636 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
637
638 _GLIBCXX_ALWAYS_INLINE __int_type
639 fetch_sub(__int_type __i,
640 memory_order __m = memory_order_seq_cst) noexcept
641 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
642
643 _GLIBCXX_ALWAYS_INLINE __int_type
644 fetch_sub(__int_type __i,
645 memory_order __m = memory_order_seq_cst) volatile noexcept
646 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
647
648 _GLIBCXX_ALWAYS_INLINE __int_type
649 fetch_and(__int_type __i,
650 memory_order __m = memory_order_seq_cst) noexcept
651 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
652
653 _GLIBCXX_ALWAYS_INLINE __int_type
654 fetch_and(__int_type __i,
655 memory_order __m = memory_order_seq_cst) volatile noexcept
656 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
657
658 _GLIBCXX_ALWAYS_INLINE __int_type
659 fetch_or(__int_type __i,
660 memory_order __m = memory_order_seq_cst) noexcept
661 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
662
663 _GLIBCXX_ALWAYS_INLINE __int_type
664 fetch_or(__int_type __i,
665 memory_order __m = memory_order_seq_cst) volatile noexcept
666 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
667
668 _GLIBCXX_ALWAYS_INLINE __int_type
669 fetch_xor(__int_type __i,
670 memory_order __m = memory_order_seq_cst) noexcept
671 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
672
673 _GLIBCXX_ALWAYS_INLINE __int_type
674 fetch_xor(__int_type __i,
675 memory_order __m = memory_order_seq_cst) volatile noexcept
676 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
677 };
678
679
680 /// Partial specialization for pointer types.
681 template<typename _PTp>
682 struct __atomic_base<_PTp*>
683 {
684 private:
685 typedef _PTp* __pointer_type;
686
687 __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
688
689 // Factored out to facilitate explicit specialization.
690 constexpr ptrdiff_t
691 _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
692
693 constexpr ptrdiff_t
694 _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
695
696 public:
697 __atomic_base() noexcept = default;
698 ~__atomic_base() noexcept = default;
699 __atomic_base(const __atomic_base&) = delete;
700 __atomic_base& operator=(const __atomic_base&) = delete;
701 __atomic_base& operator=(const __atomic_base&) volatile = delete;
702
703 // Requires __pointer_type convertible to _M_p.
704 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
705
706 operator __pointer_type() const noexcept
707 { return load(); }
708
709 operator __pointer_type() const volatile noexcept
710 { return load(); }
711
712 __pointer_type
713 operator=(__pointer_type __p) noexcept
714 {
715 store(__p);
716 return __p;
717 }
718
719 __pointer_type
720 operator=(__pointer_type __p) volatile noexcept
721 {
722 store(__p);
723 return __p;
724 }
725
726 __pointer_type
727 operator++(int) noexcept
728 { return fetch_add(1); }
729
730 __pointer_type
731 operator++(int) volatile noexcept
732 { return fetch_add(1); }
733
734 __pointer_type
735 operator--(int) noexcept
736 { return fetch_sub(1); }
737
738 __pointer_type
739 operator--(int) volatile noexcept
740 { return fetch_sub(1); }
741
742 __pointer_type
743 operator++() noexcept
744 { return __atomic_add_fetch(&_M_p, _M_type_size(1),
745 int(memory_order_seq_cst)); }
746
747 __pointer_type
748 operator++() volatile noexcept
749 { return __atomic_add_fetch(&_M_p, _M_type_size(1),
750 int(memory_order_seq_cst)); }
751
752 __pointer_type
753 operator--() noexcept
754 { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
755 int(memory_order_seq_cst)); }
756
757 __pointer_type
758 operator--() volatile noexcept
759 { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
760 int(memory_order_seq_cst)); }
761
762 __pointer_type
763 operator+=(ptrdiff_t __d) noexcept
764 { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
765 int(memory_order_seq_cst)); }
766
767 __pointer_type
768 operator+=(ptrdiff_t __d) volatile noexcept
769 { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
770 int(memory_order_seq_cst)); }
771
772 __pointer_type
773 operator-=(ptrdiff_t __d) noexcept
774 { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
775 int(memory_order_seq_cst)); }
776
777 __pointer_type
778 operator-=(ptrdiff_t __d) volatile noexcept
779 { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
780 int(memory_order_seq_cst)); }
781
782 bool
783 is_lock_free() const noexcept
784 {
785 // Produce a fake, minimally aligned pointer.
786 return __atomic_is_lock_free(sizeof(_M_p),
787 reinterpret_cast<void *>(-__alignof(_M_p)));
788 }
789
790 bool
791 is_lock_free() const volatile noexcept
792 {
793 // Produce a fake, minimally aligned pointer.
794 return __atomic_is_lock_free(sizeof(_M_p),
795 reinterpret_cast<void *>(-__alignof(_M_p)));
796 }
797
798 _GLIBCXX_ALWAYS_INLINE void
799 store(__pointer_type __p,
800 memory_order __m = memory_order_seq_cst) noexcept
801 {
802 memory_order __b __attribute__ ((__unused__))
803 = __m & __memory_order_mask;
804
805 __glibcxx_assert(__b != memory_order_acquire);
806 __glibcxx_assert(__b != memory_order_acq_rel);
807 __glibcxx_assert(__b != memory_order_consume);
808
809 __atomic_store_n(&_M_p, __p, int(__m));
810 }
811
812 _GLIBCXX_ALWAYS_INLINE void
813 store(__pointer_type __p,
814 memory_order __m = memory_order_seq_cst) volatile noexcept
815 {
816 memory_order __b __attribute__ ((__unused__))
817 = __m & __memory_order_mask;
818 __glibcxx_assert(__b != memory_order_acquire);
819 __glibcxx_assert(__b != memory_order_acq_rel);
820 __glibcxx_assert(__b != memory_order_consume);
821
822 __atomic_store_n(&_M_p, __p, int(__m));
823 }
824
825 _GLIBCXX_ALWAYS_INLINE __pointer_type
826 load(memory_order __m = memory_order_seq_cst) const noexcept
827 {
828 memory_order __b __attribute__ ((__unused__))
829 = __m & __memory_order_mask;
830 __glibcxx_assert(__b != memory_order_release);
831 __glibcxx_assert(__b != memory_order_acq_rel);
832
833 return __atomic_load_n(&_M_p, int(__m));
834 }
835
836 _GLIBCXX_ALWAYS_INLINE __pointer_type
837 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
838 {
839 memory_order __b __attribute__ ((__unused__))
840 = __m & __memory_order_mask;
841 __glibcxx_assert(__b != memory_order_release);
842 __glibcxx_assert(__b != memory_order_acq_rel);
843
844 return __atomic_load_n(&_M_p, int(__m));
845 }
846
847 _GLIBCXX_ALWAYS_INLINE __pointer_type
848 exchange(__pointer_type __p,
849 memory_order __m = memory_order_seq_cst) noexcept
850 {
851 return __atomic_exchange_n(&_M_p, __p, int(__m));
852 }
853
854
855 _GLIBCXX_ALWAYS_INLINE __pointer_type
856 exchange(__pointer_type __p,
857 memory_order __m = memory_order_seq_cst) volatile noexcept
858 {
859 return __atomic_exchange_n(&_M_p, __p, int(__m));
860 }
861
862 _GLIBCXX_ALWAYS_INLINE bool
863 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
864 memory_order __m1,
865 memory_order __m2) noexcept
866 {
867 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
868
869 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
870 int(__m1), int(__m2));
871 }
872
873 _GLIBCXX_ALWAYS_INLINE bool
874 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
875 memory_order __m1,
876 memory_order __m2) volatile noexcept
877 {
878 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
879
880 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
881 int(__m1), int(__m2));
882 }
883
884 _GLIBCXX_ALWAYS_INLINE bool
885 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
886 memory_order __m1,
887 memory_order __m2) noexcept
888 {
889 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
890
891 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
892 int(__m1), int(__m2));
893 }
894
895 _GLIBCXX_ALWAYS_INLINE bool
896 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
897 memory_order __m1,
898 memory_order __m2) volatile noexcept
899 {
900 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
901
902 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
903 int(__m1), int(__m2));
904 }
905
906#if __glibcxx_atomic_wait
907 _GLIBCXX_ALWAYS_INLINE void
908 wait(__pointer_type __old,
909 memory_order __m = memory_order_seq_cst) const noexcept
910 {
911 std::__atomic_wait_address_v(&_M_p, __old,
912 [__m, this]
913 { return this->load(__m); });
914 }
915
916 // TODO add const volatile overload
917
918 _GLIBCXX_ALWAYS_INLINE void
919 notify_one() const noexcept
920 { std::__atomic_notify_address(&_M_p, false); }
921
922 // TODO add const volatile overload
923
924 _GLIBCXX_ALWAYS_INLINE void
925 notify_all() const noexcept
926 { std::__atomic_notify_address(&_M_p, true); }
927
928 // TODO add const volatile overload
929#endif // __glibcxx_atomic_wait
930
931 _GLIBCXX_ALWAYS_INLINE __pointer_type
932 fetch_add(ptrdiff_t __d,
933 memory_order __m = memory_order_seq_cst) noexcept
934 { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
935
936 _GLIBCXX_ALWAYS_INLINE __pointer_type
937 fetch_add(ptrdiff_t __d,
938 memory_order __m = memory_order_seq_cst) volatile noexcept
939 { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
940
941 _GLIBCXX_ALWAYS_INLINE __pointer_type
942 fetch_sub(ptrdiff_t __d,
943 memory_order __m = memory_order_seq_cst) noexcept
944 { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
945
946 _GLIBCXX_ALWAYS_INLINE __pointer_type
947 fetch_sub(ptrdiff_t __d,
948 memory_order __m = memory_order_seq_cst) volatile noexcept
949 { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
950 };
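  // Pointer arithmetic sketch (illustrative only): fetch_add/fetch_sub scale
  // the argument by sizeof(_PTp) via _M_type_size, matching built-in pointer
  // arithmetic on the public std::atomic<T*>.
  //
  //   int data[4] = {0, 1, 2, 3};
  //   std::atomic<int*> p{data};
  //   int* old = p.fetch_add(2);   // old == data, p.load() == data + 2
  //   --p;                         // p.load() == data + 1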
951
952 namespace __atomic_impl
953 {
954 // Implementation details of atomic padding handling
955
956 template<typename _Tp>
957 constexpr bool
958 __maybe_has_padding()
959 {
960#if ! __has_builtin(__builtin_clear_padding)
961 return false;
962#elif __has_builtin(__has_unique_object_representations)
963 return !__has_unique_object_representations(_Tp)
964 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
965#else
966 return true;
967#endif
968 }
969
970 template<typename _Tp>
971 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
972 __clear_padding(_Tp& __val) noexcept
973 {
974 auto* __ptr = std::__addressof(__val);
975#if __has_builtin(__builtin_clear_padding)
976 if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
977 __builtin_clear_padding(__ptr);
978#endif
979 return __ptr;
980 }
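    // Example of why this matters (illustrative only): a type such as
    //
    //   struct _S { char _M_c; int _M_i; };  // padding bytes after _M_c on
    //                                        // typical ABIs
    //
    // does not have unique object representations, so __maybe_has_padding<_S>()
    // is true and __clear_padding zeroes the padding bytes before the value is
    // handed to the byte-wise __atomic_* built-ins.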
981
982 // Remove volatile and create a non-deduced context for value arguments.
983 template<typename _Tp>
984 using _Val = typename remove_volatile<_Tp>::type;
985
986#pragma GCC diagnostic push
987#pragma GCC diagnostic ignored "-Wc++17-extensions"
988
989 template<bool _AtomicRef = false, typename _Tp>
990 _GLIBCXX_ALWAYS_INLINE bool
991 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
992 bool __is_weak,
993 memory_order __s, memory_order __f) noexcept
994 {
995 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
996
997 using _Vp = _Val<_Tp>;
998 _Tp* const __pval = std::__addressof(__val);
999
1000 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
1001 {
1002 return __atomic_compare_exchange(__pval, std::__addressof(__e),
1003 std::__addressof(__i), __is_weak,
1004 int(__s), int(__f));
1005 }
1006 else if constexpr (!_AtomicRef) // std::atomic<T>
1007 {
1008 // Clear padding of the value we want to set:
1009 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1010 // Only allowed to modify __e on failure, so make a copy:
1011 _Vp __exp = __e;
1012 // Clear padding of the expected value:
1013 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1014
1015 // For std::atomic<T> we know that the contained value will already
1016 // have zeroed padding, so trivial memcmp semantics are OK.
1017 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1018 __is_weak, int(__s), int(__f)))
1019 return true;
1020 // Value bits must be different, copy from __exp back to __e:
1021 __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1022 return false;
1023 }
1024 else // std::atomic_ref<T> where T has padding bits.
1025 {
1026 // Clear padding of the value we want to set:
1027 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1028
1029 // Only allowed to modify __e on failure, so make a copy:
1030 _Vp __exp = __e;
1031 // Optimistically assume that a previous store had zeroed padding
1032 // so that zeroing it in the expected value will match first time.
1033 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1034
1035 // compare_exchange is specified to compare value representations.
1036 // Need to check whether a failure is 'real' or just due to
1037 // differences in padding bits. This loop should run no more than
1038 // three times, because the worst case scenario is:
1039 // First CAS fails because the actual value has non-zero padding.
1040 // Second CAS fails because another thread stored the same value,
1041 // but now with padding cleared. Third CAS succeeds.
1042 // We will never need to loop a fourth time, because any value
1043 // written by another thread (whether via store, exchange or
1044 // compare_exchange) will have had its padding cleared.
1045 while (true)
1046 {
1047 // Copy of the expected value so we can clear its padding.
1048 _Vp __orig = __exp;
1049
1050 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1051 __is_weak, int(__s), int(__f)))
1052 return true;
1053
1054 // Copy of the actual value so we can clear its padding.
1055 _Vp __curr = __exp;
1056
1057 // Compare value representations (i.e. ignoring padding).
1058 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1059 __atomic_impl::__clear_padding(__curr),
1060 sizeof(_Vp)))
1061 {
1062 // Value representations compare unequal, real failure.
1063 __builtin_memcpy(std::__addressof(__e), __pexp,
1064 sizeof(_Vp));
1065 return false;
1066 }
1067 }
1068 }
1069 }
1070#pragma GCC diagnostic pop
1071 } // namespace __atomic_impl
1072
1073#if __cplusplus > 201703L
1074 // Implementation details of atomic_ref and atomic<floating-point>.
1075 namespace __atomic_impl
1076 {
1077 // Like _Val<T> above, but for difference_type arguments.
1078 template<typename _Tp>
1079 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1080
1081 template<size_t _Size, size_t _Align>
1082 _GLIBCXX_ALWAYS_INLINE bool
1083 is_lock_free() noexcept
1084 {
1085 // Produce a fake, minimally aligned pointer.
1086 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1087 }
1088
1089 template<typename _Tp>
1090 _GLIBCXX_ALWAYS_INLINE void
1091 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1092 {
1093 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1094 }
1095
1096 template<typename _Tp>
1097 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1098 load(const _Tp* __ptr, memory_order __m) noexcept
1099 {
1100 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1101 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1102 __atomic_load(__ptr, __dest, int(__m));
1103 return *__dest;
1104 }
1105
1106 template<typename _Tp>
1107 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1108 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1109 {
1110 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1111 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1112 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1113 __dest, int(__m));
1114 return *__dest;
1115 }
1116
1117 template<bool _AtomicRef = false, typename _Tp>
1118 _GLIBCXX_ALWAYS_INLINE bool
1119 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1120 _Val<_Tp> __desired, memory_order __success,
1121 memory_order __failure,
1122 bool __check_padding = false) noexcept
1123 {
1124 return __atomic_impl::__compare_exchange<_AtomicRef>(
1125 *__ptr, __expected, __desired, true, __success, __failure);
1126 }
1127
1128 template<bool _AtomicRef = false, typename _Tp>
1129 _GLIBCXX_ALWAYS_INLINE bool
1130 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1131 _Val<_Tp> __desired, memory_order __success,
1132 memory_order __failure,
1133 bool __ignore_padding = false) noexcept
1134 {
1135 return __atomic_impl::__compare_exchange<_AtomicRef>(
1136 *__ptr, __expected, __desired, false, __success, __failure);
1137 }
1138
1139#if __glibcxx_atomic_wait
1140 template<typename _Tp>
1141 _GLIBCXX_ALWAYS_INLINE void
1142 wait(const _Tp* __ptr, _Val<_Tp> __old,
1143 memory_order __m = memory_order_seq_cst) noexcept
1144 {
1145 std::__atomic_wait_address_v(__ptr, __old,
1146 [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1147 }
1148
1149 // TODO add const volatile overload
1150
1151 template<typename _Tp>
1152 _GLIBCXX_ALWAYS_INLINE void
1153 notify_one(const _Tp* __ptr) noexcept
1154 { std::__atomic_notify_address(__ptr, false); }
1155
1156 // TODO add const volatile overload
1157
1158 template<typename _Tp>
1159 _GLIBCXX_ALWAYS_INLINE void
1160 notify_all(const _Tp* __ptr) noexcept
1161 { std::__atomic_notify_address(__ptr, true); }
1162
1163 // TODO add const volatile overload
1164#endif // __glibcxx_atomic_wait
1165
1166 template<typename _Tp>
1167 _GLIBCXX_ALWAYS_INLINE _Tp
1168 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1169 { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1170
1171 template<typename _Tp>
1172 _GLIBCXX_ALWAYS_INLINE _Tp
1173 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1174 { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1175
1176 template<typename _Tp>
1177 _GLIBCXX_ALWAYS_INLINE _Tp
1178 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1179 { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1180
1181 template<typename _Tp>
1182 _GLIBCXX_ALWAYS_INLINE _Tp
1183 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1184 { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1185
1186 template<typename _Tp>
1187 _GLIBCXX_ALWAYS_INLINE _Tp
1188 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1189 { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1190
1191 template<typename _Tp>
1192 _GLIBCXX_ALWAYS_INLINE _Tp
1193 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1194 { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1195
1196 template<typename _Tp>
1197 _GLIBCXX_ALWAYS_INLINE _Tp
1198 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1199 { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1200
1201 template<typename _Tp>
1202 _GLIBCXX_ALWAYS_INLINE _Tp
1203 __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1204 { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1205
1206 template<typename _Tp>
1207 _GLIBCXX_ALWAYS_INLINE _Tp
1208 __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1209 { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1210
1211 template<typename _Tp>
1212 _GLIBCXX_ALWAYS_INLINE _Tp
1213 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1214 { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1215
1216 template<typename _Tp>
1217 _Tp
1218 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1219 {
1220 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1221 _Val<_Tp> __newval = __oldval + __i;
1222 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1223 memory_order_relaxed))
1224 __newval = __oldval + __i;
1225 return __oldval;
1226 }
1227
1228 template<typename _Tp>
1229 _Tp
1230 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1231 {
1232 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1233 _Val<_Tp> __newval = __oldval - __i;
1234 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1235 memory_order_relaxed))
1236 __newval = __oldval - __i;
1237 return __oldval;
1238 }
1239
1240 template<typename _Tp>
1241 _Tp
1242 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1243 {
1244 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1245 _Val<_Tp> __newval = __oldval + __i;
1246 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1247 memory_order_seq_cst,
1248 memory_order_relaxed))
1249 __newval = __oldval + __i;
1250 return __newval;
1251 }
1252
1253 template<typename _Tp>
1254 _Tp
1255 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1256 {
1257 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1258 _Val<_Tp> __newval = __oldval - __i;
1259 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1260 memory_order_seq_cst,
1261 memory_order_relaxed))
1262 __newval = __oldval - __i;
1263 return __newval;
1264 }
1265 } // namespace __atomic_impl
1266
1267 // base class for atomic<floating-point-type>
1268 template<typename _Fp>
1269 struct __atomic_float
1270 {
1271 static_assert(is_floating_point_v<_Fp>);
1272
1273 static constexpr size_t _S_alignment = __alignof__(_Fp);
1274
1275 public:
1276 using value_type = _Fp;
1277 using difference_type = value_type;
1278
1279 static constexpr bool is_always_lock_free
1280 = __atomic_always_lock_free(sizeof(_Fp), 0);
1281
1282 __atomic_float() = default;
1283
1284 constexpr
1285 __atomic_float(_Fp __t) : _M_fp(__t)
1286 { __atomic_impl::__clear_padding(_M_fp); }
1287
1288 __atomic_float(const __atomic_float&) = delete;
1289 __atomic_float& operator=(const __atomic_float&) = delete;
1290 __atomic_float& operator=(const __atomic_float&) volatile = delete;
1291
1292 _Fp
1293 operator=(_Fp __t) volatile noexcept
1294 {
1295 this->store(__t);
1296 return __t;
1297 }
1298
1299 _Fp
1300 operator=(_Fp __t) noexcept
1301 {
1302 this->store(__t);
1303 return __t;
1304 }
1305
1306 bool
1307 is_lock_free() const volatile noexcept
1308 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1309
1310 bool
1311 is_lock_free() const noexcept
1312 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1313
1314 void
1315 store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1316 { __atomic_impl::store(&_M_fp, __t, __m); }
1317
1318 void
1319 store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1320 { __atomic_impl::store(&_M_fp, __t, __m); }
1321
1322 _Fp
1323 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1324 { return __atomic_impl::load(&_M_fp, __m); }
1325
1326 _Fp
1327 load(memory_order __m = memory_order_seq_cst) const noexcept
1328 { return __atomic_impl::load(&_M_fp, __m); }
1329
1330 operator _Fp() const volatile noexcept { return this->load(); }
1331 operator _Fp() const noexcept { return this->load(); }
1332
1333 _Fp
1334 exchange(_Fp __desired,
1335 memory_order __m = memory_order_seq_cst) volatile noexcept
1336 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1337
1338 _Fp
1339 exchange(_Fp __desired,
1340 memory_order __m = memory_order_seq_cst) noexcept
1341 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1342
1343 bool
1344 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1345 memory_order __success,
1346 memory_order __failure) noexcept
1347 {
1348 return __atomic_impl::compare_exchange_weak(&_M_fp,
1349 __expected, __desired,
1350 __success, __failure);
1351 }
1352
1353 bool
1354 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1355 memory_order __success,
1356 memory_order __failure) volatile noexcept
1357 {
1358 return __atomic_impl::compare_exchange_weak(&_M_fp,
1359 __expected, __desired,
1360 __success, __failure);
1361 }
1362
1363 bool
1364 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1365 memory_order __success,
1366 memory_order __failure) noexcept
1367 {
1368 return __atomic_impl::compare_exchange_strong(&_M_fp,
1369 __expected, __desired,
1370 __success, __failure);
1371 }
1372
1373 bool
1374 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1375 memory_order __success,
1376 memory_order __failure) volatile noexcept
1377 {
1378 return __atomic_impl::compare_exchange_strong(&_M_fp,
1379 __expected, __desired,
1380 __success, __failure);
1381 }
1382
1383 bool
1384 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1385 memory_order __order = memory_order_seq_cst)
1386 noexcept
1387 {
1388 return compare_exchange_weak(__expected, __desired, __order,
1389 __cmpexch_failure_order(__order));
1390 }
1391
1392 bool
1393 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1394 memory_order __order = memory_order_seq_cst)
1395 volatile noexcept
1396 {
1397 return compare_exchange_weak(__expected, __desired, __order,
1398 __cmpexch_failure_order(__order));
1399 }
1400
1401 bool
1402 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1403 memory_order __order = memory_order_seq_cst)
1404 noexcept
1405 {
1406 return compare_exchange_strong(__expected, __desired, __order,
1407 __cmpexch_failure_order(__order));
1408 }
1409
1410 bool
1411 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1412 memory_order __order = memory_order_seq_cst)
1413 volatile noexcept
1414 {
1415 return compare_exchange_strong(__expected, __desired, __order,
1416 __cmpexch_failure_order(__order));
1417 }
1418
1419#if __glibcxx_atomic_wait
1420 _GLIBCXX_ALWAYS_INLINE void
1421 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1422 { __atomic_impl::wait(&_M_fp, __old, __m); }
1423
1424 // TODO add const volatile overload
1425
1426 _GLIBCXX_ALWAYS_INLINE void
1427 notify_one() const noexcept
1428 { __atomic_impl::notify_one(&_M_fp); }
1429
1430 // TODO add const volatile overload
1431
1432 _GLIBCXX_ALWAYS_INLINE void
1433 notify_all() const noexcept
1434 { __atomic_impl::notify_all(&_M_fp); }
1435
1436 // TODO add const volatile overload
1437#endif // __glibcxx_atomic_wait
1438
1439 value_type
1440 fetch_add(value_type __i,
1441 memory_order __m = memory_order_seq_cst) noexcept
1442 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1443
1444 value_type
1445 fetch_add(value_type __i,
1446 memory_order __m = memory_order_seq_cst) volatile noexcept
1447 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1448
1449 value_type
1450 fetch_sub(value_type __i,
1451 memory_order __m = memory_order_seq_cst) noexcept
1452 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1453
1454 value_type
1455 fetch_sub(value_type __i,
1456 memory_order __m = memory_order_seq_cst) volatile noexcept
1457 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1458
1459 value_type
1460 operator+=(value_type __i) noexcept
1461 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1462
1463 value_type
1464 operator+=(value_type __i) volatile noexcept
1465 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1466
1467 value_type
1468 operator-=(value_type __i) noexcept
1469 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1470
1471 value_type
1472 operator-=(value_type __i) volatile noexcept
1473 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1474
1475 private:
1476 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1477 };
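  // Usage sketch for the derived std::atomic<floating-point> specializations
  // (illustrative only, C++20):
  //
  //   std::atomic<double> acc{0.0};
  //   double old = acc.fetch_add(1.5); // old == 0.0; CAS loop, see __fetch_add_flt
  //   acc += 2.5;                      // operator+= returns the new value, 4.0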
1478#undef _GLIBCXX20_INIT
1479
1480 template<typename _Tp,
1481 bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
1482 struct __atomic_ref;
1483
1484 // base class for non-integral, non-floating-point, non-pointer types
1485 template<typename _Tp>
1486 struct __atomic_ref<_Tp, false, false>
1487 {
1488 static_assert(is_trivially_copyable_v<_Tp>);
1489
1490 // 1/2/4/8/16-byte types must be aligned to at least their size.
1491 static constexpr int _S_min_alignment
1492 = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1493 ? 0 : sizeof(_Tp);
1494
1495 public:
1496 using value_type = _Tp;
1497
1498 static constexpr bool is_always_lock_free
1499 = __atomic_always_lock_free(sizeof(_Tp), 0);
1500
1501 static constexpr size_t required_alignment
1502 = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1503
1504 __atomic_ref& operator=(const __atomic_ref&) = delete;
1505
1506 explicit
1507 __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1508 { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1509
1510 __atomic_ref(const __atomic_ref&) noexcept = default;
1511
1512 _Tp
1513 operator=(_Tp __t) const noexcept
1514 {
1515 this->store(__t);
1516 return __t;
1517 }
1518
1519 operator _Tp() const noexcept { return this->load(); }
1520
1521 bool
1522 is_lock_free() const noexcept
1523 { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1524
1525 void
1526 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1527 { __atomic_impl::store(_M_ptr, __t, __m); }
1528
1529 _Tp
1530 load(memory_order __m = memory_order_seq_cst) const noexcept
1531 { return __atomic_impl::load(_M_ptr, __m); }
1532
1533 _Tp
1534 exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1535 const noexcept
1536 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1537
1538 bool
1539 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1540 memory_order __success,
1541 memory_order __failure) const noexcept
1542 {
1543 return __atomic_impl::compare_exchange_weak<true>(
1544 _M_ptr, __expected, __desired, __success, __failure);
1545 }
1546
1547 bool
1548 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1549 memory_order __success,
1550 memory_order __failure) const noexcept
1551 {
1552 return __atomic_impl::compare_exchange_strong<true>(
1553 _M_ptr, __expected, __desired, __success, __failure);
1554 }
1555
1556 bool
1557 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1558 memory_order __order = memory_order_seq_cst)
1559 const noexcept
1560 {
1561 return compare_exchange_weak(__expected, __desired, __order,
1562 __cmpexch_failure_order(__order));
1563 }
1564
1565 bool
1566 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1567 memory_order __order = memory_order_seq_cst)
1568 const noexcept
1569 {
1570 return compare_exchange_strong(__expected, __desired, __order,
1571 __cmpexch_failure_order(__order));
1572 }
1573
1574#if __glibcxx_atomic_wait
1575 _GLIBCXX_ALWAYS_INLINE void
1576 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1577 { __atomic_impl::wait(_M_ptr, __old, __m); }
1578
1579 // TODO add const volatile overload
1580
1581 _GLIBCXX_ALWAYS_INLINE void
1582 notify_one() const noexcept
1583 { __atomic_impl::notify_one(_M_ptr); }
1584
1585 // TODO add const volatile overload
1586
1587 _GLIBCXX_ALWAYS_INLINE void
1588 notify_all() const noexcept
1589 { __atomic_impl::notify_all(_M_ptr); }
1590
1591 // TODO add const volatile overload
1592#endif // __glibcxx_atomic_wait
1593
1594 private:
1595 _Tp* _M_ptr;
1596 };
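  // Usage sketch for the public std::atomic_ref built on this base
  // (illustrative only, C++20):
  //
  //   struct pair_t { int x; int y; };          // trivially copyable
  //   alignas(std::atomic_ref<pair_t>::required_alignment) pair_t value{1, 2};
  //   std::atomic_ref<pair_t> ref(value);       // value must outlive ref
  //   ref.store({3, 4});
  //   pair_t copy = ref.load();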
1597
1598 // base class for atomic_ref<integral-type>
1599 template<typename _Tp>
1600 struct __atomic_ref<_Tp, true, false>
1601 {
1602 static_assert(is_integral_v<_Tp>);
1603
1604 public:
1605 using value_type = _Tp;
1606 using difference_type = value_type;
1607
1608 static constexpr bool is_always_lock_free
1609 = __atomic_always_lock_free(sizeof(_Tp), 0);
1610
1611 static constexpr size_t required_alignment
1612 = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1613
1614 __atomic_ref() = delete;
1615 __atomic_ref& operator=(const __atomic_ref&) = delete;
1616
1617 explicit
1618 __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1619 { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1620
1621 __atomic_ref(const __atomic_ref&) noexcept = default;
1622
1623 _Tp
1624 operator=(_Tp __t) const noexcept
1625 {
1626 this->store(__t);
1627 return __t;
1628 }
1629
1630 operator _Tp() const noexcept { return this->load(); }
1631
1632 bool
1633 is_lock_free() const noexcept
1634 {
1635 return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1636 }
1637
1638 void
1639 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1640 { __atomic_impl::store(_M_ptr, __t, __m); }
1641
1642 _Tp
1643 load(memory_order __m = memory_order_seq_cst) const noexcept
1644 { return __atomic_impl::load(_M_ptr, __m); }
1645
1646 _Tp
1647 exchange(_Tp __desired,
1648 memory_order __m = memory_order_seq_cst) const noexcept
1649 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1650
1651 bool
1652 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1653 memory_order __success,
1654 memory_order __failure) const noexcept
1655 {
1656 return __atomic_impl::compare_exchange_weak<true>(
1657 _M_ptr, __expected, __desired, __success, __failure);
1658 }
1659
1660 bool
1661 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1662 memory_order __success,
1663 memory_order __failure) const noexcept
1664 {
1665 return __atomic_impl::compare_exchange_strong<true>(
1666 _M_ptr, __expected, __desired, __success, __failure);
1667 }
1668
1669 bool
1670 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1671 memory_order __order = memory_order_seq_cst)
1672 const noexcept
1673 {
1674 return compare_exchange_weak(__expected, __desired, __order,
1675 __cmpexch_failure_order(__order));
1676 }
1677
1678 bool
1679 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1680 memory_order __order = memory_order_seq_cst)
1681 const noexcept
1682 {
1683 return compare_exchange_strong(__expected, __desired, __order,
1684 __cmpexch_failure_order(__order));
1685 }
1686
1687#if __glibcxx_atomic_wait
1688 _GLIBCXX_ALWAYS_INLINE void
1689 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1690 { __atomic_impl::wait(_M_ptr, __old, __m); }
1691
1692 // TODO add const volatile overload
1693
1694 _GLIBCXX_ALWAYS_INLINE void
1695 notify_one() const noexcept
1696 { __atomic_impl::notify_one(_M_ptr); }
1697
1698 // TODO add const volatile overload
1699
1700 _GLIBCXX_ALWAYS_INLINE void
1701 notify_all() const noexcept
1702 { __atomic_impl::notify_all(_M_ptr); }
1703
1704 // TODO add const volatile overload
1705#endif // __glibcxx_atomic_wait
1706
1707 value_type
1708 fetch_add(value_type __i,
1709 memory_order __m = memory_order_seq_cst) const noexcept
1710 { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1711
1712 value_type
1713 fetch_sub(value_type __i,
1714 memory_order __m = memory_order_seq_cst) const noexcept
1715 { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1716
1717 value_type
1718 fetch_and(value_type __i,
1719 memory_order __m = memory_order_seq_cst) const noexcept
1720 { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1721
1722 value_type
1723 fetch_or(value_type __i,
1724 memory_order __m = memory_order_seq_cst) const noexcept
1725 { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1726
1727 value_type
1728 fetch_xor(value_type __i,
1729 memory_order __m = memory_order_seq_cst) const noexcept
1730 { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1731
1732 _GLIBCXX_ALWAYS_INLINE value_type
1733 operator++(int) const noexcept
1734 { return fetch_add(1); }
1735
1736 _GLIBCXX_ALWAYS_INLINE value_type
1737 operator--(int) const noexcept
1738 { return fetch_sub(1); }
1739
1740 value_type
1741 operator++() const noexcept
1742 { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1743
1744 value_type
1745 operator--() const noexcept
1746 { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1747
1748 value_type
1749 operator+=(value_type __i) const noexcept
1750 { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1751
1752 value_type
1753 operator-=(value_type __i) const noexcept
1754 { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1755
1756 value_type
1757 operator&=(value_type __i) const noexcept
1758 { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1759
1760 value_type
1761 operator|=(value_type __i) const noexcept
1762 { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1763
1764 value_type
1765 operator^=(value_type __i) const noexcept
1766 { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1767
1768 private:
1769 _Tp* _M_ptr;
1770 };
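  // Integral atomic_ref sketch (illustrative only, C++20): operations act on
  // an ordinary object through the reference.
  //
  //   int counter = 0;
  //   std::atomic_ref<int> ref(counter);
  //   ref.fetch_add(1);                          // counter is now 1
  //   ref |= 0x4;                                // returns the new value
  //   int seen = ref.load(std::memory_order_acquire);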
1771
1772 // base class for atomic_ref<floating-point-type>
1773 template<typename _Fp>
1774 struct __atomic_ref<_Fp, false, true>
1775 {
1776 static_assert(is_floating_point_v<_Fp>);
1777
1778 public:
1779 using value_type = _Fp;
1780 using difference_type = value_type;
1781
1782 static constexpr bool is_always_lock_free
1783 = __atomic_always_lock_free(sizeof(_Fp), 0);
1784
1785 static constexpr size_t required_alignment = __alignof__(_Fp);
1786
1787 __atomic_ref() = delete;
1788 __atomic_ref& operator=(const __atomic_ref&) = delete;
1789
1790 explicit
1791 __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1792 { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1793
1794 __atomic_ref(const __atomic_ref&) noexcept = default;
1795
1796 _Fp
1797 operator=(_Fp __t) const noexcept
1798 {
1799 this->store(__t);
1800 return __t;
1801 }
1802
1803 operator _Fp() const noexcept { return this->load(); }
1804
1805 bool
1806 is_lock_free() const noexcept
1807 {
1808 return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1809 }
1810
1811 void
1812 store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1813 { __atomic_impl::store(_M_ptr, __t, __m); }
1814
1815 _Fp
1816 load(memory_order __m = memory_order_seq_cst) const noexcept
1817 { return __atomic_impl::load(_M_ptr, __m); }
1818
1819 _Fp
1820 exchange(_Fp __desired,
1821 memory_order __m = memory_order_seq_cst) const noexcept
1822 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1823
1824 bool
1825 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1826 memory_order __success,
1827 memory_order __failure) const noexcept
1828 {
1829 return __atomic_impl::compare_exchange_weak<true>(
1830 _M_ptr, __expected, __desired, __success, __failure);
1831 }
1832
1833 bool
1834 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1835 memory_order __success,
1836 memory_order __failure) const noexcept
1837 {
1838 return __atomic_impl::compare_exchange_strong<true>(
1839 _M_ptr, __expected, __desired, __success, __failure);
1840 }
1841
1842 bool
1843 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1844 memory_order __order = memory_order_seq_cst)
1845 const noexcept
1846 {
1847 return compare_exchange_weak(__expected, __desired, __order,
1848 __cmpexch_failure_order(__order));
1849 }
1850
1851 bool
1852 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1853 memory_order __order = memory_order_seq_cst)
1854 const noexcept
1855 {
1856 return compare_exchange_strong(__expected, __desired, __order,
1857 __cmpexch_failure_order(__order));
1858 }
1859
1860#if __glibcxx_atomic_wait
1861 _GLIBCXX_ALWAYS_INLINE void
1862 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1863 { __atomic_impl::wait(_M_ptr, __old, __m); }
1864
1865 // TODO add const volatile overload
1866
1867 _GLIBCXX_ALWAYS_INLINE void
1868 notify_one() const noexcept
1869 { __atomic_impl::notify_one(_M_ptr); }
1870
1871 // TODO add const volatile overload
1872
1873 _GLIBCXX_ALWAYS_INLINE void
1874 notify_all() const noexcept
1875 { __atomic_impl::notify_all(_M_ptr); }
1876
1877 // TODO add const volatile overload
1878#endif // __glibcxx_atomic_wait
1879
1880 value_type
1881 fetch_add(value_type __i,
1882 memory_order __m = memory_order_seq_cst) const noexcept
1883 { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1884
1885 value_type
1886 fetch_sub(value_type __i,
1887 memory_order __m = memory_order_seq_cst) const noexcept
1888 { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1889
1890 value_type
1891 operator+=(value_type __i) const noexcept
1892 { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1893
1894 value_type
1895 operator-=(value_type __i) const noexcept
1896 { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1897
1898 private:
1899 _Fp* _M_ptr;
1900 };
1901
1902 // base class for atomic_ref<pointer-type>
1903 template<typename _Tp>
1904 struct __atomic_ref<_Tp*, false, false>
1905 {
1906 public:
1907 using value_type = _Tp*;
1908 using difference_type = ptrdiff_t;
1909
1910 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1911
1912 static constexpr size_t required_alignment = __alignof__(_Tp*);
1913
1914 __atomic_ref() = delete;
1915 __atomic_ref& operator=(const __atomic_ref&) = delete;
1916
1917 explicit
1918 __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1919 { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1920
1921 __atomic_ref(const __atomic_ref&) noexcept = default;
1922
1923 _Tp*
1924 operator=(_Tp* __t) const noexcept
1925 {
1926 this->store(__t);
1927 return __t;
1928 }
1929
1930 operator _Tp*() const noexcept { return this->load(); }
1931
1932 bool
1933 is_lock_free() const noexcept
1934 {
1935 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1936 }
1937
1938 void
1939 store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1940 { __atomic_impl::store(_M_ptr, __t, __m); }
1941
1942 _Tp*
1943 load(memory_order __m = memory_order_seq_cst) const noexcept
1944 { return __atomic_impl::load(_M_ptr, __m); }
1945
1946 _Tp*
1947 exchange(_Tp* __desired,
1948 memory_order __m = memory_order_seq_cst) const noexcept
1949 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1950
1951 bool
1952 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1953 memory_order __success,
1954 memory_order __failure) const noexcept
1955 {
1956 return __atomic_impl::compare_exchange_weak<true>(
1957 _M_ptr, __expected, __desired, __success, __failure);
1958 }
1959
1960 bool
1961 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1962 memory_order __success,
1963 memory_order __failure) const noexcept
1964 {
1965 return __atomic_impl::compare_exchange_strong<true>(
1966 _M_ptr, __expected, __desired, __success, __failure);
1967 }
1968
1969 bool
1970 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1971 memory_order __order = memory_order_seq_cst)
1972 const noexcept
1973 {
1974 return compare_exchange_weak(__expected, __desired, __order,
1975 __cmpexch_failure_order(__order));
1976 }
1977
1978 bool
1979 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1980 memory_order __order = memory_order_seq_cst)
1981 const noexcept
1982 {
1983 return compare_exchange_strong(__expected, __desired, __order,
1984 __cmpexch_failure_order(__order));
1985 }
1986
1987#if __glibcxx_atomic_wait
1988 _GLIBCXX_ALWAYS_INLINE void
1989 wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
1990 { __atomic_impl::wait(_M_ptr, __old, __m); }
1991
1992 // TODO add const volatile overload
1993
1994 _GLIBCXX_ALWAYS_INLINE void
1995 notify_one() const noexcept
1996 { __atomic_impl::notify_one(_M_ptr); }
1997
1998 // TODO add const volatile overload
1999
2000 _GLIBCXX_ALWAYS_INLINE void
2001 notify_all() const noexcept
2002 { __atomic_impl::notify_all(_M_ptr); }
2003
2004 // TODO add const volatile overload
2005#endif // __glibcxx_atomic_wait
2006
2007 _GLIBCXX_ALWAYS_INLINE value_type
2008 fetch_add(difference_type __d,
2009 memory_order __m = memory_order_seq_cst) const noexcept
2010 { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2011
2012 _GLIBCXX_ALWAYS_INLINE value_type
2013 fetch_sub(difference_type __d,
2014 memory_order __m = memory_order_seq_cst) const noexcept
2015 { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2016
2017 value_type
2018 operator++(int) const noexcept
2019 { return fetch_add(1); }
2020
2021 value_type
2022 operator--(int) const noexcept
2023 { return fetch_sub(1); }
2024
2025 value_type
2026 operator++() const noexcept
2027 {
2028 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2029 }
2030
2031 value_type
2032 operator--() const noexcept
2033 {
2034 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2035 }
2036
2037 value_type
2038 operator+=(difference_type __d) const noexcept
2039 {
2040 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2041 }
2042
2043 value_type
2044 operator-=(difference_type __d) const noexcept
2045 {
2046 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2047 }
2048
2049 private:
2050 static constexpr ptrdiff_t
2051 _S_type_size(ptrdiff_t __d) noexcept
2052 {
2053 static_assert(is_object_v<_Tp>);
2054 return __d * sizeof(_Tp);
2055 }
2056
2057 _Tp** _M_ptr;
2058 };
2059#endif // C++2a
2060
2061 /// @endcond
2062
2063 /// @} group atomics
2064
2065_GLIBCXX_END_NAMESPACE_VERSION
2066} // namespace std
2067
2068#endif
2069