// -*- C++ -*- header.

// Copyright (C) 2008-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <new> // For placement new
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif

  /// @cond undocumented
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  /// @endcond

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }
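
  // Illustrative note (an assumption about intended use, not public API):
  // on targets with hardware lock elision, a standard order can be combined
  // with an HLE modifier through the operators above, e.g.
  //
  //   __flag.test_and_set(memory_order_acquire | __memory_order_hle_acquire);
  //
  // and the masks let the implementation split such a value back into the
  // standard order and the modifier bits.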

  /// @cond undocumented

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
                        | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
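
  // Illustrative mapping (follows directly from the two functions above):
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //
  // A failed compare-exchange performs no store, so it has nothing to
  // release.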

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  /// @endcond

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
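
  // Illustrative use (hypothetical atomic<int*> named __p): copying the
  // loaded value through kill_dependency ends the dependency chain, so
  // only the load itself is ordered by memory_order_consume:
  //
  //   int* __q = std::kill_dependency(__p.load(memory_order_consume));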

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_value_initialization 201911L
#endif

  /// @cond undocumented
#if __cpp_lib_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif
  /// @endcond

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /// @cond undocumented

  /*
   *  Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

  /// @endcond

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old,
         memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
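
  // Illustrative use of atomic_flag (a minimal spin-lock sketch, not part
  // of this header):
  //
  //   static atomic_flag __lock = ATOMIC_FLAG_INIT;
  //   while (__lock.test_and_set(memory_order_acquire))
  //     { } // spin until the previous value was clear
  //   // ... critical section ...
  //   __lock.clear(memory_order_release);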

  /// @cond undocumented

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }


      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
            [__m, this] { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
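
  // Illustrative use (through the std::atomic<int> wrapper that derives
  // from this base): fetch_add returns the old value, the operators return
  // the new one, and both map onto the GCC built-ins used above:
  //
  //   std::atomic<int> __c{0};
  //   int __prev = __c.fetch_add(1, std::memory_order_relaxed); // 0
  //   int __now  = ++__c;                                       // 2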


  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }


      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
            [__m, this]
            { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
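
  // Illustrative note: fetch_add/fetch_sub above scale by sizeof(_PTp)
  // via _M_type_size, matching ordinary pointer arithmetic. Assuming a
  // std::atomic<int*> named __p:
  //
  //   int* __old = __p.fetch_add(3); // address advances by 3 * sizeof(int)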

  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
        return false;
#elif __has_builtin(__has_unique_object_representations)
        return !__has_unique_object_representations(_Tp)
          && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
        return true;
#endif
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
        auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
        if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
          __builtin_clear_padding(__ptr);
#endif
        return __ptr;
      }

    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
                         bool __is_weak,
                         memory_order __s, memory_order __f) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

        using _Vp = _Val<_Tp>;

        if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Vp>())
          {
            // We must not modify __e on success, so cannot clear its padding.
            // Copy into a buffer and clear that, then copy back on failure.
            alignas(_Vp) unsigned char __buf[sizeof(_Vp)];
            _Vp* __exp = ::new((void*)__buf) _Vp(__e);
            __atomic_impl::__clear_padding(*__exp);
            if (__atomic_compare_exchange(std::__addressof(__val), __exp,
                                          __atomic_impl::__clear_padding(__i),
                                          __is_weak, int(__s), int(__f)))
              return true;
            __builtin_memcpy(std::__addressof(__e), __exp, sizeof(_Vp));
            return false;
          }
        else
          return __atomic_compare_exchange(std::__addressof(__val),
                                           std::__addressof(__e),
                                           std::__addressof(__i),
                                           __is_weak, int(__s), int(__f));
      }
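
    // Illustrative reason for the padding handling above (hypothetical
    // type): given
    //
    //   struct _S { char __c; /* padding */ int __i; };
    //
    // two member-wise equal _S values can still differ in their padding
    // bytes, and __atomic_compare_exchange compares whole object
    // representations. Clearing the padding of the expected and desired
    // values avoids spurious mismatches caused by padding bits alone.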
  } // namespace __atomic_impl

#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      {
        __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
                          __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::__compare_exchange(*__ptr, __expected, __desired,
                                                 true, __success, __failure);
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::__compare_exchange(*__ptr, __expected, __desired,
                                                 false, __success, __failure);
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }
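
    // Note on the loop above: when __atomic_compare_exchange fails it has
    // already reloaded __oldval with the current value, so only __newval
    // needs recomputing before the retry. The floating-point helpers below
    // follow the same pattern.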

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl

  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
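
  // Illustrative use (through std::atomic<double>, which derives from this
  // base): arithmetic is implemented with the compare-exchange loops above
  // rather than a single built-in:
  //
  //   std::atomic<double> __x{1.5};
  //   __x.fetch_add(0.25); // returns 1.5, __x is now 1.75
  //   __x += 0.25;         // returns the new value, 2.0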
#undef _GLIBCXX20_INIT

  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
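
  // Illustrative use (through std::atomic_ref, which derives from these
  // bases; _Pt is a hypothetical user type): any suitably aligned,
  // trivially copyable object can be accessed atomically by reference:
  //
  //   struct _Pt { int __x, __y; };
  //   alignas(std::atomic_ref<_Pt>::required_alignment) _Pt __p{0, 0};
  //   std::atomic_ref<_Pt>(__p).store({1, 2});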
1546 | |
1547 | // base class for atomic_ref<integral-type> |
1548 | template<typename _Tp> |
1549 | struct __atomic_ref<_Tp, true, false> |
1550 | { |
1551 | static_assert(is_integral_v<_Tp>); |
1552 | |
1553 | public: |
1554 | using value_type = _Tp; |
1555 | using difference_type = value_type; |
1556 | |
1557 | static constexpr bool is_always_lock_free |
1558 | = __atomic_always_lock_free(sizeof(_Tp), 0); |
1559 | |
1560 | static constexpr size_t required_alignment |
1561 | = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp); |
1562 | |
1563 | __atomic_ref() = delete; |
1564 | __atomic_ref& operator=(const __atomic_ref&) = delete; |
1565 | |
1566 | explicit |
1567 | __atomic_ref(_Tp& __t) : _M_ptr(&__t) |
1568 | { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); } |
1569 | |
1570 | __atomic_ref(const __atomic_ref&) noexcept = default; |
1571 | |
1572 | _Tp |
1573 | operator=(_Tp __t) const noexcept |
1574 | { |
1575 | this->store(__t); |
1576 | return __t; |
1577 | } |
1578 | |
1579 | operator _Tp() const noexcept { return this->load(); } |
1580 | |
1581 | bool |
1582 | is_lock_free() const noexcept |
1583 | { |
1584 | return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); |
1585 | } |
1586 | |
1587 | void |
1588 | store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept |
1589 | { __atomic_impl::store(_M_ptr, __t, __m); } |
1590 | |
1591 | _Tp |
1592 | load(memory_order __m = memory_order_seq_cst) const noexcept |
1593 | { return __atomic_impl::load(_M_ptr, __m); } |
1594 | |
1595 | _Tp |
1596 | exchange(_Tp __desired, |
1597 | memory_order __m = memory_order_seq_cst) const noexcept |
1598 | { return __atomic_impl::exchange(_M_ptr, __desired, __m); } |
1599 | |
1600 | bool |
1601 | compare_exchange_weak(_Tp& __expected, _Tp __desired, |
1602 | memory_order __success, |
1603 | memory_order __failure) const noexcept |
1604 | { |
1605 | return __atomic_impl::compare_exchange_weak(_M_ptr, |
1606 | __expected, __desired, |
1607 | __success, __failure); |
1608 | } |
1609 | |
1610 | bool |
1611 | compare_exchange_strong(_Tp& __expected, _Tp __desired, |
1612 | memory_order __success, |
1613 | memory_order __failure) const noexcept |
1614 | { |
1615 | return __atomic_impl::compare_exchange_strong(_M_ptr, |
1616 | __expected, __desired, |
1617 | __success, __failure); |
1618 | } |
1619 | |
1620 | bool |
1621 | compare_exchange_weak(_Tp& __expected, _Tp __desired, |
1622 | memory_order __order = memory_order_seq_cst) |
1623 | const noexcept |
1624 | { |
1625 | return compare_exchange_weak(__expected, __desired, __order, |
1626 | __cmpexch_failure_order(__order)); |
1627 | } |
1628 | |
1629 | bool |
1630 | compare_exchange_strong(_Tp& __expected, _Tp __desired, |
1631 | memory_order __order = memory_order_seq_cst) |
1632 | const noexcept |
1633 | { |
1634 | return compare_exchange_strong(__expected, __desired, __order, |
1635 | __cmpexch_failure_order(__order)); |
1636 | } |
1637 | |
1638 | #if __cpp_lib_atomic_wait |
1639 | _GLIBCXX_ALWAYS_INLINE void |
1640 | wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept |
1641 | { __atomic_impl::wait(_M_ptr, __old, __m); } |
1642 | |
1643 | // TODO add const volatile overload |
1644 | |
1645 | _GLIBCXX_ALWAYS_INLINE void |
1646 | notify_one() const noexcept |
1647 | { __atomic_impl::notify_one(_M_ptr); } |
1648 | |
1649 | // TODO add const volatile overload |
1650 | |
1651 | _GLIBCXX_ALWAYS_INLINE void |
1652 | notify_all() const noexcept |
1653 | { __atomic_impl::notify_all(_M_ptr); } |
1654 | |
1655 | // TODO add const volatile overload |
1656 | #endif // __cpp_lib_atomic_wait |
1657 | |
1658 | value_type |
1659 | fetch_add(value_type __i, |
1660 | memory_order __m = memory_order_seq_cst) const noexcept |
1661 | { return __atomic_impl::fetch_add(_M_ptr, __i, __m); } |
1662 | |
1663 | value_type |
1664 | fetch_sub(value_type __i, |
1665 | memory_order __m = memory_order_seq_cst) const noexcept |
1666 | { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); } |
1667 | |
1668 | value_type |
1669 | fetch_and(value_type __i, |
1670 | memory_order __m = memory_order_seq_cst) const noexcept |
1671 | { return __atomic_impl::fetch_and(_M_ptr, __i, __m); } |
1672 | |
1673 | value_type |
1674 | fetch_or(value_type __i, |
1675 | memory_order __m = memory_order_seq_cst) const noexcept |
1676 | { return __atomic_impl::fetch_or(_M_ptr, __i, __m); } |
1677 | |
1678 | value_type |
1679 | fetch_xor(value_type __i, |
1680 | memory_order __m = memory_order_seq_cst) const noexcept |
1681 | { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); } |
1682 | |
1683 | _GLIBCXX_ALWAYS_INLINE value_type |
1684 | operator++(int) const noexcept |
1685 | { return fetch_add(1); } |
1686 | |
1687 | _GLIBCXX_ALWAYS_INLINE value_type |
1688 | operator--(int) const noexcept |
1689 | { return fetch_sub(1); } |
1690 | |
1691 | value_type |
1692 | operator++() const noexcept |
1693 | { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); } |
1694 | |
1695 | value_type |
1696 | operator--() const noexcept |
1697 | { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); } |
1698 | |
1699 | value_type |
1700 | operator+=(value_type __i) const noexcept |
1701 | { return __atomic_impl::__add_fetch(_M_ptr, __i); } |
1702 | |
1703 | value_type |
1704 | operator-=(value_type __i) const noexcept |
1705 | { return __atomic_impl::__sub_fetch(_M_ptr, __i); } |
1706 | |
1707 | value_type |
1708 | operator&=(value_type __i) const noexcept |
1709 | { return __atomic_impl::__and_fetch(_M_ptr, __i); } |
1710 | |
1711 | value_type |
1712 | operator|=(value_type __i) const noexcept |
1713 | { return __atomic_impl::__or_fetch(_M_ptr, __i); } |
1714 | |
1715 | value_type |
1716 | operator^=(value_type __i) const noexcept |
1717 | { return __atomic_impl::__xor_fetch(_M_ptr, __i); } |
1718 | |
1719 | private: |
1720 | _Tp* _M_ptr; |
1721 | }; |
1722 | |
  // Base class for atomic_ref<floating-point-type>.
1724 | template<typename _Fp> |
1725 | struct __atomic_ref<_Fp, false, true> |
1726 | { |
1727 | static_assert(is_floating_point_v<_Fp>); |
1728 | |
1729 | public: |
1730 | using value_type = _Fp; |
1731 | using difference_type = value_type; |
1732 | |
1733 | static constexpr bool is_always_lock_free |
1734 | = __atomic_always_lock_free(sizeof(_Fp), 0); |
1735 | |
      // __alignof__ rather than alignof: on some targets __alignof__
      // reports the stricter preferred alignment (e.g. 8 for double on
      // 32-bit x86), which is presumably what the atomic operation needs.
      static constexpr size_t required_alignment = __alignof__(_Fp);
1737 | |
1738 | __atomic_ref() = delete; |
1739 | __atomic_ref& operator=(const __atomic_ref&) = delete; |
1740 | |
1741 | explicit |
1742 | __atomic_ref(_Fp& __t) : _M_ptr(&__t) |
1743 | { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); } |
1744 | |
1745 | __atomic_ref(const __atomic_ref&) noexcept = default; |
1746 | |
1747 | _Fp |
1748 | operator=(_Fp __t) const noexcept |
1749 | { |
1750 | this->store(__t); |
1751 | return __t; |
1752 | } |
1753 | |
1754 | operator _Fp() const noexcept { return this->load(); } |
1755 | |
1756 | bool |
1757 | is_lock_free() const noexcept |
1758 | { |
1759 | return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>(); |
1760 | } |
1761 | |
1762 | void |
1763 | store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept |
1764 | { __atomic_impl::store(_M_ptr, __t, __m); } |
1765 | |
1766 | _Fp |
1767 | load(memory_order __m = memory_order_seq_cst) const noexcept |
1768 | { return __atomic_impl::load(_M_ptr, __m); } |
1769 | |
1770 | _Fp |
1771 | exchange(_Fp __desired, |
1772 | memory_order __m = memory_order_seq_cst) const noexcept |
1773 | { return __atomic_impl::exchange(_M_ptr, __desired, __m); } |
1774 | |
1775 | bool |
1776 | compare_exchange_weak(_Fp& __expected, _Fp __desired, |
1777 | memory_order __success, |
1778 | memory_order __failure) const noexcept |
1779 | { |
1780 | return __atomic_impl::compare_exchange_weak(_M_ptr, |
1781 | __expected, __desired, |
1782 | __success, __failure); |
1783 | } |
1784 | |
1785 | bool |
1786 | compare_exchange_strong(_Fp& __expected, _Fp __desired, |
1787 | memory_order __success, |
1788 | memory_order __failure) const noexcept |
1789 | { |
1790 | return __atomic_impl::compare_exchange_strong(_M_ptr, |
1791 | __expected, __desired, |
1792 | __success, __failure); |
1793 | } |
1794 | |
1795 | bool |
1796 | compare_exchange_weak(_Fp& __expected, _Fp __desired, |
1797 | memory_order __order = memory_order_seq_cst) |
1798 | const noexcept |
1799 | { |
1800 | return compare_exchange_weak(__expected, __desired, __order, |
1801 | __cmpexch_failure_order(__order)); |
1802 | } |
1803 | |
1804 | bool |
1805 | compare_exchange_strong(_Fp& __expected, _Fp __desired, |
1806 | memory_order __order = memory_order_seq_cst) |
1807 | const noexcept |
1808 | { |
1809 | return compare_exchange_strong(__expected, __desired, __order, |
1810 | __cmpexch_failure_order(__order)); |
1811 | } |
1812 | |
1813 | #if __cpp_lib_atomic_wait |
1814 | _GLIBCXX_ALWAYS_INLINE void |
1815 | wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept |
1816 | { __atomic_impl::wait(_M_ptr, __old, __m); } |
1817 | |
1818 | // TODO add const volatile overload |
1819 | |
1820 | _GLIBCXX_ALWAYS_INLINE void |
1821 | notify_one() const noexcept |
1822 | { __atomic_impl::notify_one(_M_ptr); } |
1823 | |
1824 | // TODO add const volatile overload |
1825 | |
1826 | _GLIBCXX_ALWAYS_INLINE void |
1827 | notify_all() const noexcept |
1828 | { __atomic_impl::notify_all(_M_ptr); } |
1829 | |
1830 | // TODO add const volatile overload |
1831 | #endif // __cpp_lib_atomic_wait |
1832 | |
1833 | value_type |
1834 | fetch_add(value_type __i, |
1835 | memory_order __m = memory_order_seq_cst) const noexcept |
1836 | { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); } |
1837 | |
1838 | value_type |
1839 | fetch_sub(value_type __i, |
1840 | memory_order __m = memory_order_seq_cst) const noexcept |
1841 | { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); } |
1842 | |
1843 | value_type |
1844 | operator+=(value_type __i) const noexcept |
1845 | { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); } |
1846 | |
1847 | value_type |
1848 | operator-=(value_type __i) const noexcept |
1849 | { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); } |
1850 | |
1851 | private: |
1852 | _Fp* _M_ptr; |
1853 | }; |
1854 | |
  // Base class for atomic_ref<pointer-type>.
1856 | template<typename _Tp> |
1857 | struct __atomic_ref<_Tp*, false, false> |
1858 | { |
1859 | public: |
1860 | using value_type = _Tp*; |
1861 | using difference_type = ptrdiff_t; |
1862 | |
      // ATOMIC_POINTER_LOCK_FREE == 2 means pointer atomics are always
      // lock-free on this target (1 would mean sometimes, 0 never).
      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1864 | |
1865 | static constexpr size_t required_alignment = __alignof__(_Tp*); |
1866 | |
1867 | __atomic_ref() = delete; |
1868 | __atomic_ref& operator=(const __atomic_ref&) = delete; |
1869 | |
1870 | explicit |
1871 | __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t)) |
1872 | { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); } |
1873 | |
1874 | __atomic_ref(const __atomic_ref&) noexcept = default; |
1875 | |
1876 | _Tp* |
1877 | operator=(_Tp* __t) const noexcept |
1878 | { |
1879 | this->store(__t); |
1880 | return __t; |
1881 | } |
1882 | |
1883 | operator _Tp*() const noexcept { return this->load(); } |
1884 | |
1885 | bool |
1886 | is_lock_free() const noexcept |
1887 | { |
1888 | return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>(); |
1889 | } |
1890 | |
1891 | void |
1892 | store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept |
1893 | { __atomic_impl::store(_M_ptr, __t, __m); } |
1894 | |
1895 | _Tp* |
1896 | load(memory_order __m = memory_order_seq_cst) const noexcept |
1897 | { return __atomic_impl::load(_M_ptr, __m); } |
1898 | |
1899 | _Tp* |
1900 | exchange(_Tp* __desired, |
1901 | memory_order __m = memory_order_seq_cst) const noexcept |
1902 | { return __atomic_impl::exchange(_M_ptr, __desired, __m); } |
1903 | |
1904 | bool |
1905 | compare_exchange_weak(_Tp*& __expected, _Tp* __desired, |
1906 | memory_order __success, |
1907 | memory_order __failure) const noexcept |
1908 | { |
1909 | return __atomic_impl::compare_exchange_weak(_M_ptr, |
1910 | __expected, __desired, |
1911 | __success, __failure); |
1912 | } |
1913 | |
1914 | bool |
1915 | compare_exchange_strong(_Tp*& __expected, _Tp* __desired, |
1916 | memory_order __success, |
1917 | memory_order __failure) const noexcept |
1918 | { |
1919 | return __atomic_impl::compare_exchange_strong(_M_ptr, |
1920 | __expected, __desired, |
1921 | __success, __failure); |
1922 | } |
1923 | |
1924 | bool |
1925 | compare_exchange_weak(_Tp*& __expected, _Tp* __desired, |
1926 | memory_order __order = memory_order_seq_cst) |
1927 | const noexcept |
1928 | { |
1929 | return compare_exchange_weak(__expected, __desired, __order, |
1930 | __cmpexch_failure_order(__order)); |
1931 | } |
1932 | |
1933 | bool |
1934 | compare_exchange_strong(_Tp*& __expected, _Tp* __desired, |
1935 | memory_order __order = memory_order_seq_cst) |
1936 | const noexcept |
1937 | { |
1938 | return compare_exchange_strong(__expected, __desired, __order, |
1939 | __cmpexch_failure_order(__order)); |
1940 | } |
1941 | |
1942 | #if __cpp_lib_atomic_wait |
1943 | _GLIBCXX_ALWAYS_INLINE void |
1944 | wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept |
1945 | { __atomic_impl::wait(_M_ptr, __old, __m); } |
1946 | |
1947 | // TODO add const volatile overload |
1948 | |
1949 | _GLIBCXX_ALWAYS_INLINE void |
1950 | notify_one() const noexcept |
1951 | { __atomic_impl::notify_one(_M_ptr); } |
1952 | |
1953 | // TODO add const volatile overload |
1954 | |
1955 | _GLIBCXX_ALWAYS_INLINE void |
1956 | notify_all() const noexcept |
1957 | { __atomic_impl::notify_all(_M_ptr); } |
1958 | |
1959 | // TODO add const volatile overload |
1960 | #endif // __cpp_lib_atomic_wait |
1961 | |
1962 | _GLIBCXX_ALWAYS_INLINE value_type |
1963 | fetch_add(difference_type __d, |
1964 | memory_order __m = memory_order_seq_cst) const noexcept |
1965 | { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); } |
1966 | |
1967 | _GLIBCXX_ALWAYS_INLINE value_type |
1968 | fetch_sub(difference_type __d, |
1969 | memory_order __m = memory_order_seq_cst) const noexcept |
1970 | { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); } |
1971 | |
1972 | value_type |
1973 | operator++(int) const noexcept |
1974 | { return fetch_add(1); } |
1975 | |
1976 | value_type |
1977 | operator--(int) const noexcept |
1978 | { return fetch_sub(1); } |
1979 | |
1980 | value_type |
1981 | operator++() const noexcept |
1982 | { |
1983 | return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1)); |
1984 | } |
1985 | |
1986 | value_type |
1987 | operator--() const noexcept |
1988 | { |
1989 | return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1)); |
1990 | } |
1991 | |
1992 | value_type |
1993 | operator+=(difference_type __d) const noexcept |
1994 | { |
1995 | return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d)); |
1996 | } |
1997 | |
1998 | value_type |
1999 | operator-=(difference_type __d) const noexcept |
2000 | { |
2001 | return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d)); |
2002 | } |
2003 | |
2004 | private: |
      // Scale an element count to a byte offset: the __atomic builtins
      // underlying __atomic_impl add raw byte values to the stored
      // pointer, so pointer arithmetic must be performed in multiples
      // of sizeof(_Tp).  The static_assert rejects void and function
      // pointee types, for which pointer arithmetic is ill-formed.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	static_assert(is_object_v<_Tp>);
	return __d * sizeof(_Tp);
      }
2011 | |
2012 | _Tp** _M_ptr; |
2013 | }; |
#endif // C++20
2015 | |
2016 | /// @endcond |
2017 | |
2018 | /// @} group atomics |
2019 | |
2020 | _GLIBCXX_END_NAMESPACE_VERSION |
2021 | } // namespace std |
2022 | |
2023 | #endif |
2024 | |