Directory listing: /usr/include/c++/14/bits/
Name                                 Size  Permission
algorithmfwd.h                   24.05 KB  -rw-r--r--
align.h                           3.62 KB  -rw-r--r--
alloc_traits.h                    31.4 KB  -rw-r--r--
allocated_ptr.h                   3.25 KB  -rw-r--r--
allocator.h                       8.61 KB  -rw-r--r--
atomic_base.h                     60.9 KB  -rw-r--r--
atomic_futex.h                   12.19 KB  -rw-r--r--
atomic_lockfree_defines.h          2.3 KB  -rw-r--r--
atomic_timed_wait.h              13.18 KB  -rw-r--r--
atomic_wait.h                    12.33 KB  -rw-r--r--
basic_ios.h                      15.85 KB  -rw-r--r--
basic_ios.tcc                     5.67 KB  -rw-r--r--
basic_string.h                  161.58 KB  -rw-r--r--
basic_string.tcc                  30.7 KB  -rw-r--r--
boost_concept_check.h            29.13 KB  -rw-r--r--
c++0x_warning.h                   1.44 KB  -rw-r--r--
char_traits.h                    28.54 KB  -rw-r--r--
charconv.h                        3.58 KB  -rw-r--r--
chrono.h                         47.09 KB  -rw-r--r--
chrono_io.h                     126.98 KB  -rw-r--r--
codecvt.h                         24.9 KB  -rw-r--r--
concept_check.h                   3.32 KB  -rw-r--r--
cow_string.h                    134.11 KB  -rw-r--r--
cpp_type_traits.h                14.54 KB  -rw-r--r--
cxxabi_forced.h                   1.77 KB  -rw-r--r--
cxxabi_init_exception.h           2.17 KB  -rw-r--r--
deque.tcc                        41.43 KB  -rw-r--r--
elements_of.h                     2.13 KB  -rw-r--r--
enable_special_members.h         12.16 KB  -rw-r--r--
erase_if.h                        2.08 KB  -rw-r--r--
exception.h                       2.41 KB  -rw-r--r--
exception_defines.h               1.61 KB  -rw-r--r--
exception_ptr.h                   8.02 KB  -rw-r--r--
forward_list.h                      50 KB  -rw-r--r--
forward_list.tcc                 13.71 KB  -rw-r--r--
fs_dir.h                         17.94 KB  -rw-r--r--
fs_fwd.h                         10.91 KB  -rw-r--r--
fs_ops.h                         10.49 KB  -rw-r--r--
fs_path.h                        41.47 KB  -rw-r--r--
fstream.tcc                      32.64 KB  -rw-r--r--
functexcept.h                     4.17 KB  -rw-r--r--
functional_hash.h                 8.79 KB  -rw-r--r--
gslice.h                          5.52 KB  -rw-r--r--
gslice_array.h                    7.71 KB  -rw-r--r--
hash_bytes.h                       2.1 KB  -rw-r--r--
hashtable.h                      88.15 KB  -rw-r--r--
hashtable_policy.h               64.73 KB  -rw-r--r--
indirect_array.h                  7.72 KB  -rw-r--r--
invoke.h                          6.08 KB  -rw-r--r--
ios_base.h                       32.29 KB  -rw-r--r--
istream.tcc                      32.16 KB  -rw-r--r--
iterator_concepts.h              34.38 KB  -rw-r--r--
list.tcc                         17.63 KB  -rw-r--r--
locale_classes.h                 25.15 KB  -rw-r--r--
locale_classes.tcc               10.76 KB  -rw-r--r--
locale_conv.h                    18.74 KB  -rw-r--r--
locale_facets.h                  92.12 KB  -rw-r--r--
locale_facets.tcc                40.61 KB  -rw-r--r--
locale_facets_nonio.h             69.1 KB  -rw-r--r--
locale_facets_nonio.tcc          55.86 KB  -rw-r--r--
localefwd.h                        5.8 KB  -rw-r--r--
mask_array.h                      7.77 KB  -rw-r--r--
max_size_type.h                     22 KB  -rw-r--r--
memory_resource.h                 15.8 KB  -rw-r--r--
memoryfwd.h                        2.5 KB  -rw-r--r--
mofunc_impl.h                     7.45 KB  -rw-r--r--
move.h                            7.62 KB  -rw-r--r--
move_only_function.h              6.15 KB  -rw-r--r--
nested_exception.h                7.51 KB  -rw-r--r--
new_allocator.h                   7.31 KB  -rw-r--r--
node_handle.h                     11.1 KB  -rw-r--r--
ostream.tcc                      12.02 KB  -rw-r--r--
ostream_insert.h                  3.98 KB  -rw-r--r--
out_ptr.h                        14.14 KB  -rw-r--r--
parse_numbers.h                    7.8 KB  -rw-r--r--
postypes.h                        7.33 KB  -rw-r--r--
predefined_ops.h                  9.94 KB  -rw-r--r--
ptr_traits.h                      8.05 KB  -rw-r--r--
quoted_string.h                   5.01 KB  -rw-r--r--
random.h                        181.57 KB  -rw-r--r--
random.tcc                      103.49 KB  -rw-r--r--
range_access.h                   11.66 KB  -rw-r--r--
ranges_algo.h                   129.22 KB  -rw-r--r--
ranges_algobase.h                18.63 KB  -rw-r--r--
ranges_base.h                    28.99 KB  -rw-r--r--
ranges_cmp.h                      5.74 KB  -rw-r--r--
ranges_uninitialized.h           17.71 KB  -rw-r--r--
ranges_util.h                    25.84 KB  -rw-r--r--
refwrap.h                        15.05 KB  -rw-r--r--
regex.h                         104.49 KB  -rw-r--r--
regex.tcc                         16.1 KB  -rw-r--r--
regex_automaton.h                10.55 KB  -rw-r--r--
regex_automaton.tcc                7.6 KB  -rw-r--r--
regex_compiler.h                 15.92 KB  -rw-r--r--
regex_compiler.tcc               18.48 KB  -rw-r--r--
regex_constants.h                14.77 KB  -rw-r--r--
regex_error.h                     5.35 KB  -rw-r--r--
regex_executor.h                  8.81 KB  -rw-r--r--
regex_executor.tcc               18.49 KB  -rw-r--r--
regex_scanner.h                   6.88 KB  -rw-r--r--
regex_scanner.tcc                14.59 KB  -rw-r--r--
requires_hosted.h                 1.36 KB  -rw-r--r--
sat_arith.h                       4.64 KB  -rw-r--r--
semaphore_base.h                   7.7 KB  -rw-r--r--
shared_ptr.h                     38.26 KB  -rw-r--r--
shared_ptr_atomic.h              24.12 KB  -rw-r--r--
shared_ptr_base.h                65.58 KB  -rw-r--r--
slice_array.h                     9.39 KB  -rw-r--r--
specfun.h                        46.09 KB  -rw-r--r--
sstream.tcc                       9.91 KB  -rw-r--r--
std_abs.h                         4.63 KB  -rw-r--r--
std_function.h                   23.35 KB  -rw-r--r--
std_mutex.h                       6.73 KB  -rw-r--r--
std_thread.h                     10.02 KB  -rw-r--r--
stl_algo.h                      210.46 KB  -rw-r--r--
stl_algobase.h                   79.62 KB  -rw-r--r--
stl_bvector.h                    41.68 KB  -rw-r--r--
stl_construct.h                   8.61 KB  -rw-r--r--
stl_deque.h                      76.46 KB  -rw-r--r--
stl_function.h                    44.2 KB  -rw-r--r--
stl_heap.h                       20.38 KB  -rw-r--r--
stl_iterator.h                   93.68 KB  -rw-r--r--
stl_iterator_base_funcs.h         8.75 KB  -rw-r--r--
stl_iterator_base_types.h         9.48 KB  -rw-r--r--
stl_list.h                       70.78 KB  -rw-r--r--
stl_map.h                        54.84 KB  -rw-r--r--
stl_multimap.h                   42.71 KB  -rw-r--r--
stl_multiset.h                   36.87 KB  -rw-r--r--
stl_numeric.h                    14.25 KB  -rw-r--r--
stl_pair.h                       43.42 KB  -rw-r--r--
stl_queue.h                      28.04 KB  -rw-r--r--
stl_raw_storage_iter.h            3.91 KB  -rw-r--r--
stl_relops.h                      4.47 KB  -rw-r--r--
stl_set.h                        37.13 KB  -rw-r--r--
stl_stack.h                       13.7 KB  -rw-r--r--
stl_tempbuf.h                    10.17 KB  -rw-r--r--
stl_tree.h                       72.02 KB  -rw-r--r--
stl_uninitialized.h              35.65 KB  -rw-r--r--
stl_vector.h                     69.15 KB  -rw-r--r--
stream_iterator.h                  8.2 KB  -rw-r--r--
streambuf.tcc                     4.58 KB  -rw-r--r--
streambuf_iterator.h             15.57 KB  -rw-r--r--
string_view.tcc                      7 KB  -rw-r--r--
stringfwd.h                       2.56 KB  -rw-r--r--
text_encoding-data.h              24.3 KB  -rw-r--r--
this_thread_sleep.h               3.21 KB  -rw-r--r--
unicode-data.h                    25.4 KB  -rw-r--r--
unicode.h                        29.66 KB  -rw-r--r--
uniform_int_dist.h                  13 KB  -rw-r--r--
unique_lock.h                     6.32 KB  -rw-r--r--
unique_ptr.h                     35.96 KB  -rw-r--r--
unordered_map.h                  75.45 KB  -rw-r--r--
unordered_set.h                  61.91 KB  -rw-r--r--
uses_allocator.h                  6.91 KB  -rw-r--r--
uses_allocator_args.h             8.52 KB  -rw-r--r--
utility.h                         8.55 KB  -rw-r--r--
valarray_after.h                 22.74 KB  -rw-r--r--
valarray_array.h                  20.8 KB  -rw-r--r--
valarray_array.tcc                7.08 KB  -rw-r--r--
valarray_before.h                18.69 KB  -rw-r--r--
vector.tcc                       37.12 KB  -rw-r--r--
version.h                        79.04 KB  -rw-r--r--
File: shared_ptr_atomic.h
// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2024 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>
#include <bits/shared_ptr.h>

// Annotations for the custom locking in atomic<shared_ptr<T>>.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   *  @{
  */
  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>*)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be `memory_order_acquire` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in `*__p`.
   *  @return The original value of `*__p`
   *  @{
  */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }
  /// @}

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w A non-null pointer to a shared_ptr object.
   *  @return True if `*__p` was equivalent to `*__v`, false otherwise.
   *
   *  The memory order for failure shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}

  /// @} group pointer_abstractions

#ifdef __glibcxx_atomic_shared_ptr // C++ >= 20 && HOSTED
  template<typename _Tp>
    struct atomic;

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend struct atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<> or __weak_count<>
        using __count_type = decltype(_Tp::_M_refcount);

        // _Sp_counted_base<>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        constexpr _Atomic_count() noexcept = default;

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: Caller does not hold lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
#if __glibcxx_atomic_wait
              __detail::__thread_relax();
#endif
              __current = _M_val.load(memory_order_relaxed);
            }

          _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __glibcxx_atomic_wait
              __detail::__thread_relax();
#endif
              __current = __current & ~_S_lock_bit;
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
            }
          _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          _M_val.fetch_sub(1, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          __x = _M_val.exchange(__x, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __glibcxx_atomic_wait
        // Precondition: caller holds lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_one();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }

        void
        notify_all() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_all();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                           && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
              && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      { _M_refcount.notify_one(); }

      void
      notify_all() noexcept
      { _M_refcount.notify_all(); }
#endif
    };

  template<typename _Tp>
    struct atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
      void
      operator=(nullptr_t) noexcept
      { store(nullptr); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst)
      noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      { _M_impl.wait(std::move(__old), __o); }

      void
      notify_one() noexcept
      { _M_impl.notify_one(); }

      void
      notify_all() noexcept
      { _M_impl.notify_all(); }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };

  template<typename _Tp>
    struct atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst)
      noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      { _M_impl.wait(std::move(__old), __o); }

      void
      notify_one() noexcept
      { _M_impl.notify_one(); }

      void
      notify_all() noexcept
      { _M_impl.notify_all(); }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
  /// @} group pointer_abstractions
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H
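The header above implements std::atomic<std::shared_ptr<T>> with a spinlock packed into the low bit of the control-block pointer (see _Atomic_count) rather than with lock-free atomics, and it marks the old atomic_load/atomic_store free functions deprecated in favor of that specialization. A minimal usage sketch follows; it is not part of the header, assumes C++20, and the names (box, the value 42) are chosen here purely for illustration.

// Sketch: one thread publishes a shared_ptr through
// std::atomic<std::shared_ptr<T>>; another observes it safely.
// Compile with -std=c++20 or later.
#include <atomic>
#include <iostream>
#include <memory>
#include <thread>

std::atomic<std::shared_ptr<int>> box; // default-constructed: holds null

int main()
{
    std::thread writer([] {
        // store() swaps the new value in under the header's internal
        // pointer-tagged lock, so readers never see a torn shared_ptr.
        box.store(std::make_shared<int>(42));
    });

    std::thread reader([] {
        // load() returns a shared_ptr that shares ownership, so the int
        // stays alive even if the writer replaces it concurrently.
        std::shared_ptr<int> p;
        while (!(p = box.load()))
            ; // spin until the writer has published a value
        std::cout << *p << '\n'; // prints 42
    });

    writer.join();
    reader.join();
}

Note that is_lock_free() on this specialization always returns false, consistent with the is_always_lock_free = false declared in the header: every load() and store() goes through the spinlock in _Atomic_count.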