// atomic_shared_ptr.hpp

// A lock-free atomic shared pointer for modern C++. Not fully
// feature complete yet. Currently, support is missing for:
// - atomic_weak_ptr
// - aliased shared pointers

#pragma once

#include <atomic>

#include "details/atomic_details.hpp"
#include "details/hazard_pointers.hpp"

#include "shared_ptr.hpp"

namespace parlay {

// Turn on deamortized reclamation. This substantially improves the worst-case store
// latency by spreading out reclamation over time instead of doing it in bulk, in
// exchange for a slight increase in load latency.
inline void enable_deamortized_reclamation() {
  // Experimental feature. Still a work-in-progress!
  get_hazard_list<parlay::details::control_block_base>().enable_deamortized_reclamation();
}
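
// A minimal usage sketch (hypothetical driver code, not part of this header):
// the switch is global and experimental, so it would typically be flipped once
// at startup, before the structure sees concurrent traffic. This assumes
// shared_ptr's usual raw-pointer constructor from shared_ptr.hpp.
//
//   int main() {
//     parlay::enable_deamortized_reclamation();  // smoother store tail latency
//     parlay::atomic_shared_ptr<int> slot{parlay::shared_ptr<int>(new int(42))};
//     auto snapshot = slot.load();               // slightly slower loads in return
//   }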

template<typename T>
class atomic_shared_ptr {

  using shared_ptr_type = shared_ptr<T>;
  using control_block_type = details::control_block_base;

 public:

  constexpr atomic_shared_ptr() noexcept = default;
  constexpr explicit(false) atomic_shared_ptr(std::nullptr_t) noexcept  // NOLINT(google-explicit-constructor)
      : control_block{nullptr} { }

  explicit(false) atomic_shared_ptr(shared_ptr_type desired) {  // NOLINT(google-explicit-constructor)
    auto [ptr_, control_block_] = desired.release_internals();
    control_block.store(control_block_, std::memory_order_relaxed);
  }

  atomic_shared_ptr(const atomic_shared_ptr&) = delete;
  atomic_shared_ptr& operator=(const atomic_shared_ptr&) = delete;

  ~atomic_shared_ptr() { store(nullptr); }

  bool is_lock_free() const noexcept {
    return control_block.is_lock_free();
  }

  constexpr static bool is_always_lock_free = std::atomic<control_block_type*>::is_always_lock_free;

  [[nodiscard]] shared_ptr_type load([[maybe_unused]] std::memory_order order = std::memory_order_seq_cst) const {
    control_block_type* current_control_block = nullptr;

    auto& hazptr = get_hazard_list<control_block_type>();

    while (true) {
      // Protect the control block with a hazard pointer, then try to take a
      // strong reference. The increment fails if the strong count has already
      // hit zero (the object is being destroyed), in which case the atomic has
      // since been overwritten with a different control block, so we retry.
      current_control_block = hazptr.protect(control_block);
      if (current_control_block == nullptr || current_control_block->increment_strong_count_if_nonzero()) break;
    }

    return make_shared_from_ctrl_block(current_control_block);
  }
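
  // Usage sketch: load() returns an ordinary shared_ptr snapshot, so a reader
  // can keep the object alive across arbitrary work while writers concurrently
  // store replacements. Config and use() are stand-ins; illustrative only:
  //
  //   parlay::atomic_shared_ptr<Config> current;
  //   auto snapshot = current.load();   // lock-free, hazard-protected
  //   if (snapshot) use(*snapshot);     // still valid even after a store()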

  void store(shared_ptr_type desired, std::memory_order order = std::memory_order_seq_cst) {
    auto [ptr_, control_block_] = desired.release_internals();
    auto old_control_block = control_block.exchange(control_block_, order);
    if (old_control_block) {
      old_control_block->decrement_strong_count();
    }
  }

  shared_ptr_type exchange(shared_ptr_type desired, std::memory_order order = std::memory_order_seq_cst) noexcept {
    auto [ptr_, control_block_] = desired.release_internals();
    auto old_control_block = control_block.exchange(control_block_, order);
    return make_shared_from_ctrl_block(old_control_block);
  }

  bool compare_exchange_weak(shared_ptr_type& expected, shared_ptr_type&& desired,
                             std::memory_order success, std::memory_order failure) {

    auto expected_ctrl_block = expected.control_block;
    auto desired_ctrl_block = desired.control_block;

    if (control_block.compare_exchange_weak(expected_ctrl_block, desired_ctrl_block, success, failure)) {
      // Success: the atomic drops its reference to the old control block and
      // adopts desired's reference, so desired must not decrement it on destruction.
      if (expected_ctrl_block) {
        expected_ctrl_block->decrement_strong_count();
      }
      desired.release_internals();
      return true;
    }
    else {
      // On failure, expected may have ABA'd and stayed the same, so we must
      // reload it; this is also why this algorithm cannot be used to implement
      // compare_exchange_strong.
      expected = load();
      return false;
    }
  }

  bool compare_exchange_strong(shared_ptr_type& expected, shared_ptr_type&& desired,
                               std::memory_order success, std::memory_order failure) {
    auto expected_ctrl_block = expected.control_block;

    // If expected changes, the operation has completed (unsuccessfully); we only
    // have to loop in case expected ABAs or the weak operation fails spuriously.
    do {
      if (compare_exchange_weak(expected, std::move(desired), success, failure)) {
        return true;
      }
    } while (expected_ctrl_block == expected.control_block);
    return false;
  }
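
  // Usage sketch: the canonical read-modify-write retry loop. On failure,
  // compare_exchange_weak refreshes `expected` with a fresh snapshot, so each
  // round derives its new value from the latest state. Counter is a stand-in
  // type, and shared_ptr's usual raw-pointer constructor is assumed.
  //
  //   parlay::atomic_shared_ptr<Counter> cell{parlay::shared_ptr<Counter>(new Counter{0})};
  //   auto expected = cell.load();
  //   while (!cell.compare_exchange_weak(
  //       expected, parlay::shared_ptr<Counter>(new Counter{expected->value + 1}))) {
  //     // expected now holds the current snapshot; retry with a rebuilt desired
  //   }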

  bool compare_exchange_weak(shared_ptr_type& expected, const shared_ptr_type& desired,
                             std::memory_order success, std::memory_order failure) {
    // This version is not very efficient and should be avoided. It's only here to
    // provide the complete API of atomic<shared_ptr>. The issue is that if the
    // compare_exchange fails, the reference count of desired is incremented and
    // then decremented for no reason, whereas the rvalue version doesn't modify
    // the reference count of desired at all.
    return compare_exchange_weak(expected, shared_ptr_type{desired}, success, failure);
  }

  bool compare_exchange_strong(shared_ptr_type& expected, const shared_ptr_type& desired,
                               std::memory_order success, std::memory_order failure) {
    // See the note on the copying compare_exchange_weak above: prefer the rvalue
    // version, which never touches the reference count of desired.
    return compare_exchange_strong(expected, shared_ptr_type{desired}, success, failure);
  }

  bool compare_exchange_strong(shared_ptr_type& expected, const shared_ptr_type& desired,
                               std::memory_order order = std::memory_order_seq_cst) {
    return compare_exchange_strong(expected, desired, order, details::default_failure_memory_order(order));
  }

  bool compare_exchange_weak(shared_ptr_type& expected, const shared_ptr_type& desired,
                             std::memory_order order = std::memory_order_seq_cst) {
    return compare_exchange_weak(expected, desired, order, details::default_failure_memory_order(order));
  }

  bool compare_exchange_strong(shared_ptr_type& expected, shared_ptr_type&& desired,
                               std::memory_order order = std::memory_order_seq_cst) {
    return compare_exchange_strong(expected, std::move(desired), order, details::default_failure_memory_order(order));
  }

  bool compare_exchange_weak(shared_ptr_type& expected, shared_ptr_type&& desired,
                             std::memory_order order = std::memory_order_seq_cst) {
    return compare_exchange_weak(expected, std::move(desired), order, details::default_failure_memory_order(order));
  }

 private:

  // Wrap a control block into a shared_ptr without adjusting the reference
  // count; the caller must already own one unit of the strong count.
  static shared_ptr_type make_shared_from_ctrl_block(control_block_type* control_block_) {
    if (control_block_) {
      T* ptr = static_cast<T*>(control_block_->get_ptr());
      return shared_ptr_type{ptr, control_block_};
    }
    else {
      return shared_ptr_type{nullptr};
    }
  }

  mutable std::atomic<control_block_type*> control_block;
};

}  // namespace parlay
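
// End-to-end sketch: a Treiber stack is the classic client of atomic_shared_ptr,
// since a popping thread can safely follow `next` in a node that other threads
// are concurrently unlinking. Everything below is illustrative only; `stack`
// and `node` are stand-in names, and shared_ptr's usual raw-pointer and copy
// constructors are assumed.
//
//   template<typename T>
//   class stack {
//     struct node { T value; parlay::shared_ptr<node> next; };
//     parlay::atomic_shared_ptr<node> head;
//   public:
//     void push(T value) {
//       auto n = parlay::shared_ptr<node>(new node{std::move(value), head.load()});
//       // On failure, n->next is refreshed with the current head, so the node
//       // can be re-linked and retried without being rebuilt.
//       while (!head.compare_exchange_weak(n->next, std::move(n))) { }
//     }
//     bool try_pop(T& out) {
//       auto old = head.load();
//       while (old && !head.compare_exchange_weak(old, parlay::shared_ptr<node>(old->next))) { }
//       if (!old) return false;
//       out = std::move(old->value);  // our shared_ptr keeps the node alive
//       return true;
//     }
//   };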