react-native-audio-api 0.11.0-nightly-6ba0571-20251210 → 0.11.0-nightly-c0ffb48-20251211
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/android/src/main/cpp/audioapi/android/core/utils/AudioDecoder.cpp +0 -13
- package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +1 -1
- package/common/cpp/audioapi/core/effects/BiquadFilterNode.h +4 -4
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +2 -2
- package/common/cpp/audioapi/core/sources/StreamerNode.cpp +1 -0
- package/common/cpp/audioapi/core/sources/StreamerNode.h +1 -3
- package/common/cpp/audioapi/utils/AlignedAllocator.hpp +19 -16
- package/common/cpp/audioapi/utils/Benchmark.hpp +50 -47
- package/common/cpp/audioapi/utils/CrossThreadEventScheduler.hpp +12 -9
- package/common/cpp/audioapi/utils/MoveOnlyFunction.hpp +17 -15
- package/common/cpp/audioapi/utils/RingBiDirectionalBuffer.hpp +22 -28
- package/common/cpp/audioapi/utils/SpscChannel.hpp +329 -305
- package/common/cpp/audioapi/utils/ThreadPool.hpp +54 -28
- package/common/cpp/test/CMakeLists.txt +2 -8
- package/package.json +2 -2
package/common/cpp/audioapi/utils/SpscChannel.hpp

@@ -1,58 +1,58 @@
 #pragma once
 
+#include <algorithm>
 #include <atomic>
 #include <memory>
-#include <algorithm>
 #include <thread>
 #include <type_traits>
-
+#include <utility>
 
 namespace audioapi::channels::spsc {
 
 /// @brief Overflow strategy for sender when the channel is full
 enum class OverflowStrategy {
-
-
+  /// @brief Block and wait for space (default behavior)
+  WAIT_ON_FULL,
 
-
-
+  /// @brief Overwrite the oldest unread element
+  OVERWRITE_ON_FULL
 };
 
 /// @brief Wait strategy for receiver and sender when looping and trying
 enum class WaitStrategy {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  /// @brief Busy loop waiting strategy
+  /// @note should be used when low latency is required and channel is not expected to wait
+  /// @note should be definitely used with OverflowStrategy::OVERWRITE_ON_FULL
+  /// @note it uses `asm volatile ("" ::: "memory")` to prevent harmful compiler optimizations
+  BUSY_LOOP,
+
+  /// @brief Yielding waiting strategy
+  /// @note should be used when low latency is not critical and channel is expected to wait
+  /// @note it uses std::this_thread::yield under the hood
+  YIELD,
+
+  /// @brief Atomic waiting strategy
+  /// @note should be used when low latency is required and channel is expected to wait for longer
+  /// @note it uses std::atomic_wait under the hood
+  ATOMIC_WAIT,
 };
 
 /// @brief Response status for channel operations
 enum class ResponseStatus {
-
-
-
+  SUCCESS,
+  CHANNEL_FULL,
+  CHANNEL_EMPTY,
 
-
-
-
+  /// @brief Indicates that the last value is being overwritten or read so try fails
+  /// @note This status is only returned if given channel supports OVERWRITE_ON_FULL strategy
+  SKIP_DUE_TO_OVERWRITE
 };
 
-template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
 class Sender;
-template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
 class Receiver;
-template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
 class InnerChannel;
 
 /// @brief Create a bounded single-producer, single-consumer channel
@@ -61,134 +61,150 @@ class InnerChannel;
 /// @tparam Strategy The overflow strategy (default: WAIT_ON_FULL)
 /// @tparam Wait The wait strategy used when looping and trying to send or receive (default: BUSY_LOOP)
 /// @return A pair of sender and receiver for the channel
-template <
+template <
+    typename T,
+    OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+    WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
 std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>> channel(size_t capacity) {
-
-
+  auto channel = std::make_shared<InnerChannel<T, Strategy, Wait>>(capacity);
+  return {Sender<T, Strategy, Wait>(channel), Receiver<T, Strategy, Wait>(channel)};
 }
 
 /// @brief Sender for a single-producer, single-consumer channel
 /// @tparam T The type of values sent through the channel
 /// @tparam Strategy The overflow strategy used by the channel
 /// It allows to send values to the channel. It is designed to be used only from one thread at a time.
-template <
+template <
+    typename T,
+    OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+    WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
 class Sender {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-  }
+  /// Disallows sender creation outside of channel function
+  explicit Sender(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
+
+ public:
+  /// @brief Default constructor
+  /// @note required to have sender as class member
+  Sender() = default;
+  Sender(const Sender &) = delete;
+  Sender &operator=(const Sender &) = delete;
+
+  Sender &operator=(Sender &&other) noexcept {
+    channel_ = std::move(other.channel_);
+    return *this;
+  }
+  Sender(Sender &&other) noexcept : channel_(std::move(other.channel_)) {}
+
+  /// @brief Try to send a value to the channel
+  /// @param value The value to send
+  /// @return ResponseStatus indicating the result of the operation
+  /// @note this function is lock-free and wait-free
+  template <typename U>
+  ResponseStatus try_send(U &&value) noexcept(std::is_nothrow_constructible_v<T, U &&>) {
+    return channel_->try_send(std::forward<U>(value));
+  }
+
+  /// @brief Send a value to the channel (copy version)
+  /// @param value The value to send
+  /// @note This function is lock-free but may block if the channel is full
+  void send(const T &value) noexcept(std::is_nothrow_constructible_v<T, const T &>) {
+    if (channel_->try_send(value) != ResponseStatus::SUCCESS) [[unlikely]] {
+      do {
+        if constexpr (Wait == WaitStrategy::YIELD) {
+          std::this_thread::yield(); // Yield to allow other threads to run
+        } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+          asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+        } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+          channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
+        }
+      } while (channel_->try_send(value) != ResponseStatus::SUCCESS);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-  }
+  }
+
+  /// @brief Send a value to the channel (move version)
+  /// @param value The value to send
+  /// @note This function is lock-free but may block if the channel is full.
+  void send(T &&value) noexcept(std::is_nothrow_move_constructible_v<T>) {
+    if (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS) [[unlikely]] {
+      do {
+        if constexpr (Wait == WaitStrategy::YIELD) {
+          std::this_thread::yield(); // Yield to allow other threads to run
+        } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+          asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+        } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+          channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
+        }
+      } while (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS);
     }
+  }
 
- private:
-
+ private:
+  std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;
 
-
+  friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>>
+  channel<T, Strategy, Wait>(size_t capacity);
 };
 
 /// @brief Receiver for a single-producer, single-consumer channel
 /// @tparam T The type of values sent through the channel
 /// @tparam Strategy The overflow strategy used by the channel
 /// It allows to receive values from the channel. It is designed to be used only from one thread at a time.
-template <
+template <
+    typename T,
+    OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+    WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
 class Receiver {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  /// Disallows receiver creation outside of channel function
+  explicit Receiver(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
+
+ public:
+  /// @brief Default constructor
+  /// @note required to have receiver as class member
+  Receiver() = default;
+  Receiver(const Receiver &) = delete;
+  Receiver &operator=(const Receiver &) = delete;
+
+  Receiver &operator=(Receiver &&other) noexcept {
+    channel_ = std::move(other.channel_);
+    return *this;
+  }
+  Receiver(Receiver &&other) noexcept : channel_(std::move(other.channel_)) {}
+
+  /// @brief Try to receive a value from the channel
+  /// @param value The received value
+  /// @return ResponseStatus indicating the result of the operation
+  /// @note This function is lock-free and wait-free.
+  ResponseStatus try_receive(T &value) noexcept(
+      std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
+    return channel_->try_receive(value);
+  }
+
+  /// @brief Receive a value from the channel
+  /// @return The received value
+  /// @note This function is lock-free but may block if the channel is empty.
+  T receive() noexcept(
+      std::is_nothrow_default_constructible_v<T> && std::is_nothrow_move_assignable_v<T> &&
+      std::is_nothrow_destructible_v<T>) {
+    T value;
+    if (channel_->try_receive(value) != ResponseStatus::SUCCESS) [[unlikely]] {
+      do {
+        if constexpr (Wait == WaitStrategy::YIELD) {
+          std::this_thread::yield(); // Yield to allow other threads to run
+        } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+          asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+        } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+          channel_->sendCursor_.wait(channel_->sendCursorCache_, std::memory_order_acquire);
         }
-
+      } while (channel_->try_receive(value) != ResponseStatus::SUCCESS);
     }
+    return value;
+  }
 
- private:
-
+ private:
+  std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;
 
-
+  friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>>
+  channel<T, Strategy, Wait>(size_t capacity);
 };
 
 /// @brief Inner channel implementation for the SPSC queue
@@ -197,210 +213,218 @@ private:
 /// @tparam Wait The wait strategy used for internal operations
 /// This class is not intended to be used directly by users.
 /// @note this class is not thread safe and should be wrapped in std::shared_ptr
-template <
+template <
+    typename T,
+    OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+    WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
 class InnerChannel {
- public:
-
-
-
-
-
-
+ public:
+  /// @brief Construct a channel with a given capacity
+  /// @param capacity The minimum capacity of the channel, for performance it will be allocated with next power of 2
+  /// Uses raw memory allocation so the T type is not required to provide default constructors
+  /// alignment is the key for performance it makes sure that objects are properly aligned in memory for faster access
+  explicit InnerChannel(size_t capacity)
+      : capacity_(next_power_of_2(capacity)),
         capacity_mask_(capacity_ - 1),
-        buffer_(
+        buffer_(
+            static_cast<T *>(operator new[](capacity_ * sizeof(T), std::align_val_t{alignof(T)}))) {
 
-
-
-
+    // Initialize cache values for better performance
+    rcvCursorCache_ = 0;
+    sendCursorCache_ = 0;
 
-
-
-
-    }
+    // Initialize reader state for overwrite strategy
+    if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+      oldestOccupied_.store(false, std::memory_order_relaxed);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-    // Deallocate the buffer
-    ::operator delete[](
-        buffer_,
-        capacity_ * sizeof(T),
-        std::align_val_t{alignof(T)});
+  }
+
+  /// This should not be called if there is existing handle to reader or writer
+  ~InnerChannel() {
+    size_t sendCursor = sendCursor_.load(std::memory_order_seq_cst);
+    size_t rcvCursor = rcvCursor_.load(std::memory_order_seq_cst);
+
+    // Call destructors for all elements in the buffer
+    size_t i = rcvCursor;
+    while (i != sendCursor) {
+      buffer_[i].~T();
+      i = next_index(i);
    }
 
-
-
-
-
-
-
-
-
-
-
-
+    // Deallocate the buffer
+    ::operator delete[](buffer_, capacity_ * sizeof(T), std::align_val_t{alignof(T)});
+  }
+
+  /// @brief Try to send a value to the channel
+  /// @param value The value to send
+  /// @return ResponseStatus indicating the result of the operation
+  /// @note This function is lock-free and wait-free
+  template <typename U>
+  ResponseStatus try_send(U &&value) noexcept(std::is_nothrow_constructible_v<T, U &&>) {
+    if constexpr (Strategy == OverflowStrategy::WAIT_ON_FULL) {
+      return try_send_wait_on_full(std::forward<U>(value));
+    } else {
+      return try_send_overwrite_on_full(std::forward<U>(value));
+    }
+  }
+
+  /// @brief Try to receive a value from the channel
+  /// @param value The variable to store the received value
+  /// @return ResponseStatus indicating the result of the operation
+  /// @note This function is lock-free and wait-free
+  ResponseStatus try_receive(T &value) noexcept(
+      std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
+    if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+      // Set reader active flag to prevent overwrites during read
+      bool isOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
+      if (isOccupied) {
+        // It means that the oldest element is being overwritten so we cannot read
+        return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
+      }
    }
 
-
-
-  /// @return ResponseStatus indicating the result of the operation
-  /// @note This function is lock-free and wait-free
-  ResponseStatus try_receive(T& value) noexcept(std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
-    if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
-      // Set reader active flag to prevent overwrites during read
-      bool isOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
-      if (isOccupied) {
-        // It means that the oldest element is being overwritten so we cannot read
-        return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
-      }
-    }
-
-    size_t rcvCursor = rcvCursor_.load(std::memory_order_relaxed); // only receiver thread reads this
-
-    if (rcvCursor == sendCursorCache_) {
-      // Refresh cache
-      sendCursorCache_ = sendCursor_.load(std::memory_order_acquire);
-      if (rcvCursor == sendCursorCache_) {
-        if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
-          oldestOccupied_.store(false, std::memory_order_release);
-        }
+    size_t rcvCursor =
+        rcvCursor_.load(std::memory_order_relaxed); // only receiver thread reads this
 
-
-
+    if (rcvCursor == sendCursorCache_) {
+      // Refresh cache
+      sendCursorCache_ = sendCursor_.load(std::memory_order_acquire);
+      if (rcvCursor == sendCursorCache_) {
+        if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+          oldestOccupied_.store(false, std::memory_order_release);
        }
 
-
-
-
-    rcvCursor_.store(next_index(rcvCursor), std::memory_order_release);
+        return ResponseStatus::CHANNEL_EMPTY;
+      }
+    }
 
-
-
-    }
+    value = std::move(buffer_[rcvCursor]);
+    buffer_[rcvCursor].~T(); // Call destructor
 
-
-      oldestOccupied_.store(false, std::memory_order_release);
-    }
+    rcvCursor_.store(next_index(rcvCursor), std::memory_order_release);
 
-
+    if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+      rcvCursor_.notify_one(); // Notify sender that a value has been received
    }
 
-
-
-
-  inline ResponseStatus try_send_wait_on_full(U&& value) noexcept(std::is_nothrow_constructible_v<T, U&&>) {
-    size_t sendCursor = sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
-    size_t next_sendCursor = next_index(sendCursor);
-
-    if (next_sendCursor == rcvCursorCache_) {
-      // Refresh the cache
-      rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
-      if (next_sendCursor == rcvCursorCache_) return ResponseStatus::CHANNEL_FULL;
-    }
+    if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+      oldestOccupied_.store(false, std::memory_order_release);
+    }
 
-
-
+    return ResponseStatus::SUCCESS;
+  }
+
+ private:
+  /// @brief Try to send with WAIT_ON_FULL strategy (original behavior)
+  template <typename U>
+  inline ResponseStatus try_send_wait_on_full(U &&value) noexcept(
+      std::is_nothrow_constructible_v<T, U &&>) {
+    size_t sendCursor =
+        sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
+    size_t next_sendCursor = next_index(sendCursor);
+
+    if (next_sendCursor == rcvCursorCache_) {
+      // Refresh the cache
+      rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
+      if (next_sendCursor == rcvCursorCache_)
+        return ResponseStatus::CHANNEL_FULL;
+    }
 
-
+    // Construct the new element in place
+    new (&buffer_[sendCursor]) T(std::forward<U>(value));
 
-
-    sendCursor_.notify_one(); // Notify receiver that a value has been sent
-  }
+    sendCursor_.store(next_sendCursor, std::memory_order_release);
 
-
+    if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+      sendCursor_.notify_one(); // Notify receiver that a value has been sent
    }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        if (rcvCursorCache_ == newestRcvCursor) {
-          rcvCursorCache_ = next_index(newestRcvCursor);
-          rcvCursor_.store(rcvCursorCache_, std::memory_order_release);
-        } else {
-          rcvCursorCache_ = newestRcvCursor;
-        }
-
-        oldestOccupied_.store(false, std::memory_order_release);
-      }
+    return ResponseStatus::SUCCESS;
+  }
+
+  /// @brief Try to send with OVERWRITE_ON_FULL strategy
+  template <typename U>
+  inline ResponseStatus try_send_overwrite_on_full(U &&value) noexcept(
+      std::is_nothrow_constructible_v<T, U &&>) {
+    size_t sendCursor =
+        sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
+    size_t next_sendCursor = next_index(sendCursor);
+
+    if (next_sendCursor == rcvCursorCache_) {
+      // Refresh the cache
+      rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
+      if (next_sendCursor == rcvCursorCache_) {
+        bool isOldestOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
+        if (isOldestOccupied) {
+          // If the oldest element is occupied, we cannot overwrite
+          return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
        }
 
-
-    new (&buffer_[sendCursor]) T(std::forward<U>(value));
-    sendCursor_.store(next_sendCursor, std::memory_order_release);
+        size_t newestRcvCursor = rcvCursor_.load(std::memory_order_acquire);
 
-
-
+        /// If the receiver did not advance, we can safely advance the cursor
+        if (rcvCursorCache_ == newestRcvCursor) {
+          rcvCursorCache_ = next_index(newestRcvCursor);
+          rcvCursor_.store(rcvCursorCache_, std::memory_order_release);
+        } else {
+          rcvCursorCache_ = newestRcvCursor;
        }
 
-
+        oldestOccupied_.store(false, std::memory_order_release);
+      }
    }
 
-
-
-
-  static constexpr size_t next_power_of_2(const size_t n) noexcept {
-    if (n <= 1) return 1;
-
-    // Use bit manipulation for efficiency
-    size_t power = 1;
-    while (power < n) {
-      power <<= 1;
-    }
-    return power;
-  }
+    // Normal case: buffer not full
+    new (&buffer_[sendCursor]) T(std::forward<U>(value));
+    sendCursor_.store(next_sendCursor, std::memory_order_release);
 
-
-
-  /// @return The next index
-  /// @note it might not be used for performance but it is a good reference
-  inline size_t next_index(const size_t val) const noexcept {
-    return (val + 1) & capacity_mask_;
+    if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+      sendCursor_.notify_one(); // Notify receiver that a value has been sent
    }
 
-
-
-  T* buffer_;
-
-  /// Producer-side data (accessed by sender thread)
-  alignas(64) std::atomic<size_t> sendCursor_{0};
-  alignas(64) size_t rcvCursorCache_{0}; // reduces cache coherency
+    return ResponseStatus::SUCCESS;
+  }
 
-
-
-
+  /// @brief Calculate the next power of 2 greater than or equal to n
+  /// @param n The input value
+  /// @return The next power of 2
+  static constexpr size_t next_power_of_2(const size_t n) noexcept {
+    if (n <= 1)
+      return 1;
 
-
-
-
-
-
+    // Use bit manipulation for efficiency
+    size_t power = 1;
+    while (power < n) {
+      power <<= 1;
+    }
+    return power;
+  }
+
+  /// @brief Get the next index in a circular buffer
+  /// @param val The current index
+  /// @return The next index
+  /// @note it might not be used for performance but it is a good reference
+  inline size_t next_index(const size_t val) const noexcept {
+    return (val + 1) & capacity_mask_;
+  }
+
+  const size_t capacity_;
+  const size_t capacity_mask_; // mask for bitwise next_index
+  T *buffer_;
+
+  /// Producer-side data (accessed by sender thread)
+  alignas(64) std::atomic<size_t> sendCursor_{0};
+  alignas(64) size_t rcvCursorCache_{0}; // reduces cache coherency
+
+  /// Consumer-side data (accessed by receiver thread)
+  alignas(64) std::atomic<size_t> rcvCursor_{0};
+  alignas(64) size_t sendCursorCache_{0}; // reduces cache coherency
+
+  /// Flag indicating if the oldest element is occupied
+  alignas(64) std::atomic<bool> oldestOccupied_{false};
+
+  friend class Sender<T, Strategy, Wait>;
+  friend class Receiver<T, Strategy, Wait>;
 };
 
-
-} // namespace channels::spsc
+} // namespace audioapi::channels::spsc