react-native-audio-api 0.11.0-nightly-6ba0571-20251209 → 0.11.0-nightly-c0ffb48-20251211

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,58 +1,58 @@
  #pragma once

+ #include <algorithm>
  #include <atomic>
  #include <memory>
- #include <algorithm>
  #include <thread>
  #include <type_traits>
-
+ #include <utility>

  namespace audioapi::channels::spsc {

  /// @brief Overflow strategy for sender when the channel is full
  enum class OverflowStrategy {
- /// @brief Block and wait for space (default behavior)
- WAIT_ON_FULL,
+ /// @brief Block and wait for space (default behavior)
+ WAIT_ON_FULL,

- /// @brief Overwrite the oldest unread element
- OVERWRITE_ON_FULL
+ /// @brief Overwrite the oldest unread element
+ OVERWRITE_ON_FULL
  };

  /// @brief Wait strategy for receiver and sender when looping and trying
  enum class WaitStrategy {
- /// @brief Busy loop waiting strategy
- /// @note should be used when low latency is required and channel is not expected to wait
- /// @note should be definitely used with OverflowStrategy::OVERWRITE_ON_FULL
- /// @note it uses `asm volatile ("" ::: "memory")` to prevent harmful compiler optimizations
- BUSY_LOOP,
-
- /// @brief Yielding waiting strategy
- /// @note should be used when low latency is not critical and channel is expected to wait
- /// @note it uses std::this_thread::yield under the hood
- YIELD,
-
- /// @brief Atomic waiting strategy
- /// @note should be used when low latency is required and channel is expected to wait for longer
- /// @note it uses std::atomic_wait under the hood
- ATOMIC_WAIT,
+ /// @brief Busy loop waiting strategy
+ /// @note should be used when low latency is required and channel is not expected to wait
+ /// @note should be definitely used with OverflowStrategy::OVERWRITE_ON_FULL
+ /// @note it uses `asm volatile ("" ::: "memory")` to prevent harmful compiler optimizations
+ BUSY_LOOP,
+
+ /// @brief Yielding waiting strategy
+ /// @note should be used when low latency is not critical and channel is expected to wait
+ /// @note it uses std::this_thread::yield under the hood
+ YIELD,
+
+ /// @brief Atomic waiting strategy
+ /// @note should be used when low latency is required and channel is expected to wait for longer
+ /// @note it uses std::atomic_wait under the hood
+ ATOMIC_WAIT,
  };

  /// @brief Response status for channel operations
  enum class ResponseStatus {
- SUCCESS,
- CHANNEL_FULL,
- CHANNEL_EMPTY,
+ SUCCESS,
+ CHANNEL_FULL,
+ CHANNEL_EMPTY,

- /// @brief Indicates that the last value is being overwritten or read so try fails
- /// @note This status is only returned if given channel supports OVERWRITE_ON_FULL strategy
- SKIP_DUE_TO_OVERWRITE
+ /// @brief Indicates that the last value is being overwritten or read so try fails
+ /// @note This status is only returned if given channel supports OVERWRITE_ON_FULL strategy
+ SKIP_DUE_TO_OVERWRITE
  };

- template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+ template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
  class Sender;
- template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+ template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
  class Receiver;
- template<typename T, OverflowStrategy Strategy, WaitStrategy Wait>
+ template <typename T, OverflowStrategy Strategy, WaitStrategy Wait>
  class InnerChannel;

  /// @brief Create a bounded single-producer, single-consumer channel
@@ -61,134 +61,150 @@ class InnerChannel;
  /// @tparam Strategy The overflow strategy (default: WAIT_ON_FULL)
  /// @tparam Wait The wait strategy used when looping and trying to send or receive (default: BUSY_LOOP)
  /// @return A pair of sender and receiver for the channel
- template <typename T, OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL, WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
+ template <
+ typename T,
+ OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+ WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
  std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>> channel(size_t capacity) {
- auto channel = std::make_shared<InnerChannel<T, Strategy, Wait>>(capacity);
- return { Sender<T, Strategy, Wait>(channel), Receiver<T, Strategy, Wait>(channel) };
+ auto channel = std::make_shared<InnerChannel<T, Strategy, Wait>>(capacity);
+ return {Sender<T, Strategy, Wait>(channel), Receiver<T, Strategy, Wait>(channel)};
  }

  /// @brief Sender for a single-producer, single-consumer channel
  /// @tparam T The type of values sent through the channel
  /// @tparam Strategy The overflow strategy used by the channel
  /// It allows to send values to the channel. It is designed to be used only from one thread at a time.
- template <typename T, OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL, WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
+ template <
+ typename T,
+ OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+ WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
  class Sender {
- /// Disallows sender creation outside of channel function
- explicit Sender(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
- public:
- /// @brief Default constructor
- /// @note required to have sender as class member
- Sender() = default;
- Sender(const Sender&) = delete;
- Sender& operator=(const Sender&) = delete;
-
- Sender& operator=(Sender&& other) noexcept {
- channel_ = std::move(other.channel_);
- return *this;
- }
- Sender(Sender&& other) noexcept : channel_(std::move(other.channel_)) {}
-
- /// @brief Try to send a value to the channel
- /// @param value The value to send
- /// @return ResponseStatus indicating the result of the operation
- /// @note this function is lock-free and wait-free
- template<typename U>
- ResponseStatus try_send(U&& value) noexcept(std::is_nothrow_constructible_v<T, U&&>) {
- return channel_->try_send(std::forward<U>(value));
- }
-
- /// @brief Send a value to the channel (copy version)
- /// @param value The value to send
- /// @note This function is lock-free but may block if the channel is full
- void send(const T& value) noexcept(std::is_nothrow_constructible_v<T, const T&>) {
- if (channel_->try_send(value) != ResponseStatus::SUCCESS) [[ unlikely ]] {
- do {
- if constexpr (Wait == WaitStrategy::YIELD) {
- std::this_thread::yield(); // Yield to allow other threads to run
- } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
- asm volatile ("" ::: "memory"); // Busy loop, just spin with compiler barrier
- } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
- }
- } while (channel_->try_send(value) != ResponseStatus::SUCCESS);
- }
+ /// Disallows sender creation outside of channel function
+ explicit Sender(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
+
+ public:
+ /// @brief Default constructor
+ /// @note required to have sender as class member
+ Sender() = default;
+ Sender(const Sender &) = delete;
+ Sender &operator=(const Sender &) = delete;
+
+ Sender &operator=(Sender &&other) noexcept {
+ channel_ = std::move(other.channel_);
+ return *this;
+ }
+ Sender(Sender &&other) noexcept : channel_(std::move(other.channel_)) {}
+
+ /// @brief Try to send a value to the channel
+ /// @param value The value to send
+ /// @return ResponseStatus indicating the result of the operation
+ /// @note this function is lock-free and wait-free
+ template <typename U>
+ ResponseStatus try_send(U &&value) noexcept(std::is_nothrow_constructible_v<T, U &&>) {
+ return channel_->try_send(std::forward<U>(value));
+ }
+
+ /// @brief Send a value to the channel (copy version)
+ /// @param value The value to send
+ /// @note This function is lock-free but may block if the channel is full
+ void send(const T &value) noexcept(std::is_nothrow_constructible_v<T, const T &>) {
+ if (channel_->try_send(value) != ResponseStatus::SUCCESS) [[unlikely]] {
+ do {
+ if constexpr (Wait == WaitStrategy::YIELD) {
+ std::this_thread::yield(); // Yield to allow other threads to run
+ } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+ asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+ } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
+ }
+ } while (channel_->try_send(value) != ResponseStatus::SUCCESS);
  }
-
- /// @brief Send a value to the channel (move version)
- /// @param value The value to send
- /// @note This function is lock-free but may block if the channel is full.
- void send(T&& value) noexcept(std::is_nothrow_move_constructible_v<T>) {
- if (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS) [[ unlikely ]] {
- do {
- if constexpr (Wait == WaitStrategy::YIELD) {
- std::this_thread::yield(); // Yield to allow other threads to run
- } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
- asm volatile ("" ::: "memory"); // Busy loop, just spin with compiler barrier
- } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
- }
- } while (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS);
- }
+ }
+
+ /// @brief Send a value to the channel (move version)
+ /// @param value The value to send
+ /// @note This function is lock-free but may block if the channel is full.
+ void send(T &&value) noexcept(std::is_nothrow_move_constructible_v<T>) {
+ if (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS) [[unlikely]] {
+ do {
+ if constexpr (Wait == WaitStrategy::YIELD) {
+ std::this_thread::yield(); // Yield to allow other threads to run
+ } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+ asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+ } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ channel_->rcvCursor_.wait(channel_->rcvCursorCache_, std::memory_order_acquire);
+ }
+ } while (channel_->try_send(std::move(value)) != ResponseStatus::SUCCESS);
  }
+ }

- private:
- std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;
+ private:
+ std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;

- friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>> channel<T, Strategy, Wait>(size_t capacity);
+ friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>>
+ channel<T, Strategy, Wait>(size_t capacity);
  };

  /// @brief Receiver for a single-producer, single-consumer channel
  /// @tparam T The type of values sent through the channel
  /// @tparam Strategy The overflow strategy used by the channel
  /// It allows to receive values from the channel. It is designed to be used only from one thread at a time.
- template <typename T, OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL, WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
+ template <
+ typename T,
+ OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+ WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
  class Receiver {
- /// Disallows receiver creation outside of channel function
- explicit Receiver(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
- public:
- /// @brief Default constructor
- /// @note required to have receiver as class member
- Receiver() = default;
- Receiver(const Receiver&) = delete;
- Receiver& operator=(const Receiver&) = delete;
-
- Receiver& operator=(Receiver&& other) noexcept {
- channel_ = std::move(other.channel_);
- return *this;
- }
- Receiver(Receiver&& other) noexcept : channel_(std::move(other.channel_)) {}
-
- /// @brief Try to receive a value from the channel
- /// @param value The received value
- /// @return ResponseStatus indicating the result of the operation
- /// @note This function is lock-free and wait-free.
- ResponseStatus try_receive(T& value) noexcept(std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
- return channel_->try_receive(value);
- }
-
- /// @brief Receive a value from the channel
- /// @return The received value
- /// @note This function is lock-free but may block if the channel is empty.
- T receive() noexcept(std::is_nothrow_default_constructible_v<T> && std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
- T value;
- if (channel_->try_receive(value) != ResponseStatus::SUCCESS) [[ unlikely ]] {
- do {
- if constexpr (Wait == WaitStrategy::YIELD) {
- std::this_thread::yield(); // Yield to allow other threads to run
- } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
- asm volatile ("" ::: "memory"); // Busy loop, just spin with compiler barrier
- } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- channel_->sendCursor_.wait(channel_->sendCursorCache_, std::memory_order_acquire);
- }
- } while (channel_->try_receive(value) != ResponseStatus::SUCCESS);
+ /// Disallows receiver creation outside of channel function
+ explicit Receiver(std::shared_ptr<InnerChannel<T, Strategy, Wait>> chan) : channel_(chan) {}
+
+ public:
+ /// @brief Default constructor
+ /// @note required to have receiver as class member
+ Receiver() = default;
+ Receiver(const Receiver &) = delete;
+ Receiver &operator=(const Receiver &) = delete;
+
+ Receiver &operator=(Receiver &&other) noexcept {
+ channel_ = std::move(other.channel_);
+ return *this;
+ }
+ Receiver(Receiver &&other) noexcept : channel_(std::move(other.channel_)) {}
+
+ /// @brief Try to receive a value from the channel
+ /// @param value The received value
+ /// @return ResponseStatus indicating the result of the operation
+ /// @note This function is lock-free and wait-free.
+ ResponseStatus try_receive(T &value) noexcept(
+ std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
+ return channel_->try_receive(value);
+ }
+
+ /// @brief Receive a value from the channel
+ /// @return The received value
+ /// @note This function is lock-free but may block if the channel is empty.
+ T receive() noexcept(
+ std::is_nothrow_default_constructible_v<T> && std::is_nothrow_move_assignable_v<T> &&
+ std::is_nothrow_destructible_v<T>) {
+ T value;
+ if (channel_->try_receive(value) != ResponseStatus::SUCCESS) [[unlikely]] {
+ do {
+ if constexpr (Wait == WaitStrategy::YIELD) {
+ std::this_thread::yield(); // Yield to allow other threads to run
+ } else if constexpr (Wait == WaitStrategy::BUSY_LOOP) {
+ asm volatile("" ::: "memory"); // Busy loop, just spin with compiler barrier
+ } else if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ channel_->sendCursor_.wait(channel_->sendCursorCache_, std::memory_order_acquire);
  }
- return value;
+ } while (channel_->try_receive(value) != ResponseStatus::SUCCESS);
  }
+ return value;
+ }

- private:
- std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;
+ private:
+ std::shared_ptr<InnerChannel<T, Strategy, Wait>> channel_;

- friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>> channel<T, Strategy, Wait>(size_t capacity);
+ friend std::pair<Sender<T, Strategy, Wait>, Receiver<T, Strategy, Wait>>
+ channel<T, Strategy, Wait>(size_t capacity);
  };

  /// @brief Inner channel implementation for the SPSC queue
@@ -197,210 +213,218 @@ private:
  /// @tparam Wait The wait strategy used for internal operations
  /// This class is not intended to be used directly by users.
  /// @note this class is not thread safe and should be wrapped in std::shared_ptr
- template <typename T, OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL, WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
+ template <
+ typename T,
+ OverflowStrategy Strategy = OverflowStrategy::WAIT_ON_FULL,
+ WaitStrategy Wait = WaitStrategy::BUSY_LOOP>
  class InnerChannel {
- public:
- /// @brief Construct a channel with a given capacity
- /// @param capacity The minimum capacity of the channel, for performance it will be allocated with next power of 2
- /// Uses raw memory allocation so the T type is not required to provide default constructors
- /// alignment is the key for performance it makes sure that objects are properly aligned in memory for faster access
- explicit InnerChannel(size_t capacity) :
- capacity_(next_power_of_2(capacity)),
+ public:
+ /// @brief Construct a channel with a given capacity
+ /// @param capacity The minimum capacity of the channel, for performance it will be allocated with next power of 2
+ /// Uses raw memory allocation so the T type is not required to provide default constructors
+ /// alignment is the key for performance it makes sure that objects are properly aligned in memory for faster access
+ explicit InnerChannel(size_t capacity)
+ : capacity_(next_power_of_2(capacity)),
  capacity_mask_(capacity_ - 1),
- buffer_(static_cast<T*>(operator new[](capacity_ * sizeof(T), std::align_val_t{alignof(T)}))) {
+ buffer_(
+ static_cast<T *>(operator new[](capacity_ * sizeof(T), std::align_val_t{alignof(T)}))) {

- // Initialize cache values for better performance
- rcvCursorCache_ = 0;
- sendCursorCache_ = 0;
+ // Initialize cache values for better performance
+ rcvCursorCache_ = 0;
+ sendCursorCache_ = 0;

- // Initialize reader state for overwrite strategy
- if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
- oldestOccupied_.store(false, std::memory_order_relaxed);
- }
+ // Initialize reader state for overwrite strategy
+ if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+ oldestOccupied_.store(false, std::memory_order_relaxed);
  }
-
- /// This should not be called if there is existing handle to reader or writer
- ~InnerChannel() {
- size_t sendCursor = sendCursor_.load(std::memory_order_seq_cst);
- size_t rcvCursor = rcvCursor_.load(std::memory_order_seq_cst);
-
- // Call destructors for all elements in the buffer
- size_t i = rcvCursor;
- while (i != sendCursor) {
- buffer_[i].~T();
- i = next_index(i);
- }
-
- // Deallocate the buffer
- ::operator delete[](
- buffer_,
- capacity_ * sizeof(T),
- std::align_val_t{alignof(T)});
+ }
+
+ /// This should not be called if there is existing handle to reader or writer
+ ~InnerChannel() {
+ size_t sendCursor = sendCursor_.load(std::memory_order_seq_cst);
+ size_t rcvCursor = rcvCursor_.load(std::memory_order_seq_cst);
+
+ // Call destructors for all elements in the buffer
+ size_t i = rcvCursor;
+ while (i != sendCursor) {
+ buffer_[i].~T();
+ i = next_index(i);
  }

- /// @brief Try to send a value to the channel
- /// @param value The value to send
- /// @return ResponseStatus indicating the result of the operation
- /// @note This function is lock-free and wait-free
- template<typename U>
- ResponseStatus try_send(U&& value) noexcept(std::is_nothrow_constructible_v<T, U&&>) {
- if constexpr (Strategy == OverflowStrategy::WAIT_ON_FULL) {
- return try_send_wait_on_full(std::forward<U>(value));
- } else {
- return try_send_overwrite_on_full(std::forward<U>(value));
- }
+ // Deallocate the buffer
+ ::operator delete[](buffer_, capacity_ * sizeof(T), std::align_val_t{alignof(T)});
+ }
+
+ /// @brief Try to send a value to the channel
+ /// @param value The value to send
+ /// @return ResponseStatus indicating the result of the operation
+ /// @note This function is lock-free and wait-free
+ template <typename U>
+ ResponseStatus try_send(U &&value) noexcept(std::is_nothrow_constructible_v<T, U &&>) {
+ if constexpr (Strategy == OverflowStrategy::WAIT_ON_FULL) {
+ return try_send_wait_on_full(std::forward<U>(value));
+ } else {
+ return try_send_overwrite_on_full(std::forward<U>(value));
+ }
+ }
+
+ /// @brief Try to receive a value from the channel
+ /// @param value The variable to store the received value
+ /// @return ResponseStatus indicating the result of the operation
+ /// @note This function is lock-free and wait-free
+ ResponseStatus try_receive(T &value) noexcept(
+ std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
+ if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+ // Set reader active flag to prevent overwrites during read
+ bool isOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
+ if (isOccupied) {
+ // It means that the oldest element is being overwritten so we cannot read
+ return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
+ }
  }

- /// @brief Try to receive a value from the channel
- /// @param value The variable to store the received value
- /// @return ResponseStatus indicating the result of the operation
- /// @note This function is lock-free and wait-free
- ResponseStatus try_receive(T& value) noexcept(std::is_nothrow_move_assignable_v<T> && std::is_nothrow_destructible_v<T>) {
- if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
- // Set reader active flag to prevent overwrites during read
- bool isOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
- if (isOccupied) {
- // It means that the oldest element is being overwritten so we cannot read
- return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
- }
- }
-
- size_t rcvCursor = rcvCursor_.load(std::memory_order_relaxed); // only receiver thread reads this
-
- if (rcvCursor == sendCursorCache_) {
- // Refresh cache
- sendCursorCache_ = sendCursor_.load(std::memory_order_acquire);
- if (rcvCursor == sendCursorCache_) {
- if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
- oldestOccupied_.store(false, std::memory_order_release);
- }
+ size_t rcvCursor =
+ rcvCursor_.load(std::memory_order_relaxed); // only receiver thread reads this

- return ResponseStatus::CHANNEL_EMPTY;
- }
+ if (rcvCursor == sendCursorCache_) {
+ // Refresh cache
+ sendCursorCache_ = sendCursor_.load(std::memory_order_acquire);
+ if (rcvCursor == sendCursorCache_) {
+ if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+ oldestOccupied_.store(false, std::memory_order_release);
  }

- value = std::move(buffer_[rcvCursor]);
- buffer_[rcvCursor].~T(); // Call destructor
-
- rcvCursor_.store(next_index(rcvCursor), std::memory_order_release);
+ return ResponseStatus::CHANNEL_EMPTY;
+ }
+ }

- if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- rcvCursor_.notify_one(); // Notify sender that a value has been received
- }
+ value = std::move(buffer_[rcvCursor]);
+ buffer_[rcvCursor].~T(); // Call destructor

- if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
- oldestOccupied_.store(false, std::memory_order_release);
- }
+ rcvCursor_.store(next_index(rcvCursor), std::memory_order_release);

- return ResponseStatus::SUCCESS;
+ if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ rcvCursor_.notify_one(); // Notify sender that a value has been received
  }

- private:
- /// @brief Try to send with WAIT_ON_FULL strategy (original behavior)
- template<typename U>
- inline ResponseStatus try_send_wait_on_full(U&& value) noexcept(std::is_nothrow_constructible_v<T, U&&>) {
- size_t sendCursor = sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
- size_t next_sendCursor = next_index(sendCursor);
-
- if (next_sendCursor == rcvCursorCache_) {
- // Refresh the cache
- rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
- if (next_sendCursor == rcvCursorCache_) return ResponseStatus::CHANNEL_FULL;
- }
+ if constexpr (Strategy == OverflowStrategy::OVERWRITE_ON_FULL) {
+ oldestOccupied_.store(false, std::memory_order_release);
+ }

- // Construct the new element in place
- new (&buffer_[sendCursor]) T(std::forward<U>(value));
+ return ResponseStatus::SUCCESS;
+ }
+
+ private:
+ /// @brief Try to send with WAIT_ON_FULL strategy (original behavior)
+ template <typename U>
+ inline ResponseStatus try_send_wait_on_full(U &&value) noexcept(
+ std::is_nothrow_constructible_v<T, U &&>) {
+ size_t sendCursor =
+ sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
+ size_t next_sendCursor = next_index(sendCursor);
+
+ if (next_sendCursor == rcvCursorCache_) {
+ // Refresh the cache
+ rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
+ if (next_sendCursor == rcvCursorCache_)
+ return ResponseStatus::CHANNEL_FULL;
+ }

- sendCursor_.store(next_sendCursor, std::memory_order_release);
+ // Construct the new element in place
+ new (&buffer_[sendCursor]) T(std::forward<U>(value));

- if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- sendCursor_.notify_one(); // Notify receiver that a value has been sent
- }
+ sendCursor_.store(next_sendCursor, std::memory_order_release);

- return ResponseStatus::SUCCESS;
+ if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ sendCursor_.notify_one(); // Notify receiver that a value has been sent
  }

- /// @brief Try to send with OVERWRITE_ON_FULL strategy
- template<typename U>
- inline ResponseStatus try_send_overwrite_on_full(U&& value) noexcept(std::is_nothrow_constructible_v<T, U&&>) {
- size_t sendCursor = sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
- size_t next_sendCursor = next_index(sendCursor);
-
- if (next_sendCursor == rcvCursorCache_) {
- // Refresh the cache
- rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
- if (next_sendCursor == rcvCursorCache_) {
- bool isOldestOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
- if (isOldestOccupied) {
- // If the oldest element is occupied, we cannot overwrite
- return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
- }
-
- size_t newestRcvCursor = rcvCursor_.load(std::memory_order_acquire);
-
- /// If the receiver did not advance, we can safely advance the cursor
- if (rcvCursorCache_ == newestRcvCursor) {
- rcvCursorCache_ = next_index(newestRcvCursor);
- rcvCursor_.store(rcvCursorCache_, std::memory_order_release);
- } else {
- rcvCursorCache_ = newestRcvCursor;
- }
-
- oldestOccupied_.store(false, std::memory_order_release);
- }
+ return ResponseStatus::SUCCESS;
+ }
+
+ /// @brief Try to send with OVERWRITE_ON_FULL strategy
+ template <typename U>
+ inline ResponseStatus try_send_overwrite_on_full(U &&value) noexcept(
+ std::is_nothrow_constructible_v<T, U &&>) {
+ size_t sendCursor =
+ sendCursor_.load(std::memory_order_relaxed); // only sender thread writes this
+ size_t next_sendCursor = next_index(sendCursor);
+
+ if (next_sendCursor == rcvCursorCache_) {
+ // Refresh the cache
+ rcvCursorCache_ = rcvCursor_.load(std::memory_order_acquire);
+ if (next_sendCursor == rcvCursorCache_) {
+ bool isOldestOccupied = oldestOccupied_.exchange(true, std::memory_order_acq_rel);
+ if (isOldestOccupied) {
+ // If the oldest element is occupied, we cannot overwrite
+ return ResponseStatus::SKIP_DUE_TO_OVERWRITE;
  }

- // Normal case: buffer not full
- new (&buffer_[sendCursor]) T(std::forward<U>(value));
- sendCursor_.store(next_sendCursor, std::memory_order_release);
+ size_t newestRcvCursor = rcvCursor_.load(std::memory_order_acquire);

- if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
- sendCursor_.notify_one(); // Notify receiver that a value has been sent
+ /// If the receiver did not advance, we can safely advance the cursor
+ if (rcvCursorCache_ == newestRcvCursor) {
+ rcvCursorCache_ = next_index(newestRcvCursor);
+ rcvCursor_.store(rcvCursorCache_, std::memory_order_release);
+ } else {
+ rcvCursorCache_ = newestRcvCursor;
  }

- return ResponseStatus::SUCCESS;
+ oldestOccupied_.store(false, std::memory_order_release);
+ }
  }

- /// @brief Calculate the next power of 2 greater than or equal to n
- /// @param n The input value
- /// @return The next power of 2
- static constexpr size_t next_power_of_2(const size_t n) noexcept {
- if (n <= 1) return 1;
-
- // Use bit manipulation for efficiency
- size_t power = 1;
- while (power < n) {
- power <<= 1;
- }
- return power;
- }
+ // Normal case: buffer not full
+ new (&buffer_[sendCursor]) T(std::forward<U>(value));
+ sendCursor_.store(next_sendCursor, std::memory_order_release);

- /// @brief Get the next index in a circular buffer
- /// @param val The current index
- /// @return The next index
- /// @note it might not be used for performance but it is a good reference
- inline size_t next_index(const size_t val) const noexcept {
- return (val + 1) & capacity_mask_;
+ if constexpr (Wait == WaitStrategy::ATOMIC_WAIT) {
+ sendCursor_.notify_one(); // Notify receiver that a value has been sent
  }

- const size_t capacity_;
- const size_t capacity_mask_; // mask for bitwise next_index
- T* buffer_;
-
- /// Producer-side data (accessed by sender thread)
- alignas(64) std::atomic<size_t> sendCursor_{0};
- alignas(64) size_t rcvCursorCache_{0}; // reduces cache coherency
+ return ResponseStatus::SUCCESS;
+ }

- /// Consumer-side data (accessed by receiver thread)
- alignas(64) std::atomic<size_t> rcvCursor_{0};
- alignas(64) size_t sendCursorCache_{0}; // reduces cache coherency
+ /// @brief Calculate the next power of 2 greater than or equal to n
+ /// @param n The input value
+ /// @return The next power of 2
+ static constexpr size_t next_power_of_2(const size_t n) noexcept {
+ if (n <= 1)
+ return 1;

- /// Flag indicating if the oldest element is occupied
- alignas(64) std::atomic<bool> oldestOccupied_{false};
-
- friend class Sender<T, Strategy, Wait>;
- friend class Receiver<T, Strategy, Wait>;
+ // Use bit manipulation for efficiency
+ size_t power = 1;
+ while (power < n) {
+ power <<= 1;
+ }
+ return power;
+ }
+
+ /// @brief Get the next index in a circular buffer
+ /// @param val The current index
+ /// @return The next index
+ /// @note it might not be used for performance but it is a good reference
+ inline size_t next_index(const size_t val) const noexcept {
+ return (val + 1) & capacity_mask_;
+ }
+
+ const size_t capacity_;
+ const size_t capacity_mask_; // mask for bitwise next_index
+ T *buffer_;
+
+ /// Producer-side data (accessed by sender thread)
+ alignas(64) std::atomic<size_t> sendCursor_{0};
+ alignas(64) size_t rcvCursorCache_{0}; // reduces cache coherency
+
+ /// Consumer-side data (accessed by receiver thread)
+ alignas(64) std::atomic<size_t> rcvCursor_{0};
+ alignas(64) size_t sendCursorCache_{0}; // reduces cache coherency
+
+ /// Flag indicating if the oldest element is occupied
+ alignas(64) std::atomic<bool> oldestOccupied_{false};
+
+ friend class Sender<T, Strategy, Wait>;
+ friend class Receiver<T, Strategy, Wait>;
  };

-
- } // namespace channels::spsc
+ } // namespace audioapi::channels::spsc
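
For context, here is a minimal usage sketch of the API declared in the diffed header; the interface is the same in both versions, only the formatting changed. The include path is an assumption for illustration (the diff does not show the file name), while channel(), send(), receive(), try_receive(), and ResponseStatus follow the declarations above.

#include <cstdio>
#include <thread>
#include <utility>

// NOTE: hypothetical include path; the diff does not reveal where this header lives in the package.
#include "audioapi/utils/SpscChannel.h"

int main() {
  using namespace audioapi::channels::spsc;

  // Bounded SPSC channel of ints with the default WAIT_ON_FULL / BUSY_LOOP strategies.
  // The requested capacity is rounded up to the next power of 2 (64 stays 64).
  auto [sender, receiver] = channel<int>(64);

  // Producer thread: Sender is move-only, so move it into the closure.
  std::thread producer([tx = std::move(sender)]() mutable {
    for (int i = 0; i < 10; ++i) {
      tx.send(i); // spins (default BUSY_LOOP) while the channel is full
    }
  });

  // Consumer side: blocking receive on this thread.
  for (int i = 0; i < 10; ++i) {
    int value = receiver.receive(); // spins until a value is available
    std::printf("received %d\n", value);
  }

  // Non-blocking variant: try_receive reports a status instead of waiting.
  int extra = 0;
  if (receiver.try_receive(extra) == ResponseStatus::CHANNEL_EMPTY) {
    std::printf("channel drained\n");
  }

  producer.join();
  return 0;
}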