react-native-audio-api 0.4.16 → 0.4.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -36,6 +36,7 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
 
   ma_uint64 framesDecoded;
   ma_decoder_read_pcm_frames(&decoder, buffer, totalFrameCount, &framesDecoded);
+
   if (framesDecoded == 0) {
     __android_log_print(
         ANDROID_LOG_ERROR,
@@ -73,47 +73,50 @@ class AnalyserNodeHostObject : public AudioNodeHostObject {
   }
 
   JSI_HOST_FUNCTION(getFloatFrequencyData) {
-    auto destination = args[0].getObject(runtime).asArray(runtime);
-    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
-    auto data = new float[length];
+    auto destination = args[0].getObject(runtime).asArray(runtime);
+    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+    auto data = new float[length];
 
-    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
-    analyserNode->getFloatFrequencyData(data, length);
+    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+    analyserNode->getFloatFrequencyData(data, length);
 
-    for (int i = 0; i < length; i++) {
-      destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
-    }
+    for (int i = 0; i < length; i++) {
+      destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+    }
 
+    delete[] data;
     return jsi::Value::undefined();
   }
 
   JSI_HOST_FUNCTION(getByteFrequencyData) {
-    auto destination = args[0].getObject(runtime).asArray(runtime);
-    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
-    auto data = new uint8_t[length];
+    auto destination = args[0].getObject(runtime).asArray(runtime);
+    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+    auto data = new uint8_t[length];
 
-    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
-    analyserNode->getByteFrequencyData(data, length);
+    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+    analyserNode->getByteFrequencyData(data, length);
 
-    for (int i = 0; i < length; i++) {
-      destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
-    }
+    for (int i = 0; i < length; i++) {
+      destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+    }
 
+    delete[] data;
     return jsi::Value::undefined();
   }
 
   JSI_HOST_FUNCTION(getFloatTimeDomainData) {
-    auto destination = args[0].getObject(runtime).asArray(runtime);
-    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
-    auto data = new float[length];
+    auto destination = args[0].getObject(runtime).asArray(runtime);
+    auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+    auto data = new float[length];
 
-    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
-    analyserNode->getFloatTimeDomainData(data, length);
+    auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+    analyserNode->getFloatTimeDomainData(data, length);
 
     for (int i = 0; i < length; i++) {
       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
     }
 
+    delete[] data;
     return jsi::Value::undefined();
   }
 
@@ -129,6 +132,7 @@ class AnalyserNodeHostObject : public AudioNodeHostObject {
       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
     }
 
+    delete[] data;
     return jsi::Value::undefined();
   }
 
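The four analyser getters above now share one shape: read the JSI array length, fill a scratch buffer from the AnalyserNode, copy the samples into the JS array element by element, and free the buffer before returning. Condensed from the hunks above into a single function for reference (JSI_HOST_FUNCTION, node_, args and runtime are the names used in the diff; this is a sketch, not a verbatim copy of the header):

    JSI_HOST_FUNCTION(getFloatFrequencyData) {
      auto destination = args[0].getObject(runtime).asArray(runtime);
      auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());

      // Scratch buffer filled by the analyser, then copied element-wise into the JSI array.
      auto data = new float[length];
      auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
      analyserNode->getFloatFrequencyData(data, length);

      for (int i = 0; i < length; i++) {
        destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
      }

      delete[] data; // freed on the only path out of the function
      return jsi::Value::undefined();
    }

A std::vector<float> (or the AudioArray type used for setValueCurveAtTime below) would release the buffer automatically; the hunks above keep the explicit new[]/delete[] pair.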
@@ -74,6 +74,7 @@ class AudioBufferHostObject : public JsiHostObject {
       destination.setValueAtIndex(runtime, i, jsi::Value(destinationData[i]));
     }
 
+    delete[] destinationData;
     return jsi::Value::undefined();
   }
 
@@ -94,6 +95,7 @@ class AudioBufferHostObject : public JsiHostObject {
     audioBuffer_->copyToChannel(
         sourceData, sourceLength, channelNumber, startInChannel);
 
+    delete[] sourceData;
     return jsi::Value::undefined();
   }
 };
@@ -2,6 +2,7 @@
 
 #include <audioapi/jsi/JsiHostObject.h>
 #include <audioapi/core/AudioParam.h>
+#include <audioapi/core/utils/AudioArray.h>
 
 #include <jsi/jsi.h>
 #include <memory>
@@ -81,14 +82,18 @@ class AudioParamHostObject : public JsiHostObject {
   JSI_HOST_FUNCTION(setValueCurveAtTime) {
     auto values = args[0].getObject(runtime).asArray(runtime);
     auto length = static_cast<size_t>(values.length(runtime));
-    auto valuesData = new float[length];
+
+    auto valuesArray = std::make_shared<AudioArray>(length);
+    auto rawData = valuesArray->getData();
+
     for (size_t i = 0; i < values.length(runtime); i++) {
-      valuesData[i] =
-          static_cast<float>(values.getValueAtIndex(runtime, i).getNumber());
+      rawData[i] = static_cast<float>(values.getValueAtIndex(runtime, i).getNumber());
     }
+
     double startTime = args[1].getNumber();
     double duration = args[2].getNumber();
-    param_->setValueCurveAtTime(valuesData, length, startTime, duration);
+    param_->setValueCurveAtTime(valuesArray, length, startTime, duration);
+
     return jsi::Value::undefined();
   }
 
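With this hunk the curve samples are handed to AudioParam as a shared AudioArray instead of a raw float[] allocated with new and never freed. Pieced together, the host function now reads roughly as follows (AudioArray and getData() come from the include added above; the class itself is not part of this diff, so treat this as a sketch):

    JSI_HOST_FUNCTION(setValueCurveAtTime) {
      auto values = args[0].getObject(runtime).asArray(runtime);
      auto length = static_cast<size_t>(values.length(runtime));

      // The curve lives in a shared AudioArray, so AudioParam can keep it alive for the
      // lifetime of the scheduled event and nothing has to be deleted here.
      auto valuesArray = std::make_shared<AudioArray>(length);
      auto rawData = valuesArray->getData();
      for (size_t i = 0; i < length; i++) {
        rawData[i] = static_cast<float>(values.getValueAtIndex(runtime, i).getNumber());
      }

      double startTime = args[1].getNumber();
      double duration = args[2].getNumber();
      param_->setValueCurveAtTime(valuesArray, length, startTime, duration);
      return jsi::Value::undefined();
    }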
@@ -123,6 +123,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
       realData[i] =
           static_cast<float>(real.getValueAtIndex(runtime, i).getNumber());
     }
+
     for (size_t i = 0; i < imag.length(runtime); i++) {
       realData[i] =
           static_cast<float>(imag.getValueAtIndex(runtime, i).getNumber());
@@ -132,6 +133,9 @@ class BaseAudioContextHostObject : public JsiHostObject {
         realData, imagData, disableNormalization, length);
     auto periodicWaveHostObject =
         std::make_shared<PeriodicWaveHostObject>(periodicWave);
+
+    delete[] realData;
+    delete[] imagData;
     return jsi::Object::createFromHostObject(runtime, periodicWaveHostObject);
   }
 
@@ -12,7 +12,9 @@ AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
 }
 
 AudioNode::~AudioNode() {
-  isInitialized_ = false;
+  if (isInitialized_) {
+    cleanup();
+  }
 }
 
 int AudioNode::getNumberOfInputs() const {
@@ -41,8 +43,8 @@ void AudioNode::connect(const std::shared_ptr<AudioNode> &node) {
 }
 
 void AudioNode::disconnect() {
-  for (auto &outputNode : outputNodes_) {
-    disconnect(outputNode);
+  for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+    disconnect(*it);
   }
 }
 
@@ -56,18 +58,26 @@ bool AudioNode::isEnabled() const {
 }
 
 void AudioNode::enable() {
+  if (isEnabled()) {
+    return;
+  }
+
   isEnabled_ = true;
 
-  for (auto &outputNode : outputNodes_) {
-    outputNode->onInputEnabled();
+  for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+    it->get()->onInputEnabled();
   }
 }
 
 void AudioNode::disable() {
+  if (!isEnabled()) {
+    return;
+  }
+
   isEnabled_ = false;
 
-  for (auto &outputNode : outputNodes_) {
-    outputNode->onInputDisabled();
+  for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+    it->get()->onInputDisabled();
   }
 }
 
@@ -96,7 +106,7 @@ std::string AudioNode::toString(ChannelInterpretation interpretation) {
 }
 
 std::shared_ptr<AudioBus> AudioNode::processAudio(
-    std::shared_ptr<AudioBus> outputBus,
+    const std::shared_ptr<AudioBus> &outputBus,
     int framesToProcess,
     bool checkIsAlreadyProcessed) {
   if (!isInitialized_) {
@@ -148,7 +158,9 @@ std::shared_ptr<AudioBus> AudioNode::processInputs(
   processingBus->zero();
 
   int maxNumberOfChannels = 0;
-  for (auto inputNode : inputNodes_) {
+
+  for (auto it = inputNodes_.begin(); it != inputNodes_.end(); ++it) {
+    auto inputNode = *it;
     assert(inputNode != nullptr);
 
     if (!inputNode->isEnabled()) {
@@ -169,7 +181,7 @@ std::shared_ptr<AudioBus> AudioNode::processInputs(
 }
 
 std::shared_ptr<AudioBus> AudioNode::applyChannelCountMode(
-    std::shared_ptr<AudioBus> processingBus) {
+    std::shared_ptr<AudioBus> &processingBus) {
   // If the channelCountMode is EXPLICIT, the node should output the number of
   // channels specified by the channelCount.
   if (channelCountMode_ == ChannelCountMode::EXPLICIT) {
@@ -189,21 +201,35 @@ std::shared_ptr<AudioBus> AudioNode::applyChannelCountMode(
 void AudioNode::mixInputsBuses(const std::shared_ptr<AudioBus> &processingBus) {
   assert(processingBus != nullptr);
 
-  for (const auto &inputBus : inputBuses_) {
-    processingBus->sum(inputBus.get(), channelInterpretation_);
+  for (auto it = inputBuses_.begin(); it != inputBuses_.end(); ++it) {
+    processingBus->sum(it->get(), channelInterpretation_);
   }
 
   inputBuses_.clear();
 }
 
 void AudioNode::connectNode(const std::shared_ptr<AudioNode> &node) {
-  outputNodes_.insert(node);
-  node->onInputConnected(this);
+  auto position = std::find_if(
+      outputNodes_.begin(), outputNodes_.end(), [&node](auto const &nodeSP) {
+        return nodeSP.get() == node.get();
+      });
+
+  if (position == outputNodes_.end()) {
+    outputNodes_.emplace_back(node);
+    node->onInputConnected(this);
+  }
 }
 
 void AudioNode::disconnectNode(const std::shared_ptr<AudioNode> &node) {
-  outputNodes_.erase(node);
-  node->onInputDisconnected(this);
+  auto position = std::find_if(
+      outputNodes_.begin(), outputNodes_.end(), [&node](auto const &nodeSP) {
+        return nodeSP.get() == node.get();
+      });
+
+  if (position != outputNodes_.end()) {
+    node->onInputDisconnected(this);
+    outputNodes_.erase(position);
+  }
 }
 
 void AudioNode::onInputEnabled() {
@@ -227,7 +253,7 @@ void AudioNode::onInputConnected(AudioNode *node) {
     return;
   }
 
-  inputNodes_.insert(node);
+  inputNodes_.push_back(node);
 
   if (node->isEnabled()) {
     onInputEnabled();
@@ -239,26 +265,27 @@ void AudioNode::onInputDisconnected(AudioNode *node) {
     return;
   }
 
-  inputNodes_.erase(node);
+  if (node->isEnabled()) {
+    onInputDisabled();
+  }
+
+  auto position = std::find(inputNodes_.begin(), inputNodes_.end(), node);
 
-  if (isEnabled()) {
-    node->onInputDisabled();
+  if (position != inputNodes_.end()) {
+    inputNodes_.erase(position);
+  } else {
+    assert(false);
   }
 }
 
 void AudioNode::cleanup() {
   isInitialized_ = false;
 
-  for (const auto &outputNode : outputNodes_) {
-    outputNode->onInputDisconnected(this);
-  }
-
-  for (const auto &inputNode : inputNodes_) {
-    inputNode->disconnectNode(shared_from_this());
+  for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+    it->get()->onInputDisconnected(this);
   }
 
   outputNodes_.clear();
-  inputNodes_.clear();
 }
 
 } // namespace audioapi
@@ -6,7 +6,6 @@
 
 #include <memory>
 #include <string>
-#include <unordered_set>
 #include <cstddef>
 #include <vector>
 #include <cassert>
@@ -48,8 +47,8 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   ChannelInterpretation channelInterpretation_ =
       ChannelInterpretation::SPEAKERS;
 
-  std::unordered_set<AudioNode *> inputNodes_ = {};
-  std::unordered_set<std::shared_ptr<AudioNode>> outputNodes_ = {};
+  std::vector<AudioNode *> inputNodes_ = {};
+  std::vector<std::shared_ptr<AudioNode>> outputNodes_ = {};
 
   int numberOfEnabledInputNodes_ = 0;
   bool isInitialized_ = false;
@@ -65,13 +64,13 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   static std::string toString(ChannelCountMode mode);
   static std::string toString(ChannelInterpretation interpretation);
 
-  virtual std::shared_ptr<AudioBus> processAudio(std::shared_ptr<AudioBus> outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
+  virtual std::shared_ptr<AudioBus> processAudio(const std::shared_ptr<AudioBus> &outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
   virtual void processNode(const std::shared_ptr<AudioBus>&, int) = 0;
 
   bool isAlreadyProcessed();
-  std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus>& outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
-  std::shared_ptr<AudioBus> applyChannelCountMode(std::shared_ptr<AudioBus> processingBus);
-  void mixInputsBuses(const std::shared_ptr<AudioBus>& processingBus);
+  std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus> &outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
+  std::shared_ptr<AudioBus> applyChannelCountMode(std::shared_ptr<AudioBus> &processingBus);
+  void mixInputsBuses(const std::shared_ptr<AudioBus> &processingBus);
 
   void connectNode(const std::shared_ptr<AudioNode> &node);
   void disconnectNode(const std::shared_ptr<AudioNode> &node);
@@ -1,5 +1,6 @@
 #include <audioapi/core/AudioParam.h>
 #include <audioapi/core/BaseAudioContext.h>
+#include <audioapi/core/utils/AudioArray.h>
 #include <audioapi/dsp/AudioUtils.h>
 
 namespace audioapi {
@@ -179,7 +180,7 @@ void AudioParam::setTargetAtTime(
 }
 
 void AudioParam::setValueCurveAtTime(
-    const float *values,
+    const std::shared_ptr<AudioArray> &values,
     size_t length,
     double startTime,
     double duration) {
@@ -206,7 +207,7 @@ void AudioParam::setValueCurveAtTime(
         (time - startTime) * static_cast<double>(length - 1) /
         (endTime - startTime));
 
-    return AudioUtils::linearInterpolate(values, k, k + 1, factor);
+    return AudioUtils::linearInterpolate(values->getData(), k, k + 1, factor);
   }
 
   return endValue;
@@ -216,7 +217,7 @@ void AudioParam::setValueCurveAtTime(
       startTime,
       startTime + duration,
       getQueueEndValue(),
-      values[length - 1],
+      values->operator[](values->getSize() - 1),
       calculateValue,
       ParamChangeEventType::SET_VALUE_CURVE);
   updateQueue(event);
@@ -10,6 +10,8 @@
 
 namespace audioapi {
 
+class AudioArray;
+
 class AudioParam {
  public:
   explicit AudioParam(float defaultValue, float minValue, float maxValue);
@@ -27,7 +29,7 @@ class AudioParam {
   void exponentialRampToValueAtTime(float value, double endTime);
   void setTargetAtTime(float target, double startTime, double timeConstant);
   void setValueCurveAtTime(
-      const float *values,
+      const std::shared_ptr<AudioArray> &values,
       size_t length,
       double startTime,
       double duration);
@@ -67,6 +67,15 @@ PeriodicWave::PeriodicWave(
   createBandLimitedTables(real, imaginary, length);
 }
 
+PeriodicWave::~PeriodicWave() {
+  for (int i = 0; i < numberOfRanges_; i++) {
+    delete[] bandLimitedTables_[i];
+  }
+
+  delete[] bandLimitedTables_;
+  bandLimitedTables_ = nullptr;
+}
+
 int PeriodicWave::getPeriodicWaveSize() const {
   if (sampleRate_ <= 24000) {
     return 2048;
@@ -185,6 +194,9 @@ void PeriodicWave::generateBasicWaveForm(OscillatorType type) {
   }
 
   createBandLimitedTables(real, imaginary, halfSize);
+
+  delete[] real;
+  delete[] imaginary;
 }
 
 void PeriodicWave::createBandLimitedTables(
@@ -368,4 +380,5 @@ float PeriodicWave::doInterpolation(
   return (1 - waveTableInterpolationFactor) * higherWaveDataSample +
       waveTableInterpolationFactor * lowerWaveDataSample;
 }
+
 } // namespace audioapi
@@ -47,6 +47,7 @@ class PeriodicWave {
       float *imaginary,
       int length,
       bool disableNormalization);
+  ~PeriodicWave();
 
   [[nodiscard]] int getPeriodicWaveSize() const;
   [[nodiscard]] float getScale() const;
@@ -42,7 +42,7 @@ void StretcherNode::processNode(
 }
 
 std::shared_ptr<AudioBus> StretcherNode::processAudio(
-    std::shared_ptr<AudioBus> outputBus,
+    const std::shared_ptr<AudioBus> &outputBus,
     int framesToProcess,
     bool checkIsAlreadyProcessed) {
   if (!isInitialized_) {
@@ -20,7 +20,7 @@ class StretcherNode : public AudioNode {
 
  protected:
   void processNode(const std::shared_ptr<AudioBus>& processingBus, int framesToProcess) override;
-  std::shared_ptr<AudioBus> processAudio(std::shared_ptr<AudioBus> outputBus, int framesToProcess, bool checkIsAlreadyProcessed) override;
+  std::shared_ptr<AudioBus> processAudio(const std::shared_ptr<AudioBus> &outputBus, int framesToProcess, bool checkIsAlreadyProcessed) override;
 
  private:
   // k-rate params
@@ -122,38 +122,38 @@ std::mutex &AudioBufferSourceNode::getBufferLock() {
 void AudioBufferSourceNode::processNode(
     const std::shared_ptr<AudioBus> &processingBus,
     int framesToProcess) {
-  if (!Locker::tryLock(getBufferLock())) {
-    processingBus->zero();
-    return;
-  }
+  if (auto locker = Locker::tryLock(getBufferLock())) {
+    // No audio data to fill, zero the output and return.
+    if (!buffer_) {
+      processingBus->zero();
+      return;
+    }
 
-  // No audio data to fill, zero the output and return.
-  if (!buffer_) {
-    processingBus->zero();
-    return;
-  }
+    size_t startOffset = 0;
+    size_t offsetLength = 0;
 
-  size_t startOffset = 0;
-  size_t offsetLength = 0;
+    updatePlaybackInfo(
+        processingBus, framesToProcess, startOffset, offsetLength);
+    float playbackRate = getPlaybackRateValue(startOffset);
 
-  updatePlaybackInfo(processingBus, framesToProcess, startOffset, offsetLength);
-  float playbackRate = getPlaybackRateValue(startOffset);
+    assert(alignedBus_ != nullptr);
+    assert(alignedBus_->getSize() > 0);
 
-  assert(alignedBus_ != nullptr);
-  assert(alignedBus_->getSize() > 0);
+    if (playbackRate == 0.0f || !isPlaying()) {
+      processingBus->zero();
+      return;
+    } else if (std::fabs(playbackRate) == 1.0) {
+      processWithoutInterpolation(
+          processingBus, startOffset, offsetLength, playbackRate);
+    } else {
+      processWithInterpolation(
+          processingBus, startOffset, offsetLength, playbackRate);
+    }
 
-  if (playbackRate == 0.0f || !isPlaying()) {
-    processingBus->zero();
-    return;
-  } else if (std::fabs(playbackRate) == 1.0) {
-    processWithoutInterpolation(
-        processingBus, startOffset, offsetLength, playbackRate);
+    handleStopScheduled();
   } else {
-    processWithInterpolation(
-        processingBus, startOffset, offsetLength, playbackRate);
+    processingBus->zero();
   }
-
-  handleStopScheduled();
 }
 
 /**
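Here, and again in AudioNodeManager::preProcessGraph further down, the early-return guard becomes if (auto locker = Locker::tryLock(...)), so the lock is held by an RAII object for the whole processing branch and released when the scope exits. The Locker class itself is not shown in this diff; a hypothetical shape that would satisfy these call sites (including the blocking Locker lock(getGraphLock()) usage) could look like this — a sketch, not the package's actual implementation:

    #include <mutex>

    class Locker {
     public:
      // Blocking form, as in `Locker lock(getGraphLock());`.
      explicit Locker(std::mutex &m) : mutex_(&m), owns_(true) { m.lock(); }

      // Non-blocking form, usable as `if (auto locker = Locker::tryLock(m)) { ... }`.
      static Locker tryLock(std::mutex &m) { return Locker(&m, m.try_lock()); }

      Locker(Locker &&other) noexcept : mutex_(other.mutex_), owns_(other.owns_) { other.owns_ = false; }
      Locker(const Locker &) = delete;
      Locker &operator=(const Locker &) = delete;
      Locker &operator=(Locker &&) = delete;

      ~Locker() {
        if (owns_) {
          mutex_->unlock(); // released automatically at the end of the guarded scope
        }
      }

      explicit operator bool() const { return owns_; }

     private:
      Locker(std::mutex *m, bool owns) : mutex_(m), owns_(owns) {}
      std::mutex *mutex_;
      bool owns_;
    };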
@@ -63,7 +63,7 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
   size_t stopFrame = stopTime_ == -1.0
       ? std::numeric_limits<size_t>::max()
       : std::max(
-            AudioUtils::timeToSampleFrame(stopTime_, sampleRate), firstFrame);
+            AudioUtils::timeToSampleFrame(stopTime_, sampleRate), lastFrame);
 
   if (isUnscheduled() || isFinished()) {
     startOffset = 0;
@@ -85,7 +85,10 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
     startOffset = std::max(startFrame, firstFrame) - firstFrame > 0
         ? std::max(startFrame, firstFrame) - firstFrame
         : 0;
-    nonSilentFramesToProcess = std::min(lastFrame, stopFrame) - startFrame;
+    nonSilentFramesToProcess = std::max(std::min(lastFrame, stopFrame), startFrame) - startFrame;
+
+    assert(startOffset < framesToProcess);
+    assert(nonSilentFramesToProcess <= framesToProcess);
     processingBus->zero(0, startOffset);
     return;
   }
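The std::max(...) guard added here matters because the frame counters are size_t: when the scheduled stop frame falls before the start frame of the current render quantum, the old std::min(lastFrame, stopFrame) - startFrame wrapped around to a huge unsigned value instead of clamping to zero. A standalone illustration of the arithmetic (not package code):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    int main() {
      const size_t startFrame = 128, lastFrame = 256, stopFrame = 100;

      // Old expression: 100 - 128 underflows the unsigned type (~1.8e19 on 64-bit).
      const size_t broken = std::min(lastFrame, stopFrame) - startFrame;

      // New expression: clamps at startFrame, so zero non-silent frames are processed.
      const size_t fixed = std::max(std::min(lastFrame, stopFrame), startFrame) - startFrame;

      std::cout << broken << " vs " << fixed << "\n"; // 18446744073709551588 vs 0
      return 0;
    }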
@@ -97,6 +100,9 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
   if (stopFrame < lastFrame && stopFrame >= firstFrame) {
     startOffset = 0;
     nonSilentFramesToProcess = stopFrame - firstFrame;
+
+    assert(startOffset < framesToProcess);
+    assert(nonSilentFramesToProcess <= framesToProcess);
     processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame);
     return;
   }
@@ -12,6 +12,7 @@ OscillatorNode::OscillatorNode(BaseAudioContext *context)
   detuneParam_ = std::make_shared<AudioParam>(0.0, -MAX_DETUNE, MAX_DETUNE);
   type_ = OscillatorType::SINE;
   periodicWave_ = context_->getBasicWaveForm(type_);
+
   isInitialized_ = true;
 }
 
@@ -5,7 +5,7 @@
 namespace audioapi {
 
 AudioNodeManager::~AudioNodeManager() {
-  audioNodesToConnect_.clear();
+  cleanup();
 }
 
 void AudioNodeManager::addPendingConnection(
@@ -18,12 +18,10 @@ void AudioNodeManager::addPendingConnection(
 }
 
 void AudioNodeManager::preProcessGraph() {
-  if (!Locker::tryLock(getGraphLock())) {
-    return;
+  if (auto locker = Locker::tryLock(getGraphLock())) {
+    settlePendingConnections();
+    prepareNodesForDestruction();
   }
-
-  settlePendingConnections();
-  prepareNodesForDestruction();
 }
 
 std::mutex &AudioNodeManager::getGraphLock() {
@@ -32,19 +30,18 @@ std::mutex &AudioNodeManager::getGraphLock() {
 
 void AudioNodeManager::addNode(const std::shared_ptr<AudioNode> &node) {
   Locker lock(getGraphLock());
-
-  nodes_.insert(node);
+  nodes_.emplace_back(node);
 }
 
 void AudioNodeManager::settlePendingConnections() {
-  for (auto &connection : audioNodesToConnect_) {
-    std::shared_ptr<AudioNode> from = std::get<0>(connection);
-    std::shared_ptr<AudioNode> to = std::get<1>(connection);
-    ConnectionType type = std::get<2>(connection);
+  for (auto it = audioNodesToConnect_.begin(); it != audioNodesToConnect_.end();
+       ++it) {
+    std::shared_ptr<AudioNode> from = std::get<0>(*it);
+    std::shared_ptr<AudioNode> to = std::get<1>(*it);
+    ConnectionType type = std::get<2>(*it);
 
-    if (!to || !from) {
-      continue;
-    }
+    assert(from != nullptr);
+    assert(to != nullptr);
 
     if (type == ConnectionType::CONNECT) {
       from->connectNode(to);
@@ -57,8 +54,11 @@ void AudioNodeManager::settlePendingConnections() {
 }
 
 void AudioNodeManager::prepareNodesForDestruction() {
-  for (auto it = nodes_.begin(); it != nodes_.end();) {
+  auto it = nodes_.begin();
+
+  while (it != nodes_.end()) {
     if (it->use_count() == 1) {
+      assert(it->get()->inputNodes_.size() == 0);
       it->get()->cleanup();
       it = nodes_.erase(it);
     } else {
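prepareNodesForDestruction() treats use_count() == 1 as "the manager's vector holds the last remaining reference", and only then calls cleanup() and erases the node. A minimal, self-contained illustration of that ownership test (not package code):

    #include <cassert>
    #include <memory>
    #include <vector>

    int main() {
      std::vector<std::shared_ptr<int>> nodes;
      nodes.push_back(std::make_shared<int>(42));

      auto external = nodes.front();           // e.g. the JS side still holds the node
      assert(nodes.front().use_count() == 2);  // manager + external holder

      external.reset();                        // last outside reference dropped
      assert(nodes.front().use_count() == 1);  // only the manager is left: safe to destroy
      return 0;
    }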
@@ -70,11 +70,12 @@ void AudioNodeManager::prepareNodesForDestruction() {
 void AudioNodeManager::cleanup() {
   Locker lock(getGraphLock());
 
-  for (auto &node : nodes_) {
-    node->cleanup();
+  for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
+    it->get()->cleanup();
   }
 
   nodes_.clear();
+  audioNodesToConnect_.clear();
 }
 
 } // namespace audioapi
@@ -4,7 +4,6 @@
 #include <mutex>
 #include <tuple>
 #include <vector>
-#include <unordered_set>
 
 namespace audioapi {
 
@@ -30,10 +29,11 @@ class AudioNodeManager {
   void cleanup();
 
  private:
+  friend class AudioNode;
   std::mutex graphLock_;
 
   // all nodes created in the context
-  std::unordered_set<std::shared_ptr<AudioNode>> nodes_;
+  std::vector<std::shared_ptr<AudioNode>> nodes_;
 
   // connections to be settled
   std::vector<std::tuple<
@@ -15,6 +15,8 @@ typedef void (^RenderAudioBlock)(AudioBufferList *outputBuffer, int numFrames);
 @property (nonatomic, copy) RenderAudioBlock renderAudio;
 @property (nonatomic, assign) float sampleRate;
 @property (nonatomic, assign) bool isRunning;
+@property (nonatomic, strong) AVAudioSourceNodeRenderBlock renderBlock;
+@property (nonatomic, assign) bool isAudioSessionActive;
 
 - (instancetype)initWithRenderAudioBlock:(RenderAudioBlock)renderAudio;
 
@@ -34,10 +36,6 @@ typedef void (^RenderAudioBlock)(AudioBufferList *outputBuffer, int numFrames);
 
 - (void)setupAndInitAudioSession;
 
-- (void)setupAndInitNotificationHandlers;
-
 - (void)connectAudioEngine;
 
-- (void)handleEngineConfigurationChange:(NSNotification *)notification;
-
 @end
@@ -9,26 +9,25 @@
     self.audioEngine = [[AVAudioEngine alloc] init];
     self.audioEngine.mainMixerNode.outputVolume = 1;
     self.isRunning = true;
+    self.isAudioSessionActive = false;
 
     [self setupAndInitAudioSession];
-    [self setupAndInitNotificationHandlers];
 
     self.sampleRate = [self.audioSession sampleRate];
 
-    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:2];
-
     __weak typeof(self) weakSelf = self;
-    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format
-                                                renderBlock:^OSStatus(
-                                                    BOOL *isSilence,
-                                                    const AudioTimeStamp *timestamp,
-                                                    AVAudioFrameCount frameCount,
-                                                    AudioBufferList *outputData) {
-                                                  return [weakSelf renderCallbackWithIsSilence:isSilence
-                                                                                     timestamp:timestamp
-                                                                                    frameCount:frameCount
-                                                                                    outputData:outputData];
-                                                }];
+    self.renderBlock = ^OSStatus(
+        BOOL *isSilence, const AudioTimeStamp *timestamp, AVAudioFrameCount frameCount, AudioBufferList *outputData) {
+      if (outputData->mNumberBuffers != 2) {
+        return kAudioServicesBadPropertySizeError;
+      }
+
+      weakSelf.renderAudio(outputData, frameCount);
+      return kAudioServicesNoError;
+    };
+
+    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:2];
+    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format renderBlock:self.renderBlock];
   }
 
   return self;
@@ -41,26 +40,26 @@
     self.audioEngine = [[AVAudioEngine alloc] init];
     self.audioEngine.mainMixerNode.outputVolume = 1;
     self.isRunning = true;
+    self.isAudioSessionActive = false;
 
     [self setupAndInitAudioSession];
-    [self setupAndInitNotificationHandlers];
 
     self.sampleRate = sampleRate;
 
     _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:2];
 
     __weak typeof(self) weakSelf = self;
-    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format
-                                                renderBlock:^OSStatus(
-                                                    BOOL *isSilence,
-                                                    const AudioTimeStamp *timestamp,
-                                                    AVAudioFrameCount frameCount,
-                                                    AudioBufferList *outputData) {
-                                                  return [weakSelf renderCallbackWithIsSilence:isSilence
-                                                                                     timestamp:timestamp
-                                                                                    frameCount:frameCount
-                                                                                    outputData:outputData];
-                                                }];
+    self.renderBlock = ^OSStatus(
+        BOOL *isSilence, const AudioTimeStamp *timestamp, AVAudioFrameCount frameCount, AudioBufferList *outputData) {
+      if (outputData->mNumberBuffers != 2) {
+        return kAudioServicesBadPropertySizeError;
+      }
+
+      weakSelf.renderAudio(outputData, frameCount);
+      return kAudioServicesNoError;
+    };
+
+    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format renderBlock:self.renderBlock];
   }
 
   return self;
@@ -90,7 +89,9 @@
   [self.audioSession setActive:false error:&error];
 
   if (error != nil) {
-    @throw error;
+    NSLog(@"Error while de-activating audio session: %@", [error debugDescription]);
+  } else {
+    self.isAudioSessionActive = false;
   }
 }
 
@@ -114,20 +115,6 @@
   self.renderAudio = nil;
 }
 
-- (OSStatus)renderCallbackWithIsSilence:(BOOL *)isSilence
-                              timestamp:(const AudioTimeStamp *)timestamp
-                             frameCount:(AVAudioFrameCount)frameCount
-                             outputData:(AudioBufferList *)outputData
-{
-  if (outputData->mNumberBuffers < 2) {
-    return noErr; // Ensure we have stereo output
-  }
-
-  self.renderAudio(outputData, frameCount);
-
-  return noErr;
-}
-
 - (void)setupAndInitAudioSession
 {
   NSError *error = nil;
@@ -136,33 +123,53 @@
     self.audioSession = [AVAudioSession sharedInstance];
   }
 
-  [self.audioSession setCategory:AVAudioSessionCategoryPlayback
-                             mode:AVAudioSessionModeDefault
-                          options:AVAudioSessionCategoryOptionDuckOthers | AVAudioSessionCategoryOptionAllowBluetooth |
-                                  AVAudioSessionCategoryOptionAllowAirPlay
-                            error:&error];
+  AVAudioSessionCategory desiredCategory = AVAudioSessionCategoryPlayback;
+  AVAudioSessionMode desiredMode = AVAudioSessionModeDefault;
+  AVAudioSessionCategoryOptions desiredOptions = AVAudioSessionCategoryOptionAllowBluetooth | AVAudioSessionCategoryOptionAllowAirPlay;
 
-  if (error != nil) {
-    NSLog(@"Error while configuring audio session: %@", [error localizedDescription]);
+  if (self.audioSession.category != desiredCategory || self.audioSession.mode != desiredMode || self.audioSession.categoryOptions != desiredOptions) {
+    [self.audioSession setCategory:desiredCategory mode:desiredMode options:desiredOptions error:&error];
+
+    if (error != nil) {
+      NSLog(@"Error while configuring audio session: %@", [error debugDescription]);
+    }
+  } else {
+    NSLog(@"AVAudioSession category mode and options are valid, skipping configuration");
   }
 
-  [self.audioSession setActive:true error:&error];
+  if (self.sampleRate) {
+    if (self.audioSession.preferredSampleRate != self.sampleRate) {
+      [self.audioSession setPreferredSampleRate:self.sampleRate error:&error];
 
-  if (error != nil) {
-    NSLog(@"Error while activating audio session: %@", [error localizedDescription]);
+      if (error != nil) {
+        NSLog(@"Error while setting preferred sample rate buffer duration: %@", [error debugDescription]);
+      }
+    } else {
+      NSLog(@"AVAudioSession preferred sample rate is valid, skipping configuration");
+    }
   }
-}
 
-- (void)setupAndInitNotificationHandlers
-{
-  if (!self.notificationCenter) {
-    self.notificationCenter = [NSNotificationCenter defaultCenter];
+  if (self.audioSession.preferredIOBufferDuration != 0.02) {
+    [self.audioSession setPreferredIOBufferDuration:0.02 error:&error];
+
+    if (error != nil) {
+      NSLog(@"Error while setting preferred IO buffer duration: %@", [error debugDescription]);
+    }
+  } else {
+    NSLog(@"AVAudioSession preferred IO buffer duration is valid, skipping configuration");
   }
 
-  [self.notificationCenter addObserver:self
-                              selector:@selector(handleEngineConfigurationChange:)
-                                  name:AVAudioEngineConfigurationChangeNotification
-                                object:nil];
+  if (!self.isAudioSessionActive) {
+    [self.audioSession setActive:true error:&error];
+
+    if (error != nil) {
+      NSLog(@"Error while activating audio session: %@", [error debugDescription]);
+    } else {
+      self.isAudioSessionActive = true;
+    }
+  } else {
+    NSLog(@"AVAudioSession was active, skipping unnecessary activation");
+  }
 }
 
 - (void)connectAudioEngine
@@ -183,13 +190,4 @@
   }
 }
 
-- (void)handleEngineConfigurationChange:(NSNotification *)notification
-{
-  if (!self.isRunning) {
-    return;
-  }
-
-  [self connectAudioEngine];
-}
-
 @end
@@ -69,7 +69,7 @@ IOSAudioPlayer::~IOSAudioPlayer()
   [audioPlayer_ cleanup];
 
   if (audioBus_) {
-    audioBus_ = 0;
+    audioBus_ = nullptr;
   }
 }
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "react-native-audio-api",
-  "version": "0.4.16",
+  "version": "0.4.18",
   "description": "react-native-audio-api provides system for controlling audio in React Native environment compatible with Web Audio API specification",
   "bin": {
     "setup-custom-wasm": "./scripts/setup-custom-wasm.js"