react-native-audio-api 0.5.3 → 0.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp +1 -0
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +2 -2
- package/common/cpp/audioapi/core/AudioNode.cpp +50 -28
- package/common/cpp/audioapi/core/AudioNode.h +2 -4
- package/common/cpp/audioapi/core/effects/PeriodicWave.cpp +1 -0
- package/common/cpp/audioapi/core/sources/AudioBuffer.cpp +0 -7
- package/common/cpp/audioapi/core/sources/AudioBuffer.h +0 -3
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +109 -78
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +10 -0
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +7 -15
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +2 -1
- package/common/cpp/audioapi/core/sources/OscillatorNode.cpp +1 -0
- package/common/cpp/audioapi/core/utils/AudioNodeManager.cpp +18 -17
- package/common/cpp/audioapi/utils/AudioArray.cpp +6 -0
- package/common/cpp/audioapi/utils/AudioArray.h +1 -0
- package/common/cpp/audioapi/utils/AudioBus.cpp +12 -0
- package/common/cpp/audioapi/utils/AudioBus.h +3 -2
- package/ios/audioapi/ios/core/AudioPlayer.h +1 -7
- package/ios/audioapi/ios/core/AudioPlayer.m +31 -106
- package/ios/audioapi/ios/core/IOSAudioPlayer.mm +1 -1
- package/package.json +1 -1
package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp CHANGED

```diff
@@ -36,6 +36,7 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
 
   ma_uint64 framesDecoded;
   ma_decoder_read_pcm_frames(&decoder, buffer, totalFrameCount, &framesDecoded);
+
   if (framesDecoded == 0) {
     __android_log_print(ANDROID_LOG_ERROR, "AudioDecoder", "Failed to decode");
 
```
package/common/cpp/audioapi/AudioAPIModuleInstaller.h CHANGED

```diff
@@ -24,7 +24,7 @@ class AudioAPIModuleInstaller {
         *jsiRuntime,
         jsi::PropNameID::forAscii(*jsiRuntime, "createAudioContext"),
         0,
-        [
+        [jsCallInvoker](
             jsi::Runtime &runtime,
             const jsi::Value &thisValue,
             const jsi::Value *args,
@@ -38,7 +38,7 @@ class AudioAPIModuleInstaller {
           }
 
           auto audioContextHostObject = std::make_shared<AudioContextHostObject>(
-              audioContext,
+              audioContext, &runtime, jsCallInvoker);
 
           return jsi::Object::createFromHostObject(
               runtime, audioContextHostObject);
```
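The pair of hunks above has one purpose: `createAudioContext` now captures the React Native call invoker and hands it, together with the runtime, to each `AudioContextHostObject`, so audio events can later be delivered back on the JS thread. A minimal sketch of the capture pattern with simplified stand-in types (`CallInvoker` and `HostFunction` here are illustrative, not the real JSI or react-native signatures):

```cpp
#include <functional>
#include <memory>
#include <string>

// Stand-in for react::CallInvoker: something that can post work to JS.
struct CallInvoker {
  void invokeAsync(std::function<void()> fn) { fn(); }  // illustrative only
};

using HostFunction = std::function<std::string()>;

// The factory lambda captures the shared_ptr by value, keeping the invoker
// alive for as long as the host function (and the contexts it creates) live.
HostFunction makeCreateAudioContext(std::shared_ptr<CallInvoker> jsCallInvoker) {
  return [jsCallInvoker]() {
    // ... construct the audio context and hand it the invoker ...
    jsCallInvoker->invokeAsync([] { /* deliver events on the JS thread */ });
    return std::string("AudioContext");
  };
}
```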
package/common/cpp/audioapi/core/AudioNode.cpp CHANGED

```diff
@@ -12,7 +12,9 @@ AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
 }
 
 AudioNode::~AudioNode() {
-  isInitialized_
+  if (isInitialized_) {
+    cleanup();
+  }
 }
 
 int AudioNode::getNumberOfInputs() const {
@@ -41,8 +43,9 @@ void AudioNode::connect(const std::shared_ptr<AudioNode> &node) {
 }
 
 void AudioNode::disconnect() {
-  for (auto
-
+  for (auto it = outputNodes_.begin(), end = outputNodes_.end(); it != end;
+       ++it) {
+    disconnect(*it);
   }
 }
 
@@ -56,18 +59,28 @@ bool AudioNode::isEnabled() const {
 }
 
 void AudioNode::enable() {
+  if (isEnabled()) {
+    return;
+  }
+
   isEnabled_ = true;
 
-  for (auto
-
+  for (auto it = outputNodes_.begin(), end = outputNodes_.end(); it != end;
+       ++it) {
+    it->get()->onInputEnabled();
   }
 }
 
 void AudioNode::disable() {
+  if (!isEnabled()) {
+    return;
+  }
+
   isEnabled_ = false;
 
-  for (auto
-
+  for (auto it = outputNodes_.begin(), end = outputNodes_.end(); it != end;
+       ++it) {
+    it->get()->onInputDisabled();
   }
 }
 
@@ -96,7 +109,7 @@ std::string AudioNode::toString(ChannelInterpretation interpretation) {
 }
 
 std::shared_ptr<AudioBus> AudioNode::processAudio(
-    std::shared_ptr<AudioBus> outputBus,
+    const std::shared_ptr<AudioBus> &outputBus,
    int framesToProcess,
    bool checkIsAlreadyProcessed) {
  if (!isInitialized_) {
@@ -148,7 +161,9 @@ std::shared_ptr<AudioBus> AudioNode::processInputs(
   processingBus->zero();
 
   int maxNumberOfChannels = 0;
-  for (auto
+  for (auto it = inputNodes_.begin(), end = inputNodes_.end(); it != end;
+       ++it) {
+    auto inputNode = *it;
     assert(inputNode != nullptr);
 
     if (!inputNode->isEnabled()) {
@@ -169,7 +184,7 @@ std::shared_ptr<AudioBus> AudioNode::processInputs(
 }
 
 std::shared_ptr<AudioBus> AudioNode::applyChannelCountMode(
-    std::shared_ptr<AudioBus> processingBus) {
+    const std::shared_ptr<AudioBus> &processingBus) {
   // If the channelCountMode is EXPLICIT, the node should output the number of
   // channels specified by the channelCount.
   if (channelCountMode_ == ChannelCountMode::EXPLICIT) {
@@ -189,21 +204,30 @@ std::shared_ptr<AudioBus> AudioNode::applyChannelCountMode(
 void AudioNode::mixInputsBuses(const std::shared_ptr<AudioBus> &processingBus) {
   assert(processingBus != nullptr);
 
-  for (
-
+  for (auto it = inputBuses_.begin(), end = inputBuses_.end(); it != end;
+       ++it) {
+    processingBus->sum(it->get(), channelInterpretation_);
   }
 
   inputBuses_.clear();
 }
 
 void AudioNode::connectNode(const std::shared_ptr<AudioNode> &node) {
-  outputNodes_.
-
+  auto position = outputNodes_.find(node);
+
+  if (position == outputNodes_.end()) {
+    outputNodes_.insert(node);
+    node->onInputConnected(this);
+  }
 }
 
 void AudioNode::disconnectNode(const std::shared_ptr<AudioNode> &node) {
-  outputNodes_.
-
+  auto position = outputNodes_.find(node);
+
+  if (position != outputNodes_.end()) {
+    node->onInputDisconnected(this);
+    outputNodes_.erase(node);
+  }
 }
 
 void AudioNode::onInputEnabled() {
@@ -239,28 +263,26 @@ void AudioNode::onInputDisconnected(AudioNode *node) {
     return;
   }
 
-
+  if (node->isEnabled()) {
+    onInputDisabled();
+  }
 
-
-
+  auto position = inputNodes_.find(node);
+
+  if (position != inputNodes_.end()) {
+    inputNodes_.erase(position);
   }
 }
 
 void AudioNode::cleanup() {
   isInitialized_ = false;
 
-  for (
-
-
-
-  for (const auto &inputNode : inputNodes_) {
-    if (inputNode) {
-      inputNode->disconnectNode(shared_from_this());
-    }
+  for (auto it = outputNodes_.begin(), end = outputNodes_.end(); it != end;
+       ++it) {
+    it->get()->onInputDisconnected(this);
   }
 
   outputNodes_.clear();
-  inputNodes_.clear();
 }
 
 } // namespace audioapi
```
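Taken together, the AudioNode.cpp hunks make graph mutations idempotent: `enable()`/`disable()` return early when the state already matches, and `connectNode()`/`disconnectNode()` check set membership before mutating, so each `onInput*` callback fires exactly once per actual change. A self-contained sketch of that pattern (the `Node` type and callback names here are illustrative, not the library's):

```cpp
#include <iostream>
#include <memory>
#include <unordered_set>

struct Node {
  bool enabled = false;
  std::unordered_set<std::shared_ptr<Node>> outputs;

  void onInputEnabled() { std::cout << "input enabled\n"; }
  void onInputConnected() { std::cout << "input connected\n"; }

  void enable() {
    if (enabled) {
      return;  // idempotent: a second enable() must not re-notify outputs
    }
    enabled = true;
    for (const auto &out : outputs) {
      out->onInputEnabled();
    }
  }

  void connect(const std::shared_ptr<Node> &node) {
    // insert().second is true only when the edge is actually new
    if (outputs.insert(node).second) {
      node->onInputConnected();
    }
  }
};

int main() {
  auto a = std::make_shared<Node>();
  auto b = std::make_shared<Node>();
  a->connect(b);
  a->connect(b);  // no-op: "input connected" printed only once
  a->enable();
  a->enable();    // no-op: "input enabled" printed only once
}
```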
package/common/cpp/audioapi/core/AudioNode.h CHANGED

```diff
@@ -58,19 +58,17 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   std::size_t lastRenderedFrame_{SIZE_MAX};
 
  private:
-  friend class StretcherNode;
-
   std::vector<std::shared_ptr<AudioBus>> inputBuses_ = {};
 
   static std::string toString(ChannelCountMode mode);
   static std::string toString(ChannelInterpretation interpretation);
 
-  virtual std::shared_ptr<AudioBus> processAudio(std::shared_ptr<AudioBus> outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
+  virtual std::shared_ptr<AudioBus> processAudio(const std::shared_ptr<AudioBus> &outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
   virtual void processNode(const std::shared_ptr<AudioBus>&, int) = 0;
 
   bool isAlreadyProcessed();
   std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus>& outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
-  std::shared_ptr<AudioBus> applyChannelCountMode(std::shared_ptr<AudioBus> processingBus);
+  std::shared_ptr<AudioBus> applyChannelCountMode(const std::shared_ptr<AudioBus> &processingBus);
   void mixInputsBuses(const std::shared_ptr<AudioBus>& processingBus);
 
   void connectNode(const std::shared_ptr<AudioNode> &node);
```
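The header change mirrors the .cpp: `processAudio` and `applyChannelCountMode` now take `const std::shared_ptr<AudioBus> &` instead of `shared_ptr` by value. By-value copies bump the atomic reference count on every call, which is avoidable work on a per-render-quantum hot path; a const reference keeps shared ownership at the call site with no ++/-- traffic. A small standalone illustration:

```cpp
#include <memory>

static long useCount(const std::shared_ptr<int> &p) {  // no refcount bump
  return p.use_count();
}

static long useCountByValue(std::shared_ptr<int> p) {  // parameter is a copy
  return p.use_count();
}

int main() {
  auto p = std::make_shared<int>(42);
  long a = useCount(p);         // 1: no copy was made
  long b = useCountByValue(p);  // 2: the parameter itself is a second owner
  return static_cast<int>(b - a);  // 1
}
```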
package/common/cpp/audioapi/core/sources/AudioBuffer.cpp CHANGED

```diff
@@ -11,17 +11,10 @@ AudioBuffer::AudioBuffer(
     size_t length,
     float sampleRate) {
   bus_ = std::make_shared<AudioBus>(length, numberOfChannels, sampleRate);
-  stretch_ =
-      std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
-  stretch_->presetDefault(numberOfChannels, sampleRate);
 }
 
 AudioBuffer::AudioBuffer(std::shared_ptr<AudioBus> bus) {
   bus_ = std::move(bus);
-
-  stretch_ =
-      std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
-  stretch_->presetDefault(bus_->getNumberOfChannels(), bus_->getSampleRate());
 }
 
 size_t AudioBuffer::getLength() const {
```
package/common/cpp/audioapi/core/sources/AudioBuffer.h CHANGED

```diff
@@ -1,7 +1,5 @@
 #pragma once
 
-#include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>
-
 #include <algorithm>
 #include <memory>
 #include <string>
@@ -39,7 +37,6 @@ class AudioBuffer : public std::enable_shared_from_this<AudioBuffer> {
   friend class AudioBufferSourceNode;
 
   std::shared_ptr<AudioBus> bus_;
-  std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
 };
 
 } // namespace audioapi
```
package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp CHANGED

```diff
@@ -19,6 +19,7 @@ AudioBufferSourceNode::AudioBufferSourceNode(
       pitchCorrection_(pitchCorrection),
       vReadIndex_(0.0) {
   buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
+  alignedBus_ = std::shared_ptr<AudioBus>(nullptr);
 
   detuneParam_ = std::make_shared<AudioParam>(
       0.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT);
@@ -28,9 +29,19 @@ AudioBufferSourceNode::AudioBufferSourceNode(
   playbackRateBus_ = std::make_shared<AudioBus>(
       RENDER_QUANTUM_SIZE * 3, channelCount_, context_->getSampleRate());
 
+  stretch_ =
+      std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
+
   isInitialized_ = true;
 }
 
+AudioBufferSourceNode::~AudioBufferSourceNode() {
+  Locker locker(getBufferLock());
+
+  buffer_.reset();
+  alignedBus_.reset();
+}
+
 bool AudioBufferSourceNode::getLoop() const {
   return loop_;
 }
@@ -74,11 +85,13 @@ void AudioBufferSourceNode::setBuffer(
 
   if (!buffer) {
     buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
+    alignedBus_ = std::shared_ptr<AudioBus>(nullptr);
     loopEnd_ = 0;
     return;
   }
 
   buffer_ = buffer;
+  alignedBus_ = std::make_shared<AudioBus>(*buffer_->bus_);
   channelCount_ = buffer_->getNumberOfChannels();
 
   audioBus_ = std::make_shared<AudioBus>(
@@ -87,6 +100,8 @@ void AudioBufferSourceNode::setBuffer(
       RENDER_QUANTUM_SIZE * 3, channelCount_, context_->getSampleRate());
 
   loopEnd_ = buffer_->getDuration();
+
+  stretch_->presetDefault(channelCount_, buffer_->getSampleRate());
 }
 
 void AudioBufferSourceNode::start(double when, double offset, double duration) {
@@ -96,22 +111,30 @@ void AudioBufferSourceNode::start(double when, double offset, double duration) {
     AudioScheduledSourceNode::stop(when + duration);
   }
 
-  if (!
+  if (!alignedBus_) {
     return;
   }
 
-  offset = std::min(
+  offset = std::min(
+      offset,
+      static_cast<double>(alignedBus_->getSize()) /
+          alignedBus_->getSampleRate());
 
   if (loop_) {
     offset = std::min(offset, loopEnd_);
   }
 
-  vReadIndex_ = static_cast<double>(
+  vReadIndex_ = static_cast<double>(alignedBus_->getSampleRate() * offset);
 }
 
 void AudioBufferSourceNode::disable() {
   AudioNode::disable();
-
+
+  if (onendedCallback_) {
+    onendedCallback_(getStopTime());
+  }
+
+  alignedBus_.reset();
 }
 
 std::mutex &AudioBufferSourceNode::getBufferLock() {
@@ -121,88 +144,96 @@ std::mutex &AudioBufferSourceNode::getBufferLock() {
 void AudioBufferSourceNode::processNode(
     const std::shared_ptr<AudioBus> &processingBus,
     int framesToProcess) {
-
-
-
-
-
+  if (auto locker = Locker::tryLock(getBufferLock())) {
+    // No audio data to fill, zero the output and return.
+    if (!alignedBus_) {
+      processingBus->zero();
+      return;
+    }
+
+    if (!pitchCorrection_) {
+      processWithoutPitchCorrection(processingBus, framesToProcess);
+    } else {
+      processWithPitchCorrection(processingBus, framesToProcess);
+    }
 
-
+    handleStopScheduled();
+  } else {
     processingBus->zero();
-    return;
   }
+}
 
+double AudioBufferSourceNode::getStopTime() const {
+  return dsp::sampleFrameToTime(
+      static_cast<int>(vReadIndex_), alignedBus_->getSampleRate());
+}
+
+/**
+ * Helper functions
+ */
+
+void AudioBufferSourceNode::processWithoutPitchCorrection(
+    const std::shared_ptr<AudioBus> &processingBus,
+    int framesToProcess) {
   size_t startOffset = 0;
   size_t offsetLength = 0;
 
-
-
-  updatePlaybackInfo(
-      processingBus, framesToProcess, startOffset, offsetLength);
+  auto computedPlaybackRate = getComputedPlaybackRateValue();
+  updatePlaybackInfo(processingBus, framesToProcess, startOffset, offsetLength);
 
-
-
-
-
-
-
-
-
-      processingBus, startOffset, offsetLength, computedPlaybackRate);
-  }
+  if (computedPlaybackRate == 0.0f || !isPlaying()) {
+    processingBus->zero();
+    return;
+  }
+
+  if (std::fabs(computedPlaybackRate) == 1.0) {
+    processWithoutInterpolation(
+        processingBus, startOffset, offsetLength, computedPlaybackRate);
   } else {
-
-
-
-
-        std::clamp(detuneParam_->getValueAtTime(time) / 100.0f, -12.0f, 12.0f);
+    processWithInterpolation(
+        processingBus, startOffset, offsetLength, computedPlaybackRate);
+  }
+}
 
-
+void AudioBufferSourceNode::processWithPitchCorrection(
+    const std::shared_ptr<AudioBus> &processingBus,
+    int framesToProcess) {
+  size_t startOffset = 0;
+  size_t offsetLength = 0;
 
-
-
+  auto time = context_->getCurrentTime();
+  auto playbackRate =
+      std::clamp(playbackRateParam_->getValueAtTime(time), 0.0f, 3.0f);
+  auto detune =
+      std::clamp(detuneParam_->getValueAtTime(time) / 100.0f, -12.0f, 12.0f);
 
-
-      playbackRateBus_, framesNeededToStretch, startOffset, offsetLength);
+  playbackRateBus_->zero();
 
-
-
-    return;
-  }
+  auto framesNeededToStretch =
+      static_cast<int>(playbackRate * static_cast<float>(framesToProcess));
 
-
-
+  updatePlaybackInfo(
+      playbackRateBus_, framesNeededToStretch, startOffset, offsetLength);
 
-
-
-
-
+  if (playbackRate == 0.0f || !isPlaying()) {
+    processingBus->zero();
+    return;
+  }
 
-
+  processWithoutInterpolation(
+      playbackRateBus_, startOffset, offsetLength, playbackRate);
 
-
-
-
-
-
+  stretch_->process(
+      playbackRateBus_.get()[0],
+      framesNeededToStretch,
+      processingBus.get()[0],
+      framesToProcess);
 
-
-
-  }
+  if (detune != 0.0f) {
+    stretch_->setTransposeSemitones(detune);
   }
-
-  handleStopScheduled();
 }
 
-double AudioBufferSourceNode::getStopTime() const {
-  return dsp::sampleFrameToTime(
-      static_cast<int>(vReadIndex_), buffer_->getSampleRate());
-}
-
-/**
- * Helper functions
- */
-
 void AudioBufferSourceNode::processWithoutInterpolation(
     const std::shared_ptr<AudioBus> &processingBus,
     size_t startOffset,
```
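The new `processWithPitchCorrection()` splits speed from pitch: it first renders `playbackRate * framesToProcess` source frames at the raw rate into `playbackRateBus_`, then time-stretches them back down to `framesToProcess` output frames through signalsmith-stretch, with detune applied as a semitone transpose on the stretcher. A sketch of just the frame math (helper names here are illustrative, not the node's API):

```cpp
#include <algorithm>

// Consume rate * framesToProcess source frames, emit framesToProcess output
// frames: playback speed changes while the stretcher keeps pitch in place.
int framesNeededToStretch(float playbackRate, int framesToProcess) {
  // e.g. rate 2.0 and a 128-frame quantum: consume 256 source frames,
  // emit 128 stretched frames -> twice as fast, same pitch.
  return static_cast<int>(playbackRate * static_cast<float>(framesToProcess));
}

// Detune arrives in cents, is clamped to +/-12 semitones, and is applied as
// a transpose on the stretcher rather than by resampling.
float detuneSemitones(float detuneCents) {
  return std::clamp(detuneCents / 100.0f, -12.0f, 12.0f);
}

int main() {
  return framesNeededToStretch(2.0f, 128) - 256;  // 0
}
```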
```diff
@@ -228,15 +259,20 @@ void AudioBufferSourceNode::processWithoutInterpolation(
   size_t framesToCopy = std::min(framesToEnd, framesLeft);
   framesToCopy = framesToCopy > 0 ? framesToCopy : 0;
 
+  assert(readIndex >= 0);
+  assert(writeIndex >= 0);
+  assert(readIndex + framesToCopy <= alignedBus_->getSize());
+  assert(writeIndex + framesToCopy <= processingBus->getSize());
+
   // Direction is forward, we can normally copy the data
   if (direction == 1) {
     processingBus->copy(
-
+        alignedBus_.get(), readIndex, writeIndex, framesToCopy);
   } else {
     for (int i = 0; i < framesToCopy; i += 1) {
       for (int j = 0; j < processingBus->getNumberOfChannels(); j += 1) {
         (*processingBus->getChannel(j))[writeIndex + i] =
-            (*
+            (*alignedBus_->getChannel(j))[readIndex - i];
       }
     }
   }
@@ -250,12 +286,7 @@ void AudioBufferSourceNode::processWithoutInterpolation(
 
     if (!loop_) {
       processingBus->zero(writeIndex, framesLeft);
-
-      if (onendedCallback_) {
-        onendedCallback_(getStopTime());
-      }
-      playbackState_ = PlaybackState::FINISHED;
-      disable();
+      playbackState_ = PlaybackState::STOP_SCHEDULED;
       break;
     }
   }
@@ -301,7 +332,7 @@ void AudioBufferSourceNode::processWithInterpolation(
 
   for (int i = 0; i < processingBus->getNumberOfChannels(); i += 1) {
     float *destination = processingBus->getChannel(i)->getData();
-    const float *source =
+    const float *source = alignedBus_->getChannel(i)->getData();
 
     destination[writeIndex] =
         dsp::linearInterpolate(source, readIndex, nextReadIndex, factor);
@@ -316,8 +347,7 @@ void AudioBufferSourceNode::processWithInterpolation(
 
     if (!loop_) {
       processingBus->zero(writeIndex, framesLeft);
-      playbackState_ = PlaybackState::
-      disable();
+      playbackState_ = PlaybackState::STOP_SCHEDULED;
       break;
     }
   }
@@ -327,7 +357,8 @@ void AudioBufferSourceNode::processWithInterpolation(
 float AudioBufferSourceNode::getComputedPlaybackRateValue() {
   auto time = context_->getCurrentTime();
 
-  auto sampleRateFactor =
+  auto sampleRateFactor =
+      alignedBus_->getSampleRate() / context_->getSampleRate();
   auto playbackRate = playbackRateParam_->getValueAtTime(time);
   auto detune = std::pow(2.0f, detuneParam_->getValueAtTime(time) / 1200.0f);
 
@@ -342,7 +373,7 @@ double AudioBufferSourceNode::getVirtualStartFrame() {
 }
 
 double AudioBufferSourceNode::getVirtualEndFrame() {
-  auto inputBufferLength = static_cast<double>(
+  auto inputBufferLength = static_cast<double>(alignedBus_->getSize());
   auto loopEndFrame = loopEnd_ * context_->getSampleRate();
 
   return loop_ && loopEndFrame > 0 && loopStart_ < loopEnd_
```
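Worth noting in the `processNode()` hunk above is the non-blocking lock: the render thread renders only when it can take the buffer lock immediately, and otherwise emits silence for that quantum instead of blocking the audio callback. A sketch of a `Locker::tryLock`-style guard, under the assumption that it behaves like `std::unique_lock` with `try_to_lock` (the library's real `Locker` may differ):

```cpp
#include <mutex>

// Falsy when the mutex is already held, so `if (auto locker = ...)` cleanly
// selects the render branch vs. the silence branch.
std::unique_lock<std::mutex> tryLock(std::mutex &mutex) {
  return std::unique_lock<std::mutex>(mutex, std::try_to_lock);
}

void processQuantum(std::mutex &bufferLock /*, buses... */) {
  if (auto locker = tryLock(bufferLock)) {
    // Lock acquired: safe to read buffer_/alignedBus_ and render audio.
  } else {
    // Lock contended (e.g. setBuffer() running on another thread):
    // output silence for this quantum, e.g. processingBus->zero();
  }
}

int main() {
  std::mutex m;
  processQuantum(m);
}
```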
package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h CHANGED

```diff
@@ -2,6 +2,7 @@
 
 #include <audioapi/core/sources/AudioBuffer.h>
 #include <audioapi/core/sources/AudioScheduledSourceNode.h>
+#include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>
 
 #include <memory>
 #include <cstddef>
@@ -16,6 +17,7 @@ class AudioParam;
 class AudioBufferSourceNode : public AudioScheduledSourceNode {
  public:
   explicit AudioBufferSourceNode(BaseAudioContext *context, bool pitchCorrection);
+  ~AudioBufferSourceNode();
 
   [[nodiscard]] bool getLoop() const;
   [[nodiscard]] double getLoopStart() const;
@@ -46,6 +48,7 @@ class AudioBufferSourceNode : public AudioScheduledSourceNode {
 
   // pitch correction
   bool pitchCorrection_;
+  std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
 
   // k-rate params
   std::shared_ptr<AudioParam> detuneParam_;
@@ -58,6 +61,13 @@ class AudioBufferSourceNode : public AudioScheduledSourceNode {
 
   // User provided buffer
   std::shared_ptr<AudioBuffer> buffer_;
+  std::shared_ptr<AudioBus> alignedBus_;
+
+  void processWithoutPitchCorrection(const std::shared_ptr<AudioBus> &processingBus,
+                                     int framesToProcess);
+
+  void processWithPitchCorrection(const std::shared_ptr<AudioBus> &processingBus,
+                                  int framesToProcess);
 
   void processWithoutInterpolation(
       const std::shared_ptr<AudioBus>& processingBus,
```
package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp CHANGED

```diff
@@ -40,6 +40,10 @@ bool AudioScheduledSourceNode::isFinished() {
   return playbackState_ == PlaybackState::FINISHED;
 }
 
+bool AudioScheduledSourceNode::isStopScheduled() {
+  return playbackState_ == PlaybackState::STOP_SCHEDULED;
+}
+
 void AudioScheduledSourceNode::setOnendedCallback(
     const std::function<void(double)> &onendedCallback) {
   onendedCallback_ = onendedCallback;
@@ -99,36 +103,24 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
   // stop will happen in this render quantum
   // zero remaining frames after stop frame
   if (stopFrame < lastFrame && stopFrame >= firstFrame) {
+    playbackState_ = PlaybackState::STOP_SCHEDULED;
     startOffset = 0;
     nonSilentFramesToProcess = stopFrame - firstFrame;
     processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame);
     return;
   }
 
-  // mark as finished in first silent render quantum
-  if (stopFrame < firstFrame) {
-    startOffset = 0;
-    nonSilentFramesToProcess = 0;
-
-    if (onendedCallback_) {
-      onendedCallback_(getStopTime());
-    }
-
-    playbackState_ = PlaybackState::FINISHED;
-    disable();
-    return;
-  }
-
   // normal "mid-buffer" playback
   startOffset = 0;
   nonSilentFramesToProcess = framesToProcess;
 }
 
 void AudioScheduledSourceNode::handleStopScheduled() {
-  if (
+  if (isStopScheduled()) {
     if (onendedCallback_) {
       onendedCallback_(getStopTime());
     }
+
     playbackState_ = PlaybackState::FINISHED;
     disable();
   }
```
package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h CHANGED

```diff
@@ -18,7 +18,7 @@ namespace audioapi {
 
 class AudioScheduledSourceNode : public AudioNode {
  public:
-  enum class PlaybackState { UNSCHEDULED, SCHEDULED, PLAYING, FINISHED };
+  enum class PlaybackState { UNSCHEDULED, SCHEDULED, PLAYING, FINISHED, STOP_SCHEDULED };
   explicit AudioScheduledSourceNode(BaseAudioContext *context);
 
   void start(double when);
@@ -28,6 +28,7 @@ class AudioScheduledSourceNode : public AudioNode {
   bool isScheduled();
   bool isPlaying();
   bool isFinished();
+  bool isStopScheduled();
 
   void setOnendedCallback(const std::function<void(double)> &onendedCallback);
 
```
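Together, the two AudioScheduledSourceNode changes replace the old finish-inline-while-rendering path with a small state machine: `updatePlaybackInfo()` only marks the node `STOP_SCHEDULED`, and `handleStopScheduled()` later fires `onended` once, flips the state to `FINISHED`, and disables the node. A condensed, illustrative model of that lifecycle (not the library's types):

```cpp
#include <functional>
#include <iostream>

enum class PlaybackState { UNSCHEDULED, SCHEDULED, PLAYING, FINISHED, STOP_SCHEDULED };

struct Source {
  PlaybackState state = PlaybackState::PLAYING;
  std::function<void(double)> onended;

  void renderQuantum(bool stopFrameReached) {
    if (stopFrameReached) {
      state = PlaybackState::STOP_SCHEDULED;  // defer; keep rendering cheap
    }
  }

  void handleStopScheduled() {
    if (state != PlaybackState::STOP_SCHEDULED) {
      return;
    }
    if (onended) {
      onended(/*stopTime=*/0.0);  // fired exactly once, after the quantum
    }
    state = PlaybackState::FINISHED;
    // disable();  // detach from the graph
  }
};

int main() {
  Source s;
  s.onended = [](double t) { std::cout << "onended at " << t << "\n"; };
  s.renderQuantum(true);
  s.handleStopScheduled();  // fires onended, state -> FINISHED
  s.handleStopScheduled();  // no-op
}
```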
package/common/cpp/audioapi/core/utils/AudioNodeManager.cpp CHANGED

```diff
@@ -5,7 +5,7 @@
 namespace audioapi {
 
 AudioNodeManager::~AudioNodeManager() {
-
+  cleanup();
 }
 
 void AudioNodeManager::addPendingConnection(
@@ -18,12 +18,10 @@ void AudioNodeManager::addPendingConnection(
 }
 
 void AudioNodeManager::preProcessGraph() {
-  if (
-
+  if (auto locker = Locker::tryLock(getGraphLock())) {
+    settlePendingConnections();
+    prepareNodesForDestruction();
   }
-
-  settlePendingConnections();
-  prepareNodesForDestruction();
 }
 
 std::mutex &AudioNodeManager::getGraphLock() {
@@ -32,19 +30,19 @@ std::mutex &AudioNodeManager::getGraphLock() {
 
 void AudioNodeManager::addNode(const std::shared_ptr<AudioNode> &node) {
   Locker lock(getGraphLock());
-
   nodes_.insert(node);
 }
 
 void AudioNodeManager::settlePendingConnections() {
-  for (auto
-
-
-
+  for (auto it = audioNodesToConnect_.begin(), end = audioNodesToConnect_.end();
+       it != end;
+       ++it) {
+    std::shared_ptr<AudioNode> from = std::get<0>(*it);
+    std::shared_ptr<AudioNode> to = std::get<1>(*it);
+    ConnectionType type = std::get<2>(*it);
 
-
-
-    }
+    assert(from != nullptr);
+    assert(to != nullptr);
 
     if (type == ConnectionType::CONNECT) {
       from->connectNode(to);
@@ -57,8 +55,11 @@ void AudioNodeManager::settlePendingConnections() {
 }
 
 void AudioNodeManager::prepareNodesForDestruction() {
-
+  auto it = nodes_.begin();
+
+  while (it != nodes_.end()) {
     if (it->use_count() == 1) {
+      assert(it->get()->inputNodes_.empty());
       it->get()->cleanup();
       it = nodes_.erase(it);
     } else {
@@ -70,8 +71,8 @@ void AudioNodeManager::prepareNodesForDestruction() {
 void AudioNodeManager::cleanup() {
   Locker lock(getGraphLock());
 
-  for (auto
-
+  for (auto it = nodes_.begin(), end = nodes_.end(); it != end; ++it) {
+    it->get()->cleanup();
   }
 
   nodes_.clear();
```
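`prepareNodesForDestruction()` is effectively a small garbage collector: under the graph lock, a node whose `use_count()` is 1 is owned only by the manager's set (no host object or graph edge still holds it), so it can be cleaned up and erased; `erase()` returning the next valid iterator is what makes deleting while iterating safe. A reduced sketch of the same loop (only meaningful when, as in the diff, a lock serializes access, since `use_count()` is racy otherwise):

```cpp
#include <memory>
#include <set>

struct Node { void cleanup() {} };

void collect(std::set<std::shared_ptr<Node>> &nodes) {
  auto it = nodes.begin();
  while (it != nodes.end()) {
    if (it->use_count() == 1) {  // the manager's set is the sole owner
      (*it)->cleanup();
      it = nodes.erase(it);      // erase invalidates `it`; use return value
    } else {
      ++it;
    }
  }
}

int main() {
  std::set<std::shared_ptr<Node>> nodes;
  nodes.insert(std::make_shared<Node>());  // sole owner: collected
  auto kept = std::make_shared<Node>();
  nodes.insert(kept);                      // second owner elsewhere: kept
  collect(nodes);
  return static_cast<int>(nodes.size());   // 1
}
```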
package/common/cpp/audioapi/utils/AudioArray.cpp CHANGED

```diff
@@ -7,6 +7,12 @@ AudioArray::AudioArray(size_t size) : data_(nullptr), size_(size) {
   resize(size);
 }
 
+AudioArray::AudioArray(const AudioArray &other) : data_(nullptr), size_(0) {
+  resize(other.size_);
+
+  copy(&other);
+}
+
 AudioArray::~AudioArray() {
   if (data_) {
     delete[] data_;
```
package/common/cpp/audioapi/utils/AudioBus.cpp CHANGED

```diff
@@ -22,6 +22,18 @@ AudioBus::AudioBus(size_t size, int numberOfChannels, float sampleRate)
   createChannels();
 }
 
+AudioBus::AudioBus(const AudioBus &other) {
+  numberOfChannels_ = other.numberOfChannels_;
+  sampleRate_ = other.sampleRate_;
+  size_ = other.size_;
+
+  createChannels();
+
+  for (int i = 0; i < numberOfChannels_; i += 1) {
+    channels_[i] = std::make_shared<AudioArray>(*other.channels_[i]);
+  }
+}
+
 AudioBus::~AudioBus() {
   channels_.clear();
 }
```
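The new `AudioArray`/`AudioBus` copy constructors are deep copies: every channel's sample storage is cloned rather than shared, which is what lets `setBuffer()` above keep a private `alignedBus_` copy of the user's buffer for the audio thread. A simplified model of those value semantics (names reduced; not the library's classes):

```cpp
#include <memory>
#include <vector>

struct Channel {
  std::vector<float> samples;
  explicit Channel(size_t size) : samples(size, 0.0f) {}
  Channel(const Channel &other) = default;  // vector already deep-copies
};

struct Bus {
  float sampleRate;
  std::vector<std::shared_ptr<Channel>> channels;

  Bus(size_t size, int numberOfChannels, float rate) : sampleRate(rate) {
    for (int i = 0; i < numberOfChannels; i += 1) {
      channels.push_back(std::make_shared<Channel>(size));
    }
  }

  // Deep copy: clone each channel instead of sharing its pointer, so the
  // copy can be read while the original is mutated or released elsewhere.
  Bus(const Bus &other) : sampleRate(other.sampleRate) {
    for (const auto &ch : other.channels) {
      channels.push_back(std::make_shared<Channel>(*ch));
    }
  }
};

int main() {
  Bus original(128, 2, 44100.0f);
  Bus copy(original);                   // same shape as AudioBus copy(*source)
  copy.channels[0]->samples[0] = 1.0f;  // does not touch `original`
}
```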
package/common/cpp/audioapi/utils/AudioBus.h CHANGED

```diff
@@ -25,6 +25,7 @@ class AudioBus {
   };
 
   explicit AudioBus(size_t size, int numberOfChannels, float sampleRate);
+  AudioBus(const AudioBus &other);
 
   ~AudioBus();
 
@@ -34,8 +35,8 @@ class AudioBus {
   [[nodiscard]] AudioArray *getChannel(int index) const;
   [[nodiscard]] AudioArray *getChannelByType(int channelType) const;
 
-
-
+  AudioArray &operator[](size_t index);
+  const AudioArray &operator[](size_t index) const;
 
   void normalize();
   void scale(float value);
```
package/ios/audioapi/ios/core/AudioPlayer.h CHANGED

```diff
@@ -16,8 +16,7 @@ typedef void (^RenderAudioBlock)(AudioBufferList *outputBuffer, int numFrames);
 @property (nonatomic, assign) float sampleRate;
 @property (nonatomic, assign) int channelCount;
 @property (nonatomic, assign) bool isRunning;
-@property (nonatomic,
-@property (nonatomic, assign) bool configurationChanged;
+@property (nonatomic, strong) AVAudioSourceNodeRenderBlock renderBlock;
 
 - (instancetype)initWithRenderAudioBlock:(RenderAudioBlock)renderAudio channelCount:(int)channelCount;
 
@@ -39,11 +38,6 @@ typedef void (^RenderAudioBlock)(AudioBufferList *outputBuffer, int numFrames);
 
 - (void)setupAndInitAudioSession;
 
-- (void)setupAndInitNotificationHandlers;
-
 - (void)connectAudioEngine;
 
-- (void)handleEngineConfigurationChange:(NSNotification *)notification;
-- (void)handleInterruption:(NSNotification *)notification;
-
 @end
```
package/ios/audioapi/ios/core/AudioPlayer.m CHANGED

```diff
@@ -9,29 +9,25 @@
     self.audioEngine = [[AVAudioEngine alloc] init];
     self.audioEngine.mainMixerNode.outputVolume = 1;
     self.isRunning = true;
-    self.isInterrupted = false;
-    self.configurationChanged = false;
 
     [self setupAndInitAudioSession];
-    [self setupAndInitNotificationHandlers];
 
     self.sampleRate = [self.audioSession sampleRate];
     self.channelCount = channelCount;
 
-    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:self.channelCount];
-
     __weak typeof(self) weakSelf = self;
-
-
-
-
-
-
-
-
-
-
+    self.renderBlock = ^OSStatus(
+        BOOL *isSilence, const AudioTimeStamp *timestamp, AVAudioFrameCount frameCount, AudioBufferList *outputData) {
+      if (outputData->mNumberBuffers != weakSelf.channelCount) {
+        return kAudioServicesBadPropertySizeError;
+      }
+
+      weakSelf.renderAudio(outputData, frameCount);
+      return kAudioServicesNoError;
+    };
+
+    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:self.channelCount];
+    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format renderBlock:self.renderBlock];
   }
 
   return self;
@@ -46,29 +42,25 @@
     self.audioEngine = [[AVAudioEngine alloc] init];
     self.audioEngine.mainMixerNode.outputVolume = 1;
     self.isRunning = true;
-    self.isInterrupted = false;
-    self.configurationChanged = false;
 
     [self setupAndInitAudioSession];
-    [self setupAndInitNotificationHandlers];
 
     self.sampleRate = sampleRate;
     self.channelCount = channelCount;
 
-    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:self.channelCount];
-
     __weak typeof(self) weakSelf = self;
-
-
-
-
-
-
-
-
-
-
+    self.renderBlock = ^OSStatus(
+        BOOL *isSilence, const AudioTimeStamp *timestamp, AVAudioFrameCount frameCount, AudioBufferList *outputData) {
+      if (outputData->mNumberBuffers != weakSelf.channelCount) {
+        return kAudioServicesBadPropertySizeError;
+      }
+
+      weakSelf.renderAudio(outputData, frameCount);
+      return kAudioServicesNoError;
+    };
+
+    _format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:self.sampleRate channels:self.channelCount];
+    _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format renderBlock:self.renderBlock];
   }
 
   return self;
@@ -122,20 +114,6 @@
   self.renderAudio = nil;
 }
 
-- (OSStatus)renderCallbackWithIsSilence:(BOOL *)isSilence
-                              timestamp:(const AudioTimeStamp *)timestamp
-                             frameCount:(AVAudioFrameCount)frameCount
-                             outputData:(AudioBufferList *)outputData
-{
-  if (outputData->mNumberBuffers < self.channelCount) {
-    return noErr; // Ensure we have stereo output
-  }
-
-  self.renderAudio(outputData, frameCount);
-
-  return noErr;
-}
-
 - (void)setupAndInitAudioSession
 {
   NSError *error = nil;
@@ -144,17 +122,20 @@
     self.audioSession = [AVAudioSession sharedInstance];
   }
 
-  [self.audioSession
+  [self.audioSession setCategory:AVAudioSessionCategoryPlayback
+                            mode:AVAudioSessionModeDefault
+                         options:AVAudioSessionCategoryOptionDuckOthers | AVAudioSessionCategoryOptionAllowAirPlay
+                           error:&error];
 
   if (error != nil) {
-    NSLog(@"Error while
+    NSLog(@"Error while configuring audio session: %@", [error debugDescription]);
     return;
   }
 
-  [self.audioSession
+  [self.audioSession setPreferredIOBufferDuration:0.022 error:&error];
 
   if (error != nil) {
-    NSLog(@"Error while
+    NSLog(@"Error while setting buffer size in audio session: %@", [error debugDescription]);
     return;
   }
 
@@ -164,24 +145,6 @@
     NSLog(@"Error while activating audio session: %@", [error debugDescription]);
     return;
   }
-
-  self.isInterrupted = false;
-}
-
-- (void)setupAndInitNotificationHandlers
-{
-  if (!self.notificationCenter) {
-    self.notificationCenter = [NSNotificationCenter defaultCenter];
-  }
-
-  [self.notificationCenter addObserver:self
-                              selector:@selector(handleEngineConfigurationChange:)
-                                  name:AVAudioEngineConfigurationChangeNotification
-                                object:nil];
-  [self.notificationCenter addObserver:self
-                              selector:@selector(handleInterruption:)
-                                  name:AVAudioSessionInterruptionNotification
-                                object:nil];
 }
 
 - (void)connectAudioEngine
@@ -200,44 +163,6 @@
     NSLog(@"Error starting audio engine: %@", [error debugDescription]);
   }
 }
-
-  self.configurationChanged = false;
-}
-
-- (void)handleEngineConfigurationChange:(NSNotification *)notification
-{
-  if (!self.isRunning || self.isInterrupted) {
-    self.configurationChanged = true;
-    return;
-  }
-
-  dispatch_async(dispatch_get_main_queue(), ^{
-    [self connectAudioEngine];
-  });
-}
-
-- (void)handleInterruption:(NSNotification *)notification
-{
-  NSError *error;
-  UInt8 type = [[notification.userInfo valueForKey:AVAudioSessionInterruptionTypeKey] intValue];
-  UInt8 option = [[notification.userInfo valueForKey:AVAudioSessionInterruptionOptionKey] intValue];
-
-  if (type == AVAudioSessionInterruptionTypeBegan) {
-    self.isInterrupted = true;
-    return;
-  }
-
-  if (type != AVAudioSessionInterruptionTypeEnded || option != AVAudioSessionInterruptionOptionShouldResume) {
-    return;
-  }
-
-  [self setupAndInitAudioSession];
-
-  if (self.configurationChanged && self.isRunning) {
-    dispatch_async(dispatch_get_main_queue(), ^{
-      [self connectAudioEngine];
-    });
-  }
 }
 
 @end
```
package/package.json CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "react-native-audio-api",
-  "version": "0.5.3",
+  "version": "0.5.5",
   "description": "react-native-audio-api provides system for controlling audio in React Native environment compatible with Web Audio API specification",
   "bin": {
     "setup-rn-audio-api-web": "./scripts/setup-rn-audio-api-web.js"
```