react-native-audio-api 0.11.0-nightly-010ea11-20251110 → 0.11.0-nightly-141c86f-20251112
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +6 -4
- package/common/cpp/audioapi/HostObjects/AudioContextHostObject.cpp +6 -2
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +16 -2
- package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.cpp +3 -1
- package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.cpp +14 -9
- package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.h +1 -1
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.cpp +19 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.h +3 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp +2 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferSourceNodeHostObject.cpp +4 -1
- package/common/cpp/audioapi/core/AudioContext.cpp +19 -24
- package/common/cpp/audioapi/core/AudioContext.h +2 -2
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +16 -0
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +23 -3
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +16 -3
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +5 -5
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +10 -0
- package/common/cpp/audioapi/dsp/VectorMath.cpp +15 -15
- package/common/cpp/test/src/biquad/BiquadFilterTest.cpp +5 -5
- package/ios/audioapi/ios/AudioAPIModule.h +6 -4
- package/ios/audioapi/ios/AudioAPIModule.mm +62 -41
- package/ios/audioapi/ios/core/IOSAudioPlayer.h +1 -1
- package/ios/audioapi/ios/core/IOSAudioPlayer.mm +33 -24
- package/ios/audioapi/ios/core/IOSAudioRecorder.h +3 -2
- package/ios/audioapi/ios/core/IOSAudioRecorder.mm +6 -4
- package/ios/audioapi/ios/core/NativeAudioPlayer.m +18 -9
- package/ios/audioapi/ios/core/NativeAudioRecorder.h +2 -1
- package/ios/audioapi/ios/core/NativeAudioRecorder.m +45 -27
- package/ios/audioapi/ios/core/utils/AudioDecoder.mm +44 -19
- package/ios/audioapi/ios/system/AudioEngine.h +4 -2
- package/ios/audioapi/ios/system/AudioEngine.mm +22 -8
- package/ios/audioapi/ios/system/AudioSessionManager.h +9 -5
- package/ios/audioapi/ios/system/AudioSessionManager.mm +51 -21
- package/ios/audioapi/ios/system/LockScreenManager.mm +137 -88
- package/ios/audioapi/ios/system/NotificationManager.mm +79 -48
- package/lib/commonjs/api.js.map +1 -1
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/commonjs/core/AudioContext.js +1 -1
- package/lib/commonjs/core/AudioContext.js.map +1 -1
- package/lib/commonjs/core/ConvolverNode.js +2 -2
- package/lib/commonjs/core/ConvolverNode.js.map +1 -1
- package/lib/commonjs/web-core/AudioContext.js +1 -1
- package/lib/commonjs/web-core/AudioContext.js.map +1 -1
- package/lib/module/api.js.map +1 -1
- package/lib/module/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/module/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/module/core/AudioContext.js +1 -1
- package/lib/module/core/AudioContext.js.map +1 -1
- package/lib/module/core/ConvolverNode.js +2 -2
- package/lib/module/core/ConvolverNode.js.map +1 -1
- package/lib/module/web-core/AudioContext.js +1 -1
- package/lib/module/web-core/AudioContext.js.map +1 -1
- package/lib/typescript/api.d.ts +1 -1
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts +1 -0
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +4 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +0 -1
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/api.ts +0 -1
- package/src/core/AudioBufferBaseSourceNode.ts +8 -0
- package/src/core/AudioContext.ts +0 -1
- package/src/core/ConvolverNode.ts +2 -2
- package/src/interfaces.ts +6 -1
- package/src/types.ts +0 -1
- package/src/web-core/AudioContext.tsx +1 -1
package/common/cpp/audioapi/AudioAPIModuleInstaller.h

@@ -96,12 +96,11 @@ class AudioAPIModuleInstaller {
             size_t count) -> jsi::Value {
           std::shared_ptr<AudioContext> audioContext;
           auto sampleRate = static_cast<float>(args[0].getNumber());
-          auto initSuspended = args[1].getBool();

 #if RN_AUDIO_API_ENABLE_WORKLETS
           auto runtimeRegistry = RuntimeRegistry{
               .uiRuntime = uiRuntime,
-              .audioRuntime = worklets::extractWorkletRuntime(runtime, args[
+              .audioRuntime = worklets::extractWorkletRuntime(runtime, args[1])
           };
 #else
           auto runtimeRegistry = RuntimeRegistry{};
@@ -109,7 +108,6 @@ class AudioAPIModuleInstaller {

           audioContext = std::make_shared<AudioContext>(
               sampleRate,
-              initSuspended,
               audioEventHandlerRegistry,
               runtimeRegistry);
           AudioAPIModuleInstaller::contexts_.push_back(audioContext);
@@ -192,8 +190,12 @@ class AudioAPIModuleInstaller {
             std::make_shared<AudioRecorderHostObject>(
                 audioEventHandlerRegistry, sampleRate, bufferLength);

-
+        auto jsiObject = jsi::Object::createFromHostObject(
             runtime, audioRecorderHostObject);
+        jsiObject.setExternalMemoryPressure(
+            runtime, sizeof(float) * bufferLength); // rough estimate of underlying buffer
+
+        return jsiObject;
       });
 }

package/common/cpp/audioapi/HostObjects/AudioContextHostObject.cpp

@@ -33,7 +33,9 @@ JSI_HOST_FUNCTION_IMPL(AudioContextHostObject, resume) {
   auto promise = promiseVendor_->createAsyncPromise(
       [audioContext = std::move(audioContext)]() {
         auto result = audioContext->resume();
-        return [result](jsi::Runtime &runtime) {
+        return [result](jsi::Runtime &runtime) {
+          return jsi::Value(result);
+        };
       });
   return promise;
 }
@@ -43,7 +45,9 @@ JSI_HOST_FUNCTION_IMPL(AudioContextHostObject, suspend) {
   auto promise = promiseVendor_->createAsyncPromise(
       [audioContext = std::move(audioContext)]() {
         auto result = audioContext->suspend();
-        return [result](jsi::Runtime &runtime) {
+        return [result](jsi::Runtime &runtime) {
+          return jsi::Value(result);
+        };
       });

   return promise;
package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp

@@ -121,7 +121,13 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createWorkletNode) {
       shouldLockRuntime);
   auto workletNodeHostObject =
       std::make_shared<WorkletNodeHostObject>(workletNode);
-
+  auto jsiObject =
+      jsi::Object::createFromHostObject(runtime, workletNodeHostObject);
+  jsiObject.setExternalMemoryPressure(
+      runtime,
+      sizeof(float) * bufferLength *
+          inputChannelCount); // rough estimate of underlying buffer
+  return jsiObject;
 #endif
   return jsi::Value::undefined();
 }
@@ -285,6 +291,14 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConvolver) {
   }
   auto convolverHostObject =
       std::make_shared<ConvolverNodeHostObject>(convolver);
-
+  auto jsiObject =
+      jsi::Object::createFromHostObject(runtime, convolverHostObject);
+  if (!args[0].isUndefined()) {
+    auto bufferHostObject =
+        args[0].getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
+    jsiObject.setExternalMemoryPressure(
+        runtime, bufferHostObject->getSizeInBytes());
+  }
+  return jsiObject;
 }
 } // namespace audioapi
package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.cpp

@@ -20,7 +20,9 @@ JSI_HOST_FUNCTION_IMPL(OfflineAudioContextHostObject, resume) {
   auto audioContext = std::static_pointer_cast<OfflineAudioContext>(context_);
   auto promise = promiseVendor_->createAsyncPromise([audioContext]() {
     audioContext->resume();
-    return [](jsi::Runtime &runtime) {
+    return [](jsi::Runtime &runtime) {
+      return jsi::Value::undefined();
+    };
   });

   return promise;
package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.cpp

@@ -11,9 +11,8 @@ ConvolverNodeHostObject::ConvolverNodeHostObject(
   addGetters(
       JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, normalize),
       JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, buffer));
-  addSetters(
-
-      JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, buffer));
+  addSetters(JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, normalize));
+  addFunctions(JSI_EXPORT_FUNCTION(ConvolverNodeHostObject, setBuffer));
 }

 JSI_PROPERTY_GETTER_IMPL(ConvolverNodeHostObject, normalize) {
@@ -25,7 +24,10 @@ JSI_PROPERTY_GETTER_IMPL(ConvolverNodeHostObject, buffer) {
   auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
   auto buffer = convolverNode->getBuffer();
   auto bufferHostObject = std::make_shared<AudioBufferHostObject>(buffer);
-
+  auto jsiObject = jsi::Object::createFromHostObject(runtime, bufferHostObject);
+  jsiObject.setExternalMemoryPressure(
+      runtime, bufferHostObject->getSizeInBytes() + 16);
+  return jsiObject;
 }

 JSI_PROPERTY_SETTER_IMPL(ConvolverNodeHostObject, normalize) {
@@ -33,15 +35,18 @@ JSI_PROPERTY_SETTER_IMPL(ConvolverNodeHostObject, normalize) {
   convolverNode->setNormalize(value.getBool());
 }

-
+JSI_HOST_FUNCTION_IMPL(ConvolverNodeHostObject, setBuffer) {
   auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
-  if (
+  if (args[0].isUndefined()) {
     convolverNode->setBuffer(nullptr);
-    return;
+    return jsi::Value::undefined();
   }

   auto bufferHostObject =
-
+      args[0].getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
   convolverNode->setBuffer(bufferHostObject->audioBuffer_);
+  thisValue.asObject(runtime).setExternalMemoryPressure(
+      runtime, bufferHostObject->getSizeInBytes() + 16);
+  return jsi::Value::undefined();
 }
-} // namespace audioapi
+} // namespace audioapi
package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.h

@@ -15,6 +15,6 @@ class ConvolverNodeHostObject : public AudioNodeHostObject {
   JSI_PROPERTY_GETTER_DECL(normalize);
   JSI_PROPERTY_GETTER_DECL(buffer);
   JSI_PROPERTY_SETTER_DECL(normalize);
-
+  JSI_HOST_FUNCTION_DECL(setBuffer);
 };
 } // namespace audioapi
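Note on the ConvolverNode change above: the JS-side wrapper (package/src/core/ConvolverNode.ts, +2 -2 in this release) is not shown in this excerpt. The following is only a hedged sketch of what that wrapper plausibly does now, keeping the WebAudio-style `buffer` property while forwarding writes to the new native `setBuffer()` host function; all names except `setBuffer` are assumptions.

    // Sketch only, not the package source.
    interface NativeConvolverNodeLike<TBuffer> {
      setBuffer(buffer: TBuffer | null): void; // new host function from this diff
      readonly buffer: TBuffer | null;         // reading stays a property getter
    }

    class ConvolverNodeSketch<TBuffer> {
      constructor(private readonly native: NativeConvolverNodeLike<TBuffer>) {}

      get buffer(): TBuffer | null {
        return this.native.buffer;
      }

      set buffer(value: TBuffer | null) {
        // Previously a native property setter; a host function lets the native
        // side also report the buffer size via setExternalMemoryPressure().
        this.native.setBuffer(value);
      }
    }
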
package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.cpp

@@ -20,6 +20,11 @@ AudioBufferBaseSourceNodeHostObject::AudioBufferBaseSourceNodeHostObject(
           AudioBufferBaseSourceNodeHostObject, onPositionChanged),
       JSI_EXPORT_PROPERTY_SETTER(
           AudioBufferBaseSourceNodeHostObject, onPositionChangedInterval));
+
+  addFunctions(
+      JSI_EXPORT_FUNCTION(AudioBufferBaseSourceNodeHostObject, getInputLatency),
+      JSI_EXPORT_FUNCTION(
+          AudioBufferBaseSourceNodeHostObject, getOutputLatency));
 }

 AudioBufferBaseSourceNodeHostObject::~AudioBufferBaseSourceNodeHostObject() {
@@ -70,4 +75,18 @@ JSI_PROPERTY_SETTER_IMPL(
   sourceNode->setOnPositionChangedInterval(static_cast<int>(value.getNumber()));
 }

+JSI_HOST_FUNCTION_IMPL(AudioBufferBaseSourceNodeHostObject, getInputLatency) {
+  auto audioBufferBaseSourceNode =
+      std::static_pointer_cast<AudioBufferBaseSourceNode>(node_);
+
+  return audioBufferBaseSourceNode->getInputLatency();
+}
+
+JSI_HOST_FUNCTION_IMPL(AudioBufferBaseSourceNodeHostObject, getOutputLatency) {
+  auto audioBufferBaseSourceNode =
+      std::static_pointer_cast<AudioBufferBaseSourceNode>(node_);
+
+  return audioBufferBaseSourceNode->getOutputLatency();
+}
+
 } // namespace audioapi
package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.h

@@ -24,6 +24,9 @@ class AudioBufferBaseSourceNodeHostObject

   JSI_PROPERTY_SETTER_DECL(onPositionChanged);
   JSI_PROPERTY_SETTER_DECL(onPositionChangedInterval);
+
+  JSI_HOST_FUNCTION_DECL(getInputLatency);
+  JSI_HOST_FUNCTION_DECL(getOutputLatency);
 };

 } // namespace audioapi
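The latency getters above also reach JS (package/src/core/AudioBufferBaseSourceNode.ts gains 8 lines and interfaces.ts gains the declarations). Below is a hedged usage sketch, not package documentation: it assumes the JS methods keep the native names and return seconds, and that the existing `pitchCorrection` option and `decodeAudioDataSource()` behave as in the current public API.

    // Illustrative sketch; latency is expected to be 0 unless pitch correction is on.
    import { AudioContext } from 'react-native-audio-api';

    async function logSourceLatency(uri: string): Promise<void> {
      const ctx = new AudioContext();
      const buffer = await ctx.decodeAudioDataSource(uri);

      const source = ctx.createBufferSource({ pitchCorrection: true });
      source.buffer = buffer;

      console.log('input latency (s):', source.getInputLatency());
      console.log('output latency (s):', source.getOutputLatency());
    }
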
package/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp

@@ -52,6 +52,8 @@ JSI_HOST_FUNCTION_IMPL(AudioBufferHostObject, getChannelData) {
   auto float32Array = float32ArrayCtor.callAsConstructor(runtime, arrayBuffer)
                           .getObject(runtime);

+  float32Array.setExternalMemoryPressure(runtime, audioArrayBuffer->size());
+
   return float32Array;
 }

package/common/cpp/audioapi/HostObjects/sources/AudioBufferSourceNodeHostObject.cpp

@@ -64,7 +64,10 @@ JSI_PROPERTY_GETTER_IMPL(AudioBufferSourceNodeHostObject, buffer) {
   }

   auto bufferHostObject = std::make_shared<AudioBufferHostObject>(buffer);
-
+  auto jsiObject = jsi::Object::createFromHostObject(runtime, bufferHostObject);
+  jsiObject.setExternalMemoryPressure(
+      runtime, bufferHostObject->getSizeInBytes() + 16);
+  return jsiObject;
 }

 JSI_PROPERTY_GETTER_IMPL(AudioBufferSourceNodeHostObject, loopStart) {
package/common/cpp/audioapi/core/AudioContext.cpp

@@ -11,7 +11,6 @@
 namespace audioapi {
 AudioContext::AudioContext(
     float sampleRate,
-    bool initSuspended,
     const std::shared_ptr<IAudioEventHandlerRegistry>
         &audioEventHandlerRegistry,
     const RuntimeRegistry &runtimeRegistry)
@@ -25,17 +24,8 @@ AudioContext::AudioContext(
 #endif

   sampleRate_ = sampleRate;
-
-
-    playerHasBeenStarted_ = false;
-    state_ = ContextState::SUSPENDED;
-
-    return;
-  }
-
-  playerHasBeenStarted_ = true;
-  audioPlayer_->start();
-  state_ = ContextState::RUNNING;
+  playerHasBeenStarted_ = false;
+  state_ = ContextState::SUSPENDED;
 }

 AudioContext::~AudioContext() {
@@ -61,22 +51,12 @@ bool AudioContext::resume() {
     return true;
   }

-  if (
-    if (audioPlayer_->start()) {
-      playerHasBeenStarted_ = true;
-      state_ = ContextState::RUNNING;
-      return true;
-    }
-
-    return false;
-  }
-
-  if (audioPlayer_->resume()) {
+  if (playerHasBeenStarted_ && audioPlayer_->resume()) {
     state_ = ContextState::RUNNING;
     return true;
   }

-  return
+  return start();
 }

 bool AudioContext::suspend() {
@@ -94,6 +74,21 @@ bool AudioContext::suspend() {
   return true;
 }

+bool AudioContext::start() {
+  if (isClosed()) {
+    return false;
+  }
+
+  if (!playerHasBeenStarted_ && audioPlayer_->start()) {
+    playerHasBeenStarted_ = true;
+    state_ = ContextState::RUNNING;
+
+    return true;
+  }
+
+  return false;
+}
+
 std::function<void(std::shared_ptr<AudioBus>, int)>
 AudioContext::renderAudio() {
   return [this](const std::shared_ptr<AudioBus> &data, int frames) {
package/common/cpp/audioapi/core/AudioContext.h

@@ -15,13 +15,13 @@ class IOSAudioPlayer;

 class AudioContext : public BaseAudioContext {
  public:
-  explicit AudioContext(float sampleRate,
+  explicit AudioContext(float sampleRate, const std::shared_ptr<IAudioEventHandlerRegistry> &audioEventHandlerRegistry, const RuntimeRegistry &runtimeRegistry);
   ~AudioContext() override;

   void close();
   bool resume();
   bool suspend();
-
+  bool start();

  private:
 #ifdef ANDROID
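Taken together with the `initSuspended` removal, a context now always begins suspended and the platform player is started lazily through the new `start()`. A hedged consumer-side sketch follows; the options shape is from the existing public API, `initSuspended` is simply gone (package/src/types.ts -1), and the "falls back to start()" behavior is read off the resume() hunk above rather than documented.

    // Sketch of the assumed JS-visible behavior after this change, not package code.
    import { AudioContext } from 'react-native-audio-api';

    async function setup(): Promise<void> {
      const ctx = new AudioContext({ sampleRate: 44100 }); // begins 'suspended'
      // No initSuspended flag anymore: the native player starts lazily,
      // either via resume() (which now falls back to start()) or when a source starts.
      await ctx.resume();
      console.log(ctx.state); // expected: 'running'
    }
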
package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp

@@ -60,6 +60,22 @@ std::mutex &AudioBufferBaseSourceNode::getBufferLock() {
   return bufferLock_;
 }

+double AudioBufferBaseSourceNode::getInputLatency() const {
+  if (pitchCorrection_) {
+    return static_cast<double>(stretch_->inputLatency()) /
+        context_->getSampleRate();
+  }
+  return 0;
+}
+
+double AudioBufferBaseSourceNode::getOutputLatency() const {
+  if (pitchCorrection_) {
+    return static_cast<double>(stretch_->outputLatency()) /
+        context_->getSampleRate();
+  }
+  return 0;
+}
+
 void AudioBufferBaseSourceNode::sendOnPositionChangedEvent() {
   auto onPositionChangedCallbackId =
       onPositionChangedCallbackId_.load(std::memory_order_acquire);
package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h

@@ -22,6 +22,8 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode {
   void setOnPositionChangedCallbackId(uint64_t callbackId);
   void setOnPositionChangedInterval(int interval);
   [[nodiscard]] int getOnPositionChangedInterval() const;
+  [[nodiscard]] double getInputLatency() const;
+  [[nodiscard]] double getOutputLatency() const;

  protected:
   // pitch correction
package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp

@@ -17,6 +17,19 @@ AudioBufferQueueSourceNode::AudioBufferQueueSourceNode(
   buffers_ = {};
   stretch_->presetDefault(channelCount_, context_->getSampleRate());

+  if (pitchCorrection) {
+    // If pitch correction is enabled, add extra frames at the end
+    // to compensate for processing latency.
+    addExtraTailFrames_ = true;
+
+    int extraTailFrames =
+        static_cast<int>(stretch_->inputLatency() + stretch_->outputLatency());
+    tailBuffer_ = std::make_shared<AudioBuffer>(
+        channelCount_, extraTailFrames, context_->getSampleRate());
+
+    tailBuffer_->bus_->zero();
+  }
+
   isInitialized_ = true;
 }

@@ -177,10 +190,17 @@ void AudioBufferQueueSourceNode::processWithoutInterpolation(
         "ended", onEndedCallbackId_, body);

     if (buffers_.empty()) {
-
-
+      if (addExtraTailFrames_) {
+        buffers_.emplace(bufferId_, tailBuffer_);
+        bufferId_++;

-
+        addExtraTailFrames_ = false;
+      } else if (buffers_.empty()) {
+        processingBus->zero(writeIndex, framesLeft);
+        readIndex = 0;
+
+        break;
+      }
     }

     data = buffers_.front();
package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp

@@ -82,17 +82,30 @@ void AudioBufferSourceNode::setBuffer(
   }

   buffer_ = buffer;
-  alignedBus_ = std::make_shared<AudioBus>(*buffer_->bus_);
   channelCount_ = buffer_->getNumberOfChannels();

+  stretch_->presetDefault(channelCount_, buffer_->getSampleRate());
+
+  if (pitchCorrection_) {
+    int extraTailFrames = static_cast<int>(
+        (getInputLatency() + getOutputLatency()) * context_->getSampleRate());
+    size_t totalSize = buffer_->getLength() + extraTailFrames;
+
+    alignedBus_ = std::make_shared<AudioBus>(
+        totalSize, channelCount_, buffer_->getSampleRate());
+    alignedBus_->copy(buffer_->bus_.get(), 0, 0, buffer_->getLength());
+
+    alignedBus_->zero(buffer_->getLength(), extraTailFrames);
+  } else {
+    alignedBus_ = std::make_shared<AudioBus>(*buffer_->bus_);
+  }
+
   audioBus_ = std::make_shared<AudioBus>(
       RENDER_QUANTUM_SIZE, channelCount_, context_->getSampleRate());
   playbackRateBus_ = std::make_shared<AudioBus>(
       RENDER_QUANTUM_SIZE * 3, channelCount_, context_->getSampleRate());

   loopEnd_ = buffer_->getDuration();
-
-  stretch_->presetDefault(channelCount_, buffer_->getSampleRate());
 }

 void AudioBufferSourceNode::start(double when, double offset, double duration) {
package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h

@@ -4,9 +4,9 @@
 #include <audioapi/core/sources/AudioBufferBaseSourceNode.h>
 #include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>

-#include <memory>
-#include <cstddef>
 #include <algorithm>
+#include <cstddef>
+#include <memory>
 #include <string>

 namespace audioapi {
@@ -38,7 +38,7 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode {
   void setOnLoopEndedCallbackId(uint64_t callbackId);

  protected:
-  std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus
+  std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus> &processingBus, int framesToProcess) override;
   double getCurrentPosition() const override;

  private:
@@ -56,13 +56,13 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode {
   void sendOnLoopEndedEvent();

   void processWithoutInterpolation(
-      const std::shared_ptr<AudioBus
+      const std::shared_ptr<AudioBus> &processingBus,
       size_t startOffset,
       size_t offsetLength,
       float playbackRate) override;

   void processWithInterpolation(
-      const std::shared_ptr<AudioBus
+      const std::shared_ptr<AudioBus> &processingBus,
       size_t startOffset,
       size_t offsetLength,
       float playbackRate) override;
package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp

@@ -6,6 +6,10 @@
 #include <audioapi/utils/AudioArray.h>
 #include <audioapi/utils/AudioBus.h>

+#if !RN_AUDIO_API_TEST
+#include <audioapi/core/AudioContext.h>
+#endif
+
 namespace audioapi {

 AudioScheduledSourceNode::AudioScheduledSourceNode(BaseAudioContext *context)
@@ -18,6 +22,12 @@ AudioScheduledSourceNode::AudioScheduledSourceNode(BaseAudioContext *context)
 }

 void AudioScheduledSourceNode::start(double when) {
+#if !RN_AUDIO_API_TEST
+  if (auto context = dynamic_cast<AudioContext *>(context_)) {
+    context->start();
+  }
+#endif
+
   playbackState_ = PlaybackState::SCHEDULED;
   startTime_ = when;
 }
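The practical effect of the hunk above is that scheduling any source is assumed to wake the owning AudioContext through the new start() path. A hedged sketch of that flow from the JS side; the "no explicit resume() needed" reading is an inference from this diff, not documented behavior.

    // Illustrative only; relies on the assumption stated above.
    import { AudioContext } from 'react-native-audio-api';

    function playTone(): void {
      const ctx = new AudioContext(); // suspended until something starts it
      const osc = ctx.createOscillator();
      osc.connect(ctx.destination);

      // Starting the source is expected to lazily start the context's player,
      // so an explicit ctx.resume() beforehand should no longer be required.
      osc.start(ctx.currentTime);
      osc.stop(ctx.currentTime + 0.5);
    }
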
package/common/cpp/audioapi/dsp/VectorMath.cpp

@@ -508,15 +508,15 @@ void multiply(
   bool source2Aligned = is16ByteAligned(inputVector2);
   bool destAligned = is16ByteAligned(outputVector);

-#define SSE2_MULT(loadInstr, storeInstr)
-  while (outputVector < endP) {
-    pSource1 = _mm_load_ps(inputVector1);
+#define SSE2_MULT(loadInstr, storeInstr)             \
+  while (outputVector < endP) {                      \
+    pSource1 = _mm_load_ps(inputVector1);            \
     pSource2 = _mm_##loadInstr##_ps(inputVector2);   \
-    dest = _mm_mul_ps(pSource1, pSource2);
-    _mm_##storeInstr##_ps(outputVector, dest);
-    inputVector1 += 4;
-    inputVector2 += 4;
-    outputVector += 4;
+    dest = _mm_mul_ps(pSource1, pSource2);           \
+    _mm_##storeInstr##_ps(outputVector, dest);       \
+    inputVector1 += 4;                               \
+    inputVector2 += 4;                               \
+    outputVector += 4;                               \
   }

   if (source2Aligned && destAligned) // Both aligned.
@@ -647,15 +647,15 @@ void multiplyByScalarThenAddToOutput(

   bool destAligned = is16ByteAligned(outputVector);

-#define SSE2_MULT_ADD(loadInstr, storeInstr)
-  while (outputVector < endP) {
-    pSource = _mm_load_ps(inputVector);
-    temp = _mm_mul_ps(pSource, mScale);
+#define SSE2_MULT_ADD(loadInstr, storeInstr)      \
+  while (outputVector < endP) {                   \
+    pSource = _mm_load_ps(inputVector);           \
+    temp = _mm_mul_ps(pSource, mScale);           \
     dest = _mm_##loadInstr##_ps(outputVector);    \
-    dest = _mm_add_ps(dest, temp);
+    dest = _mm_add_ps(dest, temp);                \
     _mm_##storeInstr##_ps(outputVector, dest);    \
-    inputVector += 4;
-    outputVector += 4;
+    inputVector += 4;                             \
+    outputVector += 4;                            \
   }

   if (destAligned)
package/common/cpp/test/src/biquad/BiquadFilterTest.cpp

@@ -89,18 +89,18 @@ INSTANTIATE_TEST_SUITE_P(
     Frequencies,
     BiquadFilterFrequencyTest,
     ::testing::Values(
-        0.0f,
-        10.0f,
+        0.0f, // 0 Hz - the filter should block all input signal
+        10.0f, // very low frequency
         350.0f, // default
         nyquistFrequency - 0.0001f, // frequency near Nyquist
-        nyquistFrequency));
+        nyquistFrequency)); // maximal frequency

 INSTANTIATE_TEST_SUITE_P(
     QEdgeCases,
     BiquadFilterQTestLowpassHighpass,
     ::testing::Values(
-        -770.63678f,
-        0.0f,
+        -770.63678f, // min value for lowpass and highpass
+        0.0f, // default
         770.63678f)); // max value for lowpass and highpass

 INSTANTIATE_TEST_SUITE_P(
package/ios/audioapi/ios/AudioAPIModule.h

@@ -13,11 +13,12 @@
 @class AudioSessionManager;
 @class LockScreenManager;

-@interface AudioAPIModule
+@interface AudioAPIModule
+    : RCTEventEmitter
 #ifdef RCT_NEW_ARCH_ENABLED
-
+    <NativeAudioAPIModuleSpec, RCTCallInvokerModule, RCTInvalidating>
 #else
-
+    <RCTBridgeModule>
 #endif // RCT_NEW_ARCH_ENABLED

 @property (nonatomic, strong) AudioEngine *audioEngine;
@@ -25,6 +26,7 @@
 @property (nonatomic, strong) AudioSessionManager *audioSessionManager;
 @property (nonatomic, strong) LockScreenManager *lockScreenManager;

-- (void)invokeHandlerWithEventName:(NSString *)eventName
+- (void)invokeHandlerWithEventName:(NSString *)eventName
+                         eventBody:(NSDictionary *)eventBody;

 @end