react-native-audio-api 0.5.5 → 0.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +28 -0
- package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.h +70 -0
- package/common/cpp/audioapi/core/OfflineAudioContext.cpp +117 -0
- package/common/cpp/audioapi/core/OfflineAudioContext.h +40 -0
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +3 -3
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +28 -2
- package/common/cpp/audioapi/core/utils/AudioNodeDestructor.cpp +53 -0
- package/common/cpp/audioapi/core/utils/AudioNodeDestructor.h +33 -0
- package/common/cpp/audioapi/core/utils/AudioNodeManager.cpp +13 -10
- package/common/cpp/audioapi/core/utils/AudioNodeManager.h +3 -0
- package/common/cpp/audioapi/libs/signalsmith-stretch/fft-accelerate.h +326 -0
- package/common/cpp/audioapi/libs/signalsmith-stretch/fft.h +1257 -413
- package/common/cpp/audioapi/libs/signalsmith-stretch/signalsmith-stretch.h +398 -232
- package/common/cpp/audioapi/libs/signalsmith-stretch/stft.h +625 -0
- package/lib/module/api.js +2 -1
- package/lib/module/api.js.map +1 -1
- package/lib/module/api.web.js +1 -0
- package/lib/module/api.web.js.map +1 -1
- package/lib/module/core/OfflineAudioContext.js +57 -0
- package/lib/module/core/OfflineAudioContext.js.map +1 -0
- package/lib/module/web-core/OfflineAudioContext.js +90 -0
- package/lib/module/web-core/OfflineAudioContext.js.map +1 -0
- package/lib/typescript/api.d.ts +3 -1
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/api.web.d.ts +1 -0
- package/lib/typescript/api.web.d.ts.map +1 -1
- package/lib/typescript/core/OfflineAudioContext.d.ts +14 -0
- package/lib/typescript/core/OfflineAudioContext.d.ts.map +1 -0
- package/lib/typescript/interfaces.d.ts +6 -0
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +5 -0
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/OfflineAudioContext.d.ts +34 -0
- package/lib/typescript/web-core/OfflineAudioContext.d.ts.map +1 -0
- package/package.json +1 -1
- package/src/api.ts +11 -2
- package/src/api.web.ts +1 -0
- package/src/core/OfflineAudioContext.ts +94 -0
- package/src/interfaces.ts +11 -0
- package/src/types.ts +6 -0
- package/src/web-core/OfflineAudioContext.tsx +163 -0
- package/common/cpp/audioapi/libs/signalsmith-stretch/delay.h +0 -715
- package/common/cpp/audioapi/libs/signalsmith-stretch/perf.h +0 -82
- package/common/cpp/audioapi/libs/signalsmith-stretch/spectral.h +0 -493
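The headline change in this range is a new `OfflineAudioContext` that renders an audio graph into an `AudioBuffer` off the device output path, wired up through a `createOfflineAudioContext` JSI binding (first hunks below) and surfaced in the TypeScript API (`package/src/api.ts`, `package/src/core/OfflineAudioContext.ts`). A minimal usage sketch, assuming the package mirrors the Web Audio constructor signature and re-exports `OfflineAudioContext` from its root; the oscillator source and exact method names are illustrative assumptions, not confirmed by this diff:

```ts
import { OfflineAudioContext } from 'react-native-audio-api';

// Sketch only: (numberOfChannels, length in frames, sampleRate) follows the
// Web Audio API; the exact TypeScript signature in this package is assumed.
async function renderOneSecond() {
  const sampleRate = 44100;
  const context = new OfflineAudioContext(2, sampleRate, sampleRate);

  // Any source node works; an oscillator is used purely for illustration.
  const oscillator = context.createOscillator();
  oscillator.connect(context.destination);
  oscillator.start(0);
  oscillator.stop(1);

  // Resolves with the fully rendered AudioBuffer.
  return context.startRendering();
}
```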
package/common/cpp/audioapi/AudioAPIModuleInstaller.h

```diff
@@ -2,7 +2,9 @@
 
 #include <audioapi/jsi/JsiPromise.h>
 #include <audioapi/core/AudioContext.h>
+#include <audioapi/core/OfflineAudioContext.h>
 #include <audioapi/HostObjects/AudioContextHostObject.h>
+#include <audioapi/HostObjects/OfflineAudioContextHostObject.h>
 
 #include <memory>
 
@@ -14,8 +16,11 @@ class AudioAPIModuleInstaller {
  public:
   static void injectJSIBindings(jsi::Runtime *jsiRuntime, const std::shared_ptr<react::CallInvoker> &jsCallInvoker) {
     auto createAudioContext = getCreateAudioContextFunction(jsiRuntime, jsCallInvoker);
+    auto createOfflineAudioContext = getCreateOfflineAudioContextFunction(jsiRuntime, jsCallInvoker);
     jsiRuntime->global().setProperty(
         *jsiRuntime, "createAudioContext", createAudioContext);
+    jsiRuntime->global().setProperty(
+        *jsiRuntime, "createOfflineAudioContext", createOfflineAudioContext);
   }
 
  private:
@@ -44,6 +49,29 @@ class AudioAPIModuleInstaller {
               runtime, audioContextHostObject);
         });
   }
+
+  static jsi::Function getCreateOfflineAudioContextFunction(jsi::Runtime *jsiRuntime, const std::shared_ptr<react::CallInvoker> &jsCallInvoker) {
+    return jsi::Function::createFromHostFunction(
+        *jsiRuntime,
+        jsi::PropNameID::forAscii(*jsiRuntime, "createOfflineAudioContext"),
+        0,
+        [jsiRuntime, jsCallInvoker](
+            jsi::Runtime &runtime,
+            const jsi::Value &thisValue,
+            const jsi::Value *args,
+            size_t count) -> jsi::Value {
+          auto numberOfChannels = static_cast<int>(args[0].getNumber());
+          auto length = static_cast<size_t>(args[1].getNumber());
+          auto sampleRate = static_cast<float>(args[2].getNumber());
+
+          std::shared_ptr<OfflineAudioContext> offlineAudioContext = std::make_shared<OfflineAudioContext>(numberOfChannels, length, sampleRate);
+          auto audioContextHostObject = std::make_shared<OfflineAudioContextHostObject>(
+              offlineAudioContext, jsiRuntime, jsCallInvoker);
+
+          return jsi::Object::createFromHostObject(
+              runtime, audioContextHostObject);
+        });
+  }
 };
 
 } // namespace audioapi
```
package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.h

```diff
@@ -0,0 +1,70 @@
+#pragma once
+
+#include <audioapi/core/OfflineAudioContext.h>
+#include <audioapi/HostObjects/BaseAudioContextHostObject.h>
+
+#include <jsi/jsi.h>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace audioapi {
+using namespace facebook;
+
+class OfflineAudioContextHostObject : public BaseAudioContextHostObject {
+ public:
+  explicit OfflineAudioContextHostObject(
+      const std::shared_ptr<OfflineAudioContext> &offlineAudioContext,
+      jsi::Runtime *runtime,
+      const std::shared_ptr<react::CallInvoker> &callInvoker)
+      : BaseAudioContextHostObject(offlineAudioContext, runtime, callInvoker) {
+    addFunctions(
+        JSI_EXPORT_FUNCTION(OfflineAudioContextHostObject, resume),
+        JSI_EXPORT_FUNCTION(OfflineAudioContextHostObject, suspend),
+        JSI_EXPORT_FUNCTION(OfflineAudioContextHostObject, startRendering));
+  }
+
+  JSI_HOST_FUNCTION(resume) {
+    auto promise = promiseVendor_->createPromise([this](const std::shared_ptr<Promise>& promise) {
+      auto audioContext = std::static_pointer_cast<OfflineAudioContext>(context_);
+      audioContext->resume();
+    });
+
+    return promise;
+  }
+
+  JSI_HOST_FUNCTION(suspend) {
+    double when = args[0].getNumber();
+
+    auto promise = promiseVendor_->createPromise([this, when](const std::shared_ptr<Promise>& promise) {
+      auto audioContext = std::static_pointer_cast<OfflineAudioContext>(context_);
+      OfflineAudioContextSuspendCallback callback = [promise]() {
+        promise->resolve([](jsi::Runtime &runtime) {
+          return jsi::Value::undefined();
+        });
+      };
+      audioContext->suspend(when, callback);
+    });
+
+    return promise;
+  }
+
+  JSI_HOST_FUNCTION(startRendering) {
+    auto promise = promiseVendor_->createPromise([this](const std::shared_ptr<Promise>& promise) {
+      auto audioContext = std::static_pointer_cast<OfflineAudioContext>(context_);
+
+      OfflineAudioContextResultCallback callback =
+          [promise](const std::shared_ptr<AudioBuffer>& audioBuffer) -> void {
+        auto audioBufferHostObject = std::make_shared<AudioBufferHostObject>(audioBuffer);
+        promise->resolve([audioBufferHostObject = std::move(audioBufferHostObject)](jsi::Runtime &runtime) {
+          return jsi::Object::createFromHostObject(runtime, audioBufferHostObject);
+        });
+      };
+
+      audioContext->startRendering(callback);
+    });
+
+    return promise;
+  }
+};
+} // namespace audioapi
```
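The host object above exposes `resume`, `suspend`, and `startRendering` as promise-returning JSI functions. A hedged sketch of how that might look from JS, assuming the TypeScript wrapper (`package/src/core/OfflineAudioContext.ts`) follows Web Audio semantics where `suspend(when)` resolves once rendering reaches `when`:

```ts
import { OfflineAudioContext } from 'react-native-audio-api';

// Sketch under the assumptions stated above; exact signatures are not
// confirmed by this diff.
async function renderWithSuspend() {
  const sampleRate = 44100;
  const offline = new OfflineAudioContext(1, 2 * sampleRate, sampleRate);

  // Pause rendering at ~1 s, adjust the graph, then continue.
  offline.suspend(1.0).then(() => {
    // ...reconfigure nodes while the render loop is suspended...
    offline.resume();
  });

  return offline.startRendering();
}
```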
package/common/cpp/audioapi/core/OfflineAudioContext.cpp

```diff
@@ -0,0 +1,117 @@
+#include "OfflineAudioContext.h"
+
+#include <audioapi/core/AudioContext.h>
+#include <audioapi/core/Constants.h>
+#include <audioapi/core/destinations/AudioDestinationNode.h>
+#include <audioapi/core/sources/AudioBuffer.h>
+#include <audioapi/core/utils/AudioDecoder.h>
+#include <audioapi/core/utils/AudioNodeManager.h>
+#include <audioapi/core/utils/Locker.h>
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
+
+#include <algorithm>
+#include <cassert>
+#include <iostream>
+#include <thread>
+#include <utility>
+
+namespace audioapi {
+
+OfflineAudioContext::OfflineAudioContext(
+    int numberOfChannels,
+    size_t length,
+    float sampleRate)
+    : BaseAudioContext(),
+      length_(length),
+      numberOfChannels_(numberOfChannels),
+      currentSampleFrame_(0) {
+  sampleRate_ = sampleRate;
+  audioDecoder_ = std::make_shared<AudioDecoder>(sampleRate_);
+  resultBus_ = std::make_shared<AudioBus>(
+      static_cast<int>(length_), numberOfChannels_, sampleRate_);
+}
+
+OfflineAudioContext::~OfflineAudioContext() {
+  nodeManager_->cleanup();
+}
+
+void OfflineAudioContext::resume() {
+  Locker locker(mutex_);
+
+  if (state_ == ContextState::RUNNING) {
+    return;
+  }
+
+  renderAudio();
+}
+
+void OfflineAudioContext::suspend(
+    double when,
+    const std::function<void()> &callback) {
+  Locker locker(mutex_);
+
+  // we can only suspend once per render quantum at the end of the quantum
+  // first quantum is [0, RENDER_QUANTUM_SIZE)
+  auto frame = static_cast<size_t>(when * sampleRate_);
+  frame = RENDER_QUANTUM_SIZE *
+      ((frame + RENDER_QUANTUM_SIZE - 1) / RENDER_QUANTUM_SIZE);
+
+  if (scheduledSuspends_.find(frame) != scheduledSuspends_.end()) {
+    throw std::runtime_error(
+        "cannot schedule more than one suspend at frame " +
+        std::to_string(frame) + " (" + std::to_string(when) + " seconds)");
+  }
+
+  scheduledSuspends_.emplace(frame, callback);
+}
+
+void OfflineAudioContext::renderAudio() {
+  state_ = ContextState::RUNNING;
+  std::thread([this]() {
+    auto audioBus = std::make_shared<AudioBus>(
+        RENDER_QUANTUM_SIZE, numberOfChannels_, sampleRate_);
+
+    while (currentSampleFrame_ < length_) {
+      Locker locker(mutex_);
+      int framesToProcess = std::min(
+          static_cast<int>(length_ - currentSampleFrame_), RENDER_QUANTUM_SIZE);
+
+      destination_->renderAudio(audioBus, framesToProcess);
+
+      for (int i = 0; i < framesToProcess; i++) {
+        for (int channel = 0; channel < numberOfChannels_; channel += 1) {
+          resultBus_->getChannel(channel)->getData()[currentSampleFrame_ + i] =
+              audioBus->getChannel(channel)->getData()[i];
+        }
+      }
+
+      currentSampleFrame_ += framesToProcess;
+
+      // Execute scheduled suspend if exists
+      auto suspend = scheduledSuspends_.find(currentSampleFrame_);
+      if (suspend != scheduledSuspends_.end()) {
+        assert(currentSampleFrame_ < length_);
+        auto callback = suspend->second;
+        scheduledSuspends_.erase(currentSampleFrame_);
+        state_ = ContextState::SUSPENDED;
+        callback();
+        return;
+      }
+    }
+
+    // Rendering completed
+    auto buffer = std::make_shared<AudioBuffer>(resultBus_);
+    resultCallback_(buffer);
+  }).detach();
+}
+
+void OfflineAudioContext::startRendering(
+    OfflineAudioContextResultCallback callback) {
+  Locker locker(mutex_);
+
+  resultCallback_ = std::move(callback);
+  renderAudio();
+}
+
+} // namespace audioapi
```
package/common/cpp/audioapi/core/OfflineAudioContext.h

```diff
@@ -0,0 +1,40 @@
+#pragma once
+
+#include "BaseAudioContext.h"
+
+#include <mutex>
+#include <map>
+#include <unordered_map>
+#include <memory>
+
+namespace audioapi {
+
+using OfflineAudioContextSuspendCallback = std::function<void()>;
+using OfflineAudioContextResultCallback = std::function<void(std::shared_ptr<AudioBuffer>)>;
+
+class OfflineAudioContext : public BaseAudioContext {
+ public:
+  explicit OfflineAudioContext(int numberOfChannels, size_t length, float sampleRate);
+  ~OfflineAudioContext() override;
+
+  void resume();
+  void suspend(double when, const OfflineAudioContextSuspendCallback& callback);
+
+  void startRendering(OfflineAudioContextResultCallback callback);
+
+ private:
+  std::mutex mutex_;
+
+  std::unordered_map<size_t, OfflineAudioContextSuspendCallback> scheduledSuspends_;
+  OfflineAudioContextResultCallback resultCallback_;
+
+  size_t length_;
+  int numberOfChannels_;
+  size_t currentSampleFrame_;
+
+  std::shared_ptr<AudioBus> resultBus_;
+
+  void renderAudio();
+};
+
+} // namespace audioapi
```
package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp

```diff
@@ -101,7 +101,7 @@ void AudioBufferSourceNode::setBuffer(
 
   loopEnd_ = buffer_->getDuration();
 
-  stretch_->presetDefault(channelCount_, buffer_->getSampleRate());
+  stretch_->presetDefault(channelCount_, buffer_->getSampleRate(), true);
 }
 
 void AudioBufferSourceNode::start(double when, double offset, double duration) {
@@ -181,7 +181,7 @@ void AudioBufferSourceNode::processWithoutPitchCorrection(
   auto computedPlaybackRate = getComputedPlaybackRateValue();
   updatePlaybackInfo(processingBus, framesToProcess, startOffset, offsetLength);
 
-  if (computedPlaybackRate == 0.0f || !isPlaying()) {
+  if (computedPlaybackRate == 0.0f || (!isPlaying() && !isStopScheduled())) {
     processingBus->zero();
     return;
   }
@@ -215,7 +215,7 @@ void AudioBufferSourceNode::processWithPitchCorrection(
   updatePlaybackInfo(
       playbackRateBus_, framesNeededToStretch, startOffset, offsetLength);
 
-  if (playbackRate == 0.0f || !isPlaying()) {
+  if (playbackRate == 0.0f || (!isPlaying() && !isStopScheduled())) {
     processingBus->zero();
     return;
   }
```
package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp

```diff
@@ -71,7 +71,7 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
       std::max(dsp::timeToSampleFrame(startTime_, sampleRate), firstFrame);
   size_t stopFrame = stopTime_ == -1.0
       ? std::numeric_limits<size_t>::max()
-      :
+      : dsp::timeToSampleFrame(stopTime_, sampleRate);
 
   if (isUnscheduled() || isFinished()) {
     startOffset = 0;
@@ -93,7 +93,18 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
     startOffset = std::max(startFrame, firstFrame) - firstFrame > 0
         ? std::max(startFrame, firstFrame) - firstFrame
         : 0;
-    nonSilentFramesToProcess =
+    nonSilentFramesToProcess =
+        std::max(std::min(lastFrame, stopFrame), startFrame) - startFrame;
+
+    assert(startOffset <= framesToProcess);
+    assert(nonSilentFramesToProcess <= framesToProcess);
+
+    // stop will happen in the same render quantum
+    if (stopFrame < lastFrame && stopFrame >= firstFrame) {
+      playbackState_ = PlaybackState::STOP_SCHEDULED;
+      processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame);
+    }
+
     processingBus->zero(0, startOffset);
     return;
   }
@@ -106,10 +117,25 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
     playbackState_ = PlaybackState::STOP_SCHEDULED;
     startOffset = 0;
     nonSilentFramesToProcess = stopFrame - firstFrame;
+
+    assert(startOffset <= framesToProcess);
+    assert(nonSilentFramesToProcess <= framesToProcess);
+
     processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame);
     return;
   }
 
+  // mark as finished in first silent render quantum
+  if (stopFrame < firstFrame) {
+    startOffset = 0;
+    nonSilentFramesToProcess = 0;
+
+    playbackState_ = PlaybackState::STOP_SCHEDULED;
+    handleStopScheduled();
+    playbackState_ = PlaybackState::FINISHED;
+    return;
+  }
+
   // normal "mid-buffer" playback
   startOffset = 0;
   nonSilentFramesToProcess = framesToProcess;
```
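The three `AudioScheduledSourceNode` hunks above change how a scheduled stop is applied: a stop that lands in the same render quantum as the start now marks the node `STOP_SCHEDULED` and zeroes the tail of that quantum, and a stop frame that precedes the current quantum marks the node finished right away. A hedged TypeScript illustration of the call pattern these paths cover (the buffer is a hypothetical `AudioBuffer` obtained elsewhere; names follow the package's Web Audio-style API):

```ts
// Illustration only: `shortBuffer` is assumed to be an AudioBuffer created elsewhere.
function playTinyBurst(ctx: AudioContext, shortBuffer: AudioBuffer) {
  const source = ctx.createBufferSource();
  source.buffer = shortBuffer;
  source.connect(ctx.destination);

  const t = ctx.currentTime + 0.1;
  source.start(t);
  // Stop roughly 64 frames after the start, i.e. inside the same (typically
  // 128-frame) render quantum; this is the case the "stop will happen in the
  // same render quantum" branch above now handles explicitly.
  source.stop(t + 64 / ctx.sampleRate);
}
```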
package/common/cpp/audioapi/core/utils/AudioNodeDestructor.cpp

```diff
@@ -0,0 +1,53 @@
+#include <audioapi/core/AudioNode.h>
+#include <audioapi/core/utils/AudioNodeDestructor.h>
+#include <audioapi/core/utils/Locker.h>
+
+namespace audioapi {
+
+AudioNodeDestructor::AudioNodeDestructor()
+    : thread_(std::thread(&AudioNodeDestructor::process, this)),
+      isExiting_(false) {}
+
+AudioNodeDestructor::~AudioNodeDestructor() {
+  isExiting_ = true;
+
+  cv_.notify_one(); // call process for the last time
+  if (thread_.joinable()) {
+    thread_.join();
+  }
+}
+
+void AudioNodeDestructor::tryCallWithLock(
+    const std::function<void()> &callback) {
+  if (auto lock = Locker::tryLock(mutex_)) {
+    callback();
+  }
+}
+
+void AudioNodeDestructor::addNodeForDeconstruction(
+    const std::shared_ptr<AudioNode> &node) {
+  // NOTE: this method must be called within `tryCallWithLock`
+  nodesForDeconstruction_.emplace_back(node);
+}
+
+void AudioNodeDestructor::notify() {
+  cv_.notify_one();
+}
+
+void AudioNodeDestructor::process() {
+  std::unique_lock<std::mutex> lock(mutex_);
+  while (!isExiting_) {
+    cv_.wait(lock, [this] {
+      return isExiting_ || !nodesForDeconstruction_.empty();
+    });
+
+    if (isExiting_)
+      break;
+
+    if (!isExiting_ && !nodesForDeconstruction_.empty()) {
+      nodesForDeconstruction_.clear();
+    }
+  }
+}
+
+} // namespace audioapi
```
package/common/cpp/audioapi/core/utils/AudioNodeDestructor.h

```diff
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+#include <atomic>
+#include <vector>
+#include <memory>
+
+namespace audioapi {
+
+class AudioNode;
+
+class AudioNodeDestructor {
+ public:
+  AudioNodeDestructor();
+  ~AudioNodeDestructor();
+
+  void tryCallWithLock(const std::function<void()> &callback);
+  void addNodeForDeconstruction(const std::shared_ptr<AudioNode> &node);
+  void notify();
+
+ private:
+  mutable std::mutex mutex_;
+  std::thread thread_;
+  std::condition_variable cv_;
+  std::vector<std::shared_ptr<AudioNode>> nodesForDeconstruction_;
+
+  std::atomic<bool> isExiting_;
+
+  void process();
+};
+} // namespace audioapi
```
package/common/cpp/audioapi/core/utils/AudioNodeManager.cpp

```diff
@@ -55,17 +55,20 @@ void AudioNodeManager::settlePendingConnections() {
 }
 
 void AudioNodeManager::prepareNodesForDestruction() {
-
-
-
-
-
-
-
-
-
+  nodeDeconstructor_.tryCallWithLock([this]() {
+    auto it = nodes_.begin();
+
+    while (it != nodes_.end()) {
+      if (it->use_count() == 1) {
+        nodeDeconstructor_.addNodeForDeconstruction(*it);
+        it->get()->cleanup();
+        it = nodes_.erase(it);
+      } else {
+        ++it;
+      }
     }
-  }
+  });
+  nodeDeconstructor_.notify();
 }
 
 void AudioNodeManager::cleanup() {
```
package/common/cpp/audioapi/core/utils/AudioNodeManager.h

```diff
@@ -1,5 +1,7 @@
 #pragma once
 
+#include <audioapi/core/utils/AudioNodeDestructor.h>
+
 #include <memory>
 #include <mutex>
 #include <tuple>
@@ -31,6 +33,7 @@ class AudioNodeManager {
 
  private:
   std::mutex graphLock_;
+  AudioNodeDestructor nodeDeconstructor_;
 
   // all nodes created in the context
   std::unordered_set<std::shared_ptr<AudioNode>> nodes_;
```