react-native-audio-api 0.11.0-nightly-568a154-20251222 → 0.11.0-nightly-94b7f30-20251224
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp +1 -1
- package/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp +11 -3
- package/android/src/main/cpp/audioapi/android/core/utils/ffmpegBackend/FFmpegFileWriter.cpp +47 -79
- package/android/src/main/cpp/audioapi/android/core/utils/ffmpegBackend/FFmpegFileWriter.h +3 -2
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +2 -0
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +9 -1
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +1 -0
- package/common/cpp/audioapi/HostObjects/effects/DelayNodeHostObject.cpp +6 -2
- package/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp +72 -0
- package/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.h +23 -0
- package/common/cpp/audioapi/core/AudioContext.cpp +15 -13
- package/common/cpp/audioapi/core/AudioContext.h +2 -1
- package/common/cpp/audioapi/core/AudioNode.cpp +39 -24
- package/common/cpp/audioapi/core/AudioNode.h +3 -3
- package/common/cpp/audioapi/core/AudioParam.cpp +9 -6
- package/common/cpp/audioapi/core/AudioParam.h +2 -2
- package/common/cpp/audioapi/core/BaseAudioContext.cpp +32 -21
- package/common/cpp/audioapi/core/BaseAudioContext.h +5 -1
- package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp +8 -11
- package/common/cpp/audioapi/core/analysis/AnalyserNode.h +1 -1
- package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp +9 -3
- package/common/cpp/audioapi/core/destinations/AudioDestinationNode.h +1 -1
- package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +18 -9
- package/common/cpp/audioapi/core/effects/BiquadFilterNode.h +1 -1
- package/common/cpp/audioapi/core/effects/ConvolverNode.cpp +3 -3
- package/common/cpp/audioapi/core/effects/ConvolverNode.h +1 -1
- package/common/cpp/audioapi/core/effects/DelayNode.cpp +20 -11
- package/common/cpp/audioapi/core/effects/DelayNode.h +1 -1
- package/common/cpp/audioapi/core/effects/GainNode.cpp +12 -4
- package/common/cpp/audioapi/core/effects/GainNode.h +1 -1
- package/common/cpp/audioapi/core/effects/IIRFilterNode.cpp +6 -3
- package/common/cpp/audioapi/core/effects/IIRFilterNode.h +1 -1
- package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp +7 -4
- package/common/cpp/audioapi/core/effects/StereoPannerNode.h +1 -1
- package/common/cpp/audioapi/core/effects/WaveShaperNode.cpp +79 -0
- package/common/cpp/audioapi/core/effects/WaveShaperNode.h +66 -0
- package/common/cpp/audioapi/core/effects/WorkletNode.cpp +2 -2
- package/common/cpp/audioapi/core/effects/WorkletNode.h +2 -2
- package/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp +7 -4
- package/common/cpp/audioapi/core/effects/WorkletProcessingNode.h +6 -2
- package/common/cpp/audioapi/core/sources/AudioBuffer.cpp +2 -3
- package/common/cpp/audioapi/core/sources/AudioBuffer.h +1 -1
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +59 -25
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +4 -2
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +18 -11
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +3 -1
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +37 -21
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +3 -3
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +11 -11
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +4 -2
- package/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp +16 -8
- package/common/cpp/audioapi/core/sources/ConstantSourceNode.h +1 -1
- package/common/cpp/audioapi/core/sources/OscillatorNode.cpp +30 -18
- package/common/cpp/audioapi/core/sources/OscillatorNode.h +1 -1
- package/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp +4 -4
- package/common/cpp/audioapi/core/sources/RecorderAdapterNode.h +1 -1
- package/common/cpp/audioapi/core/sources/StreamerNode.cpp +24 -10
- package/common/cpp/audioapi/core/sources/StreamerNode.h +4 -3
- package/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp +11 -4
- package/common/cpp/audioapi/core/sources/WorkletSourceNode.h +6 -2
- package/common/cpp/audioapi/core/types/OverSampleType.h +7 -0
- package/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp +1 -0
- package/common/cpp/audioapi/dsp/Resampler.cpp +200 -0
- package/common/cpp/audioapi/dsp/Resampler.h +65 -0
- package/common/cpp/audioapi/dsp/WaveShaper.cpp +105 -0
- package/common/cpp/audioapi/dsp/WaveShaper.h +46 -0
- package/common/cpp/audioapi/utils/AudioArray.cpp +5 -0
- package/common/cpp/audioapi/utils/AudioArray.h +6 -0
- package/common/cpp/test/RunTests.sh +1 -1
- package/common/cpp/test/src/AudioParamTest.cpp +10 -10
- package/common/cpp/test/src/AudioScheduledSourceTest.cpp +31 -15
- package/common/cpp/test/src/ConstantSourceTest.cpp +16 -14
- package/common/cpp/test/src/DelayTest.cpp +14 -13
- package/common/cpp/test/src/GainTest.cpp +10 -9
- package/common/cpp/test/src/IIRFilterTest.cpp +4 -4
- package/common/cpp/test/src/OscillatorTest.cpp +2 -2
- package/common/cpp/test/src/StereoPannerTest.cpp +14 -12
- package/common/cpp/test/src/biquad/BiquadFilterTest.cpp +25 -25
- package/common/cpp/test/src/biquad/BiquadFilterTest.h +3 -5
- package/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp +76 -0
- package/common/cpp/test/src/dsp/ResamplerTest.cpp +117 -0
- package/ios/audioapi/ios/AudioAPIModule.mm +4 -4
- package/ios/audioapi/ios/core/IOSAudioRecorder.mm +1 -1
- package/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm +9 -3
- package/lib/commonjs/AudioAPIModule/AudioAPIModule.js +0 -3
- package/lib/commonjs/AudioAPIModule/AudioAPIModule.js.map +1 -1
- package/lib/commonjs/AudioAPIModule/AudioAPIModule.web.js +20 -0
- package/lib/commonjs/AudioAPIModule/AudioAPIModule.web.js.map +1 -0
- package/lib/commonjs/AudioAPIModule/ModuleInterfaces.js +6 -0
- package/lib/commonjs/AudioAPIModule/ModuleInterfaces.js.map +1 -0
- package/lib/commonjs/api.js +16 -0
- package/lib/commonjs/api.js.map +1 -1
- package/lib/commonjs/api.web.js +23 -0
- package/lib/commonjs/api.web.js.map +1 -1
- package/lib/commonjs/core/BaseAudioContext.js +4 -0
- package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
- package/lib/commonjs/core/WaveShaperNode.js +38 -0
- package/lib/commonjs/core/WaveShaperNode.js.map +1 -0
- package/lib/commonjs/specs/NativeAudioAPIModule.js.map +1 -1
- package/lib/commonjs/specs/NativeAudioAPIModule.web.js +47 -0
- package/lib/commonjs/specs/NativeAudioAPIModule.web.js.map +1 -0
- package/lib/commonjs/system/AudioManager.js.map +1 -1
- package/lib/commonjs/system/types.js +4 -0
- package/lib/commonjs/web-core/AudioContext.js +4 -0
- package/lib/commonjs/web-core/AudioContext.js.map +1 -1
- package/lib/commonjs/web-core/OfflineAudioContext.js +4 -0
- package/lib/commonjs/web-core/OfflineAudioContext.js.map +1 -1
- package/lib/commonjs/web-core/WaveShaperNode.js +38 -0
- package/lib/commonjs/web-core/WaveShaperNode.js.map +1 -0
- package/lib/commonjs/web-system/AudioManager.js +30 -0
- package/lib/commonjs/web-system/AudioManager.js.map +1 -0
- package/lib/commonjs/web-system/index.js +12 -0
- package/lib/commonjs/web-system/index.js.map +1 -1
- package/lib/module/AudioAPIModule/AudioAPIModule.js +0 -4
- package/lib/module/AudioAPIModule/AudioAPIModule.js.map +1 -1
- package/lib/module/AudioAPIModule/AudioAPIModule.web.js +16 -0
- package/lib/module/AudioAPIModule/AudioAPIModule.web.js.map +1 -0
- package/lib/module/AudioAPIModule/ModuleInterfaces.js +4 -0
- package/lib/module/AudioAPIModule/ModuleInterfaces.js.map +1 -0
- package/lib/module/AudioAPIModule/index.js +1 -1
- package/lib/module/AudioAPIModule/index.js.map +1 -1
- package/lib/module/api.js +2 -0
- package/lib/module/api.js.map +1 -1
- package/lib/module/api.web.js +3 -1
- package/lib/module/api.web.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +4 -0
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/module/core/WaveShaperNode.js +32 -0
- package/lib/module/core/WaveShaperNode.js.map +1 -0
- package/lib/module/specs/NativeAudioAPIModule.js.map +1 -1
- package/lib/module/specs/NativeAudioAPIModule.web.js +44 -0
- package/lib/module/specs/NativeAudioAPIModule.web.js.map +1 -0
- package/lib/module/specs/index.js +1 -1
- package/lib/module/specs/index.js.map +1 -1
- package/lib/module/system/AudioManager.js.map +1 -1
- package/lib/module/system/types.js +2 -0
- package/lib/module/web-core/AudioContext.js +4 -0
- package/lib/module/web-core/AudioContext.js.map +1 -1
- package/lib/module/web-core/OfflineAudioContext.js +4 -0
- package/lib/module/web-core/OfflineAudioContext.js.map +1 -1
- package/lib/module/web-core/WaveShaperNode.js +32 -0
- package/lib/module/web-core/WaveShaperNode.js.map +1 -0
- package/lib/module/web-system/AudioManager.js +26 -0
- package/lib/module/web-system/AudioManager.js.map +1 -0
- package/lib/module/web-system/index.js +1 -0
- package/lib/module/web-system/index.js.map +1 -1
- package/lib/typescript/AudioAPIModule/AudioAPIModule.d.ts +2 -10
- package/lib/typescript/AudioAPIModule/AudioAPIModule.d.ts.map +1 -1
- package/lib/typescript/AudioAPIModule/AudioAPIModule.web.d.ts +13 -0
- package/lib/typescript/AudioAPIModule/AudioAPIModule.web.d.ts.map +1 -0
- package/lib/typescript/AudioAPIModule/ModuleInterfaces.d.ts +18 -0
- package/lib/typescript/AudioAPIModule/ModuleInterfaces.d.ts.map +1 -0
- package/lib/typescript/api.d.ts +2 -0
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/api.web.d.ts +3 -1
- package/lib/typescript/api.web.d.ts.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +2 -0
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/core/WaveShaperNode.d.ts +9 -0
- package/lib/typescript/core/WaveShaperNode.d.ts.map +1 -0
- package/lib/typescript/interfaces.d.ts +8 -2
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/specs/NativeAudioAPIModule.d.ts +1 -1
- package/lib/typescript/specs/NativeAudioAPIModule.d.ts.map +1 -1
- package/lib/typescript/specs/NativeAudioAPIModule.web.d.ts +34 -0
- package/lib/typescript/specs/NativeAudioAPIModule.web.d.ts.map +1 -0
- package/lib/typescript/system/AudioManager.d.ts +2 -2
- package/lib/typescript/system/AudioManager.d.ts.map +1 -1
- package/lib/typescript/system/notification/types.d.ts +1 -1
- package/lib/typescript/system/notification/types.d.ts.map +1 -1
- package/lib/typescript/system/types.d.ts +17 -0
- package/lib/typescript/system/types.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +1 -0
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts +2 -0
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/web-core/BaseAudioContext.d.ts +3 -1
- package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/web-core/OfflineAudioContext.d.ts +2 -0
- package/lib/typescript/web-core/OfflineAudioContext.d.ts.map +1 -1
- package/lib/typescript/web-core/WaveShaperNode.d.ts +9 -0
- package/lib/typescript/web-core/WaveShaperNode.d.ts.map +1 -0
- package/lib/typescript/web-system/AudioManager.d.ts +24 -0
- package/lib/typescript/web-system/AudioManager.d.ts.map +1 -0
- package/lib/typescript/web-system/index.d.ts +1 -0
- package/lib/typescript/web-system/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/AudioAPIModule/AudioAPIModule.ts +6 -17
- package/src/AudioAPIModule/AudioAPIModule.web.ts +18 -0
- package/src/AudioAPIModule/ModuleInterfaces.ts +25 -0
- package/src/api.ts +2 -0
- package/src/api.web.ts +3 -0
- package/src/core/BaseAudioContext.ts +5 -0
- package/src/core/WaveShaperNode.ts +43 -0
- package/src/interfaces.ts +9 -1
- package/src/specs/NativeAudioAPIModule.ts +5 -3
- package/src/specs/NativeAudioAPIModule.web.ts +93 -0
- package/src/system/AudioManager.ts +19 -14
- package/src/system/notification/types.ts +1 -1
- package/src/system/types.ts +22 -0
- package/src/types.ts +2 -0
- package/src/web-core/AudioContext.tsx +5 -0
- package/src/web-core/BaseAudioContext.tsx +3 -1
- package/src/web-core/OfflineAudioContext.tsx +5 -0
- package/src/web-core/WaveShaperNode.tsx +42 -0
- package/src/web-system/AudioManager.ts +33 -0
- package/src/web-system/index.ts +1 -0
package/common/cpp/audioapi/core/AudioParam.cpp

@@ -12,7 +12,7 @@ AudioParam::AudioParam(
     float defaultValue,
     float minValue,
     float maxValue,
-    BaseAudioContext *context)
+    std::shared_ptr<BaseAudioContext> context)
     : context_(context),
       value_(defaultValue),
       defaultValue_(defaultValue),
@@ -20,13 +20,13 @@ AudioParam::AudioParam(
       maxValue_(maxValue),
       eventsQueue_(),
       eventScheduler_(32),
+      startTime_(0),
+      endTime_(0),
+      startValue_(defaultValue),
+      endValue_(defaultValue),
       audioBus_(std::make_shared<AudioBus>(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())) {
   inputBuses_.reserve(4);
   inputNodes_.reserve(4);
-  startTime_ = 0;
-  endTime_ = 0;
-  startValue_ = value_;
-  endValue_ = value_;
   // Default calculation function just returns the static value
   calculateValue_ = [this](double, double, float, float, double) {
     return value_;
@@ -258,7 +258,10 @@ std::shared_ptr<AudioBus> AudioParam::processARateParam(int framesToProcess, dou
   processScheduledEvents();
   auto processingBus = calculateInputs(audioBus_, framesToProcess);
 
-  float sampleRate = context_->getSampleRate();
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (context == nullptr)
+    return processingBus;
+  float sampleRate = context->getSampleRate();
   float *busData = processingBus->getChannel(0)->getData();
   float timeCache = time;
   float timeStep = 1.0f / sampleRate;
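The recurring change in these hunks is that nodes now hold their context as a `std::weak_ptr<BaseAudioContext>` and promote it with `lock()` at the top of each render-path call, bailing out early if the context has already been destroyed. A minimal sketch of that guard pattern, using illustrative stand-in types rather than the package's real classes:

```cpp
#include <memory>

// Illustrative stand-ins, not the package's actual types.
struct Context {
  float sampleRate = 44100.0f;
};

struct Node {
  std::weak_ptr<Context> context_;  // non-owning back-reference to the context

  float process(float sample) {
    // Promote the weak_ptr for the duration of this single call.
    std::shared_ptr<Context> context = context_.lock();
    if (context == nullptr) {
      return sample;  // context already torn down: bail out, pass audio through
    }
    return sample / context->sampleRate;  // safe to dereference here
  }
};
```

The same lock-and-bail shape appears below in GainNode, StereoPannerNode, DelayNode, BiquadFilterNode and IIRFilterNode.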
package/common/cpp/audioapi/core/AudioParam.h

@@ -21,7 +21,7 @@ class AudioParam {
       float defaultValue,
       float minValue,
       float maxValue,
-      BaseAudioContext *context);
+      std::shared_ptr<BaseAudioContext> context);
 
   /// JS-Thread only methods
   /// These methods are called only from HostObjects invoked on the JS thread.
@@ -93,7 +93,7 @@ class AudioParam {
 
  private:
   // Core parameter state
-  BaseAudioContext *context_;
+  std::weak_ptr<BaseAudioContext> context_;
   float value_;
   float defaultValue_;
   float minValue_;
package/common/cpp/audioapi/core/BaseAudioContext.cpp

@@ -7,6 +7,7 @@
 #include <audioapi/core/effects/GainNode.h>
 #include <audioapi/core/effects/IIRFilterNode.h>
 #include <audioapi/core/effects/StereoPannerNode.h>
+#include <audioapi/core/effects/WaveShaperNode.h>
 #include <audioapi/core/effects/WorkletNode.h>
 #include <audioapi/core/effects/WorkletProcessingNode.h>
 #include <audioapi/core/sources/AudioBuffer.h>
@@ -35,12 +36,13 @@ namespace audioapi {
 
 BaseAudioContext::BaseAudioContext(
     const std::shared_ptr<IAudioEventHandlerRegistry> &audioEventHandlerRegistry,
-    const RuntimeRegistry &runtimeRegistry)
+    const RuntimeRegistry &runtimeRegistry)
+    : nodeManager_(std::make_shared<AudioNodeManager>()),
+      audioEventHandlerRegistry_(audioEventHandlerRegistry),
+      runtimeRegistry_(runtimeRegistry) {}
 
+void BaseAudioContext::initialize() {
+  destination_ = std::make_shared<AudioDestinationNode>(shared_from_this());
 }
 
 std::string BaseAudioContext::getState() {
@@ -78,7 +80,8 @@ std::shared_ptr<WorkletSourceNode> BaseAudioContext::createWorkletSourceNode(
     std::weak_ptr<worklets::WorkletRuntime> runtime,
     bool shouldLockRuntime) {
   WorkletsRunner workletRunner(runtime, shareableWorklet, shouldLockRuntime);
-  auto workletSourceNode = std::make_shared<WorkletSourceNode>(this, std::move(workletRunner));
+  auto workletSourceNode =
+      std::make_shared<WorkletSourceNode>(shared_from_this(), std::move(workletRunner));
   nodeManager_->addSourceNode(workletSourceNode);
   return workletSourceNode;
 }
@@ -91,7 +94,7 @@ std::shared_ptr<WorkletNode> BaseAudioContext::createWorkletNode(
     bool shouldLockRuntime) {
   WorkletsRunner workletRunner(runtime, shareableWorklet, shouldLockRuntime);
   auto workletNode = std::make_shared<WorkletNode>(
-      this, bufferLength, inputChannelCount, std::move(workletRunner));
+      shared_from_this(), bufferLength, inputChannelCount, std::move(workletRunner));
   nodeManager_->addProcessingNode(workletNode);
   return workletNode;
 }
@@ -102,32 +105,32 @@ std::shared_ptr<WorkletProcessingNode> BaseAudioContext::createWorkletProcessing
     bool shouldLockRuntime) {
   WorkletsRunner workletRunner(runtime, shareableWorklet, shouldLockRuntime);
   auto workletProcessingNode =
-      std::make_shared<WorkletProcessingNode>(this, std::move(workletRunner));
+      std::make_shared<WorkletProcessingNode>(shared_from_this(), std::move(workletRunner));
   nodeManager_->addProcessingNode(workletProcessingNode);
   return workletProcessingNode;
 }
 
 std::shared_ptr<RecorderAdapterNode> BaseAudioContext::createRecorderAdapter() {
-  auto recorderAdapter = std::make_shared<RecorderAdapterNode>(this);
+  auto recorderAdapter = std::make_shared<RecorderAdapterNode>(shared_from_this());
   nodeManager_->addProcessingNode(recorderAdapter);
   return recorderAdapter;
 }
 
 std::shared_ptr<OscillatorNode> BaseAudioContext::createOscillator() {
-  auto oscillator = std::make_shared<OscillatorNode>(this);
+  auto oscillator = std::make_shared<OscillatorNode>(shared_from_this());
   nodeManager_->addSourceNode(oscillator);
   return oscillator;
 }
 
 std::shared_ptr<ConstantSourceNode> BaseAudioContext::createConstantSource() {
-  auto constantSource = std::make_shared<ConstantSourceNode>(this);
+  auto constantSource = std::make_shared<ConstantSourceNode>(shared_from_this());
   nodeManager_->addSourceNode(constantSource);
   return constantSource;
 }
 
 std::shared_ptr<StreamerNode> BaseAudioContext::createStreamer() {
 #if !RN_AUDIO_API_FFMPEG_DISABLED
-  auto streamer = std::make_shared<StreamerNode>(this);
+  auto streamer = std::make_shared<StreamerNode>(shared_from_this());
   nodeManager_->addSourceNode(streamer);
   return streamer;
 #else
@@ -136,25 +139,25 @@ std::shared_ptr<StreamerNode> BaseAudioContext::createStreamer() {
 }
 
 std::shared_ptr<GainNode> BaseAudioContext::createGain() {
-  auto gain = std::make_shared<GainNode>(this);
+  auto gain = std::make_shared<GainNode>(shared_from_this());
   nodeManager_->addProcessingNode(gain);
   return gain;
 }
 
 std::shared_ptr<DelayNode> BaseAudioContext::createDelay(float maxDelayTime) {
-  auto delay = std::make_shared<DelayNode>(this, maxDelayTime);
+  auto delay = std::make_shared<DelayNode>(shared_from_this(), maxDelayTime);
   nodeManager_->addProcessingNode(delay);
   return delay;
 }
 
 std::shared_ptr<StereoPannerNode> BaseAudioContext::createStereoPanner() {
-  auto stereoPanner = std::make_shared<StereoPannerNode>(this);
+  auto stereoPanner = std::make_shared<StereoPannerNode>(shared_from_this());
   nodeManager_->addProcessingNode(stereoPanner);
   return stereoPanner;
 }
 
 std::shared_ptr<BiquadFilterNode> BaseAudioContext::createBiquadFilter() {
-  auto biquadFilter = std::make_shared<BiquadFilterNode>(this);
+  auto biquadFilter = std::make_shared<BiquadFilterNode>(shared_from_this());
   nodeManager_->addProcessingNode(biquadFilter);
   return biquadFilter;
 }
@@ -162,20 +165,21 @@ std::shared_ptr<BiquadFilterNode> BaseAudioContext::createBiquadFilter() {
 std::shared_ptr<IIRFilterNode> BaseAudioContext::createIIRFilter(
     const std::vector<float> &feedforward,
     const std::vector<float> &feedback) {
-  auto iirFilter = std::make_shared<IIRFilterNode>(this, feedforward, feedback);
+  auto iirFilter = std::make_shared<IIRFilterNode>(shared_from_this(), feedforward, feedback);
   nodeManager_->addProcessingNode(iirFilter);
   return iirFilter;
 }
 
 std::shared_ptr<AudioBufferSourceNode> BaseAudioContext::createBufferSource(bool pitchCorrection) {
-  auto bufferSource = std::make_shared<AudioBufferSourceNode>(this, pitchCorrection);
+  auto bufferSource = std::make_shared<AudioBufferSourceNode>(shared_from_this(), pitchCorrection);
   nodeManager_->addSourceNode(bufferSource);
   return bufferSource;
 }
 
 std::shared_ptr<AudioBufferQueueSourceNode> BaseAudioContext::createBufferQueueSource(
     bool pitchCorrection) {
-  auto bufferSource = std::make_shared<AudioBufferQueueSourceNode>(this, pitchCorrection);
+  auto bufferSource =
+      std::make_shared<AudioBufferQueueSourceNode>(shared_from_this(), pitchCorrection);
   nodeManager_->addSourceNode(bufferSource);
   return bufferSource;
 }
@@ -193,7 +197,7 @@ std::shared_ptr<PeriodicWave> BaseAudioContext::createPeriodicWave(
 }
 
 std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
-  auto analyser = std::make_shared<AnalyserNode>(this);
+  auto analyser = std::make_shared<AnalyserNode>(shared_from_this());
   nodeManager_->addProcessingNode(analyser);
   return analyser;
 }
@@ -201,11 +205,18 @@ std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
 std::shared_ptr<ConvolverNode> BaseAudioContext::createConvolver(
     std::shared_ptr<AudioBuffer> buffer,
     bool disableNormalization) {
-  auto convolver = std::make_shared<ConvolverNode>(this, buffer, disableNormalization);
+  auto convolver =
+      std::make_shared<ConvolverNode>(shared_from_this(), buffer, disableNormalization);
   nodeManager_->addProcessingNode(convolver);
   return convolver;
 }
 
+std::shared_ptr<WaveShaperNode> BaseAudioContext::createWaveShaper() {
+  auto waveShaper = std::make_shared<WaveShaperNode>(shared_from_this());
+  nodeManager_->addProcessingNode(waveShaper);
+  return waveShaper;
+}
+
 AudioNodeManager *BaseAudioContext::getNodeManager() {
   return nodeManager_.get();
 }
package/common/cpp/audioapi/core/BaseAudioContext.h

@@ -37,14 +37,17 @@ class WorkletSourceNode;
 class WorkletNode;
 class WorkletProcessingNode;
 class StreamerNode;
+class WaveShaperNode;
 
-class BaseAudioContext {
+class BaseAudioContext : public std::enable_shared_from_this<BaseAudioContext> {
  public:
   explicit BaseAudioContext(
       const std::shared_ptr<IAudioEventHandlerRegistry> &audioEventHandlerRegistry,
       const RuntimeRegistry &runtimeRegistry);
   virtual ~BaseAudioContext() = default;
 
+  virtual void initialize();
+
   std::string getState();
   [[nodiscard]] float getSampleRate() const;
   [[nodiscard]] double getCurrentTime() const;
@@ -88,6 +91,7 @@ class BaseAudioContext {
   std::shared_ptr<ConvolverNode> createConvolver(
       std::shared_ptr<AudioBuffer> buffer,
       bool disableNormalization);
+  std::shared_ptr<WaveShaperNode> createWaveShaper();
 
   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
   [[nodiscard]] float getNyquistFrequency() const;
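The `enable_shared_from_this` base and the new virtual `initialize()` go together: `shared_from_this()` must not be called from a constructor, because at that point no `std::shared_ptr` owns the object yet. A minimal sketch of the resulting two-phase construction, with hypothetical names standing in for the real context and destination node:

```cpp
#include <memory>

struct Ctx;  // forward declaration

struct DestinationNode {
  explicit DestinationNode(std::shared_ptr<Ctx> ctx) : ctx_(ctx) {}
  std::weak_ptr<Ctx> ctx_;  // the node keeps only a weak reference back
};

struct Ctx : std::enable_shared_from_this<Ctx> {
  std::shared_ptr<DestinationNode> destination_;

  // Phase two: only valid once a shared_ptr already owns *this,
  // which is why this cannot run inside the constructor.
  void initialize() {
    destination_ = std::make_shared<DestinationNode>(shared_from_this());
  }
};

int main() {
  auto ctx = std::make_shared<Ctx>();  // phase one: establish ownership
  ctx->initialize();                   // phase two: shared_from_this() is now safe
}
```

Callers construct the context into a `shared_ptr` first and then call `initialize()` before using it.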
package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp

@@ -12,23 +12,20 @@
 #include <vector>
 
 namespace audioapi {
-AnalyserNode::AnalyserNode(BaseAudioContext *context)
+AnalyserNode::AnalyserNode(std::shared_ptr<BaseAudioContext> context)
     : AudioNode(context),
       fftSize_(2048),
       minDecibels_(-100),
       maxDecibels_(-30),
       smoothingTimeConstant_(0.8),
-      windowType_(WindowType::BLACKMAN)
-  complexData_ = std::vector<std::complex<float>>(fftSize_);
+      windowType_(WindowType::BLACKMAN),
+      inputBuffer_(std::make_unique<CircularAudioArray>(MAX_FFT_SIZE * 2)),
+      downMixBus_(std::make_unique<AudioBus>(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())),
+      tempBuffer_(std::make_unique<AudioArray>(fftSize_)),
+      fft_(std::make_unique<dsp::FFT>(fftSize_)),
+      complexData_(std::vector<std::complex<float>>(fftSize_)),
+      magnitudeBuffer_(std::make_unique<AudioArray>(fftSize_ / 2)) {
   setWindowData(windowType_, fftSize_);
   isInitialized_ = true;
 }
 
package/common/cpp/audioapi/core/analysis/AnalyserNode.h

@@ -19,7 +19,7 @@ class CircularAudioArray;
 class AnalyserNode : public AudioNode {
  public:
   enum class WindowType { BLACKMAN, HANN };
-  explicit AnalyserNode(BaseAudioContext *context);
+  explicit AnalyserNode(std::shared_ptr<BaseAudioContext> context);
 
   int getFftSize() const;
   int getFrequencyBinCount() const;
package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp

@@ -7,7 +7,7 @@
 
 namespace audioapi {
 
-AudioDestinationNode::AudioDestinationNode(BaseAudioContext *context)
+AudioDestinationNode::AudioDestinationNode(std::shared_ptr<BaseAudioContext> context)
     : AudioNode(context), currentSampleFrame_(0) {
   numberOfOutputs_ = 0;
   numberOfInputs_ = 1;
@@ -20,7 +20,11 @@ std::size_t AudioDestinationNode::getCurrentSampleFrame() const {
 }
 
 double AudioDestinationNode::getCurrentTime() const {
-  return static_cast<double>(currentSampleFrame_) / context_->getSampleRate();
+  if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+    return static_cast<double>(currentSampleFrame_) / context->getSampleRate();
+  } else {
+    return 0.0;
+  }
 }
 
 void AudioDestinationNode::renderAudio(
@@ -30,7 +34,9 @@ void AudioDestinationNode::renderAudio(
     return;
   }
 
-  context_->getNodeManager()->preProcessGraph();
+  if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+    context->getNodeManager()->preProcessGraph();
+  }
 
   destinationBus->zero();
 
package/common/cpp/audioapi/core/destinations/AudioDestinationNode.h

@@ -14,7 +14,7 @@ class BaseAudioContext;
 
 class AudioDestinationNode : public AudioNode {
  public:
-  explicit AudioDestinationNode(BaseAudioContext *context);
+  explicit AudioDestinationNode(std::shared_ptr<BaseAudioContext> context);
 
   std::size_t getCurrentSampleFrame() const;
   double getCurrentTime() const;
package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp

@@ -38,7 +38,7 @@
 
 namespace audioapi {
 
-BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context) : AudioNode(context) {
+BiquadFilterNode::BiquadFilterNode(std::shared_ptr<BaseAudioContext> context) : AudioNode(context) {
   frequencyParam_ =
       std::make_shared<AudioParam>(350.0, 0.0f, context->getNyquistFrequency(), context);
   detuneParam_ = std::make_shared<AudioParam>(
@@ -57,6 +57,7 @@ BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context) : AudioNode(contex
   y2_.resize(MAX_CHANNEL_COUNT, 0.0f);
   isInitialized_ = true;
   channelCountMode_ = ChannelCountMode::MAX;
+  isInitialized_ = true;
 }
 
 std::string BiquadFilterNode::getType() {
@@ -118,7 +119,10 @@ void BiquadFilterNode::getFrequencyResponse(
   double a1 = static_cast<double>(a1_);
   double a2 = static_cast<double>(a2_);
 
-  float nyquist = context_->getNyquistFrequency();
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (!context)
+    return;
+  float nyquist = context->getNyquistFrequency();
 
   for (size_t i = 0; i < length; i++) {
     // Convert from frequency in Hz to normalized frequency [0, 1]
@@ -330,17 +334,22 @@ void BiquadFilterNode::setAllpassCoefficients(float frequency, float Q) {
 }
 
 void BiquadFilterNode::applyFilter() {
+  // NyquistFrequency is half of the sample rate.
+  // Normalized frequency is therefore:
+  // frequency / (sampleRate / 2) = (2 * frequency) / sampleRate
+  float normalizedFrequency;
+  double currentTime;
+  if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+    currentTime = context->getCurrentTime();
+    float frequency = frequencyParam_->processKRateParam(RENDER_QUANTUM_SIZE, currentTime);
+    normalizedFrequency = frequency / context->getNyquistFrequency();
+  } else {
+    return;
+  }
   float detune = detuneParam_->processKRateParam(RENDER_QUANTUM_SIZE, currentTime);
   auto Q = QParam_->processKRateParam(RENDER_QUANTUM_SIZE, currentTime);
   auto gain = gainParam_->processKRateParam(RENDER_QUANTUM_SIZE, currentTime);
 
-  // NyquistFrequency is half of the sample rate.
-  // Normalized frequency is therefore:
-  // frequency / (sampleRate / 2) = (2 * frequency) / sampleRate
-  float normalizedFrequency = frequency / context_->getNyquistFrequency();
   if (detune != 0.0f) {
     normalizedFrequency *= std::pow(2.0f, detune / 1200.0f);
   }
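As a quick check of the normalization comment carried over in the applyFilter hunk above (values chosen for illustration, not taken from the package): a 350 Hz cutoff at a 44100 Hz sample rate with +100 cents of detune gives a normalized frequency of about 0.0168, i.e. one semitone above 350 Hz.

```cpp
#include <cmath>
#include <cstdio>

int main() {
  float sampleRate = 44100.0f;
  float frequency = 350.0f;  // Hz
  float detune = 100.0f;     // cents

  // frequency / (sampleRate / 2) == (2 * frequency) / sampleRate
  float normalizedFrequency = frequency / (sampleRate / 2.0f);  // ~0.015873
  if (detune != 0.0f) {
    normalizedFrequency *= std::pow(2.0f, detune / 1200.0f);    // 2^(100/1200) ~ 1.0595
  }
  std::printf("%f\n", normalizedFrequency);  // ~0.016818
}
```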
package/common/cpp/audioapi/core/effects/BiquadFilterNode.h

@@ -54,7 +54,7 @@ class BiquadFilterNode : public AudioNode {
 #endif // RN_AUDIO_API_TEST
 
  public:
-  explicit BiquadFilterNode(BaseAudioContext *context);
+  explicit BiquadFilterNode(std::shared_ptr<BaseAudioContext> context);
 
   [[nodiscard]] std::string getType();
   void setType(const std::string &type);
package/common/cpp/audioapi/core/effects/ConvolverNode.cpp

@@ -12,12 +12,14 @@
 
 namespace audioapi {
 ConvolverNode::ConvolverNode(
-    BaseAudioContext *context,
+    std::shared_ptr<BaseAudioContext> context,
     const std::shared_ptr<AudioBuffer> &buffer,
     bool disableNormalization)
     : AudioNode(context),
+      gainCalibrationSampleRate_(context->getSampleRate()),
       remainingSegments_(0),
       internalBufferIndex_(0),
+      normalize_(!disableNormalization),
       signalledToStop_(false),
       scaleFactor_(1.0f),
       intermediateBus_(nullptr),
@@ -25,8 +27,6 @@ ConvolverNode::ConvolverNode(
       internalBuffer_(nullptr) {
   channelCount_ = 2;
   channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
-  normalize_ = !disableNormalization;
-  gainCalibrationSampleRate_ = context->getSampleRate();
   setBuffer(buffer);
   audioBus_ =
       std::make_shared<AudioBus>(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
package/common/cpp/audioapi/core/effects/DelayNode.cpp

@@ -7,14 +7,16 @@
 
 namespace audioapi {
 
-DelayNode::DelayNode(BaseAudioContext *context, float maxDelayTime)
+DelayNode::DelayNode(std::shared_ptr<BaseAudioContext> context, float maxDelayTime)
+    : AudioNode(context),
+      delayTimeParam_(std::make_shared<AudioParam>(0, 0, maxDelayTime, context)),
+      delayBuffer_(
+          std::make_shared<AudioBus>(
+              static_cast<size_t>(
+                  maxDelayTime * context->getSampleRate() +
+                  1), // +1 to enable delayTime equal to maxDelayTime
+              channelCount_,
+              context->getSampleRate())) {
   requiresTailProcessing_ = true;
   isInitialized_ = true;
 }
@@ -27,7 +29,11 @@ void DelayNode::onInputDisabled() {
   numberOfEnabledInputNodes_ -= 1;
   if (isEnabled() && numberOfEnabledInputNodes_ == 0) {
     signalledToStop_ = true;
-    remainingFrames_ = delayTimeParam_->getValue() * context_->getSampleRate();
+    if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+      remainingFrames_ = delayTimeParam_->getValue() * context->getSampleRate();
+    } else {
+      remainingFrames_ = 0;
+    }
   }
 }
 
@@ -89,8 +95,11 @@ std::shared_ptr<AudioBus> DelayNode::processNode(
   }
 
   // normal processing
-  auto delayTime = delayTimeParam_->processKRateParam(framesToProcess, context_->getCurrentTime());
-  size_t writeIndex = static_cast<size_t>(readIndex_ + delayTime * context_->getSampleRate()) %
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (context == nullptr)
+    return processingBus;
+  auto delayTime = delayTimeParam_->processKRateParam(framesToProcess, context->getCurrentTime());
+  size_t writeIndex = static_cast<size_t>(readIndex_ + delayTime * context->getSampleRate()) %
       delayBuffer_->getSize();
   delayBufferOperation(processingBus, framesToProcess, writeIndex, DelayNode::BufferAction::WRITE);
   delayBufferOperation(processingBus, framesToProcess, readIndex_, DelayNode::BufferAction::READ);
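A quick sanity check on the buffer sizing in the constructor above (numbers are illustrative, not from the package): with maxDelayTime = 0.5 s at a 44100 Hz sample rate the delay buffer holds 0.5 * 44100 + 1 = 22051 frames, the extra frame being what allows delayTime to reach maxDelayTime exactly.

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  float maxDelayTime = 0.5f;    // seconds (illustrative value)
  float sampleRate = 44100.0f;  // Hz
  // +1 frame so that delayTime == maxDelayTime is still addressable
  size_t bufferFrames = static_cast<size_t>(maxDelayTime * sampleRate + 1);
  std::printf("%zu\n", bufferFrames);  // prints 22051
}
```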
package/common/cpp/audioapi/core/effects/DelayNode.h

@@ -12,7 +12,7 @@ class AudioBus;
 
 class DelayNode : public AudioNode {
  public:
-  explicit DelayNode(BaseAudioContext *context, float maxDelayTime);
+  explicit DelayNode(std::shared_ptr<BaseAudioContext> context, float maxDelayTime);
 
   [[nodiscard]] std::shared_ptr<AudioParam> getDelayTimeParam() const;
 
package/common/cpp/audioapi/core/effects/GainNode.cpp

@@ -7,9 +7,14 @@
 
 namespace audioapi {
 
-GainNode::GainNode(BaseAudioContext *context)
+GainNode::GainNode(std::shared_ptr<BaseAudioContext> context)
+    : AudioNode(context),
+      gainParam_(
+          std::make_shared<AudioParam>(
+              1.0,
+              MOST_NEGATIVE_SINGLE_FLOAT,
+              MOST_POSITIVE_SINGLE_FLOAT,
+              context)) {
   isInitialized_ = true;
 }
 
@@ -20,7 +25,10 @@ std::shared_ptr<AudioParam> GainNode::getGainParam() const {
 std::shared_ptr<AudioBus> GainNode::processNode(
     const std::shared_ptr<AudioBus> &processingBus,
     int framesToProcess) {
-  double time = context_->getCurrentTime();
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (context == nullptr)
+    return processingBus;
+  double time = context->getCurrentTime();
   auto gainParamValues = gainParam_->processARateParam(framesToProcess, time);
   for (int i = 0; i < processingBus->getNumberOfChannels(); i += 1) {
     dsp::multiply(
package/common/cpp/audioapi/core/effects/IIRFilterNode.cpp

@@ -35,11 +35,10 @@
 namespace audioapi {
 
 IIRFilterNode::IIRFilterNode(
-    BaseAudioContext *context,
+    std::shared_ptr<BaseAudioContext> context,
     const std::vector<float> &feedforward,
     const std::vector<float> &feedback)
     : AudioNode(context), feedforward_(feedforward), feedback_(feedback) {
-  isInitialized_ = true;
   channelCountMode_ = ChannelCountMode::MAX;
 
   int maxChannels = MAX_CHANNEL_COUNT;
@@ -65,6 +64,7 @@ IIRFilterNode::IIRFilterNode(
 
     feedback_[0] = 1.0f;
   }
+  isInitialized_ = true;
 }
 
 // Compute Z-transform of the filter
@@ -89,7 +89,10 @@ void IIRFilterNode::getFrequencyResponse(
     float *magResponseOutput,
     float *phaseResponseOutput,
     size_t length) {
-  float nyquist = context_->getNyquistFrequency();
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (context == nullptr)
+    return;
+  float nyquist = context->getNyquistFrequency();
 
   for (size_t k = 0; k < length; ++k) {
     float normalizedFreq = frequencyArray[k] / nyquist;
package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp

@@ -9,9 +9,9 @@
 
 namespace audioapi {
 
-StereoPannerNode::StereoPannerNode(BaseAudioContext *context) : AudioNode(context) {
+StereoPannerNode::StereoPannerNode(std::shared_ptr<BaseAudioContext> context)
+    : AudioNode(context), panParam_(std::make_shared<AudioParam>(0.0, -1.0f, 1.0f, context)) {
   channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
-  panParam_ = std::make_shared<AudioParam>(0.0, -1.0f, 1.0f, context);
   isInitialized_ = true;
 }
 
@@ -22,8 +22,11 @@ std::shared_ptr<AudioParam> StereoPannerNode::getPanParam() const {
 std::shared_ptr<AudioBus> StereoPannerNode::processNode(
     const std::shared_ptr<AudioBus> &processingBus,
     int framesToProcess) {
-  double time = context_->getCurrentTime();
-  double deltaTime = 1.0 / context_->getSampleRate();
+  std::shared_ptr<BaseAudioContext> context = context_.lock();
+  if (context == nullptr)
+    return processingBus;
+  double time = context->getCurrentTime();
+  double deltaTime = 1.0 / context->getSampleRate();
 
   auto *inputLeft = processingBus->getChannelByType(AudioBus::ChannelLeft);
   auto panParamValues =
package/common/cpp/audioapi/core/effects/StereoPannerNode.h

@@ -13,7 +13,7 @@ class AudioBus;
 
 class StereoPannerNode : public AudioNode {
  public:
-  explicit StereoPannerNode(BaseAudioContext *context);
+  explicit StereoPannerNode(std::shared_ptr<BaseAudioContext> context);
 
   [[nodiscard]] std::shared_ptr<AudioParam> getPanParam() const;
 