react-native-audio-api 0.4.12-beta.5 → 0.4.13
This diff reflects the publicly released contents of the two package versions as they appear in their public registries, and is provided for informational purposes only.
- package/android/src/main/cpp/audioapi/CMakeLists.txt +2 -3
- package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp +3 -3
- package/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp +10 -11
- package/android/src/main/cpp/audioapi/android/core/AudioPlayer.h +1 -0
- package/android/src/main/java/com/swmansion/audioapi/AudioAPIPackage.kt +0 -1
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +1 -3
- package/common/cpp/audioapi/HostObjects/AnalyserNodeHostObject.h +24 -16
- package/common/cpp/audioapi/HostObjects/AudioBufferHostObject.h +4 -0
- package/common/cpp/audioapi/HostObjects/AudioBufferSourceNodeHostObject.h +20 -4
- package/common/cpp/audioapi/HostObjects/AudioContextHostObject.h +3 -2
- package/common/cpp/audioapi/HostObjects/AudioScheduledSourceNodeHostObject.h +32 -2
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +14 -21
- package/common/cpp/audioapi/HostObjects/OscillatorNodeHostObject.h +4 -2
- package/common/cpp/audioapi/core/AudioNode.cpp +2 -2
- package/common/cpp/audioapi/core/AudioParam.cpp +1 -1
- package/common/cpp/audioapi/core/BaseAudioContext.cpp +4 -12
- package/common/cpp/audioapi/core/BaseAudioContext.h +2 -4
- package/common/cpp/audioapi/core/Constants.h +8 -33
- package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp +42 -45
- package/common/cpp/audioapi/core/analysis/AnalyserNode.h +8 -6
- package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp +1 -1
- package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +12 -8
- package/common/cpp/audioapi/core/effects/GainNode.cpp +4 -3
- package/common/cpp/audioapi/core/effects/PeriodicWave.cpp +32 -49
- package/common/cpp/audioapi/core/effects/PeriodicWave.h +8 -3
- package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp +3 -3
- package/common/cpp/audioapi/core/sources/AudioBuffer.cpp +9 -2
- package/common/cpp/audioapi/core/sources/AudioBuffer.h +5 -2
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +72 -35
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +41 -8
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +18 -6
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +7 -0
- package/common/cpp/audioapi/core/sources/OscillatorNode.cpp +12 -3
- package/common/cpp/audioapi/core/sources/OscillatorNode.h +1 -0
- package/common/cpp/audioapi/core/types/TimeStretchType.h +7 -0
- package/common/cpp/audioapi/dsp/AudioUtils.cpp +2 -2
- package/common/cpp/audioapi/dsp/AudioUtils.h +2 -2
- package/common/cpp/audioapi/dsp/FFT.cpp +41 -0
- package/common/cpp/audioapi/dsp/FFT.h +29 -0
- package/common/cpp/audioapi/dsp/VectorMath.cpp +3 -3
- package/common/cpp/audioapi/dsp/VectorMath.h +2 -2
- package/common/cpp/audioapi/dsp/Windows.cpp +80 -0
- package/common/cpp/audioapi/dsp/Windows.h +95 -0
- package/{android/src/main/cpp/audioapi/android/libs → common/cpp/audioapi/libs/pffft}/pffft.c +1 -1
- package/common/cpp/audioapi/libs/{dsp → signalsmith-stretch}/delay.h +9 -11
- package/common/cpp/audioapi/libs/{dsp → signalsmith-stretch}/fft.h +6 -7
- package/common/cpp/audioapi/libs/{dsp → signalsmith-stretch}/perf.h +0 -2
- package/common/cpp/audioapi/libs/{signalsmith-stretch.h → signalsmith-stretch/signalsmith-stretch.h} +3 -4
- package/common/cpp/audioapi/libs/{dsp → signalsmith-stretch}/spectral.h +10 -13
- package/common/cpp/audioapi/{core/utils → utils}/AudioArray.cpp +5 -5
- package/common/cpp/audioapi/{core/utils → utils}/AudioBus.cpp +29 -29
- package/ios/audioapi/ios/core/AudioDecoder.mm +3 -3
- package/ios/audioapi/ios/core/AudioPlayer.h +5 -2
- package/ios/audioapi/ios/core/AudioPlayer.m +9 -5
- package/ios/audioapi/ios/core/IOSAudioPlayer.h +1 -0
- package/ios/audioapi/ios/core/IOSAudioPlayer.mm +12 -10
- package/lib/module/api.js +1 -2
- package/lib/module/api.js.map +1 -1
- package/lib/module/api.web.js +1 -1
- package/lib/module/api.web.js.map +1 -1
- package/lib/module/core/AudioBufferSourceNode.js +6 -0
- package/lib/module/core/AudioBufferSourceNode.js.map +1 -1
- package/lib/module/core/AudioScheduledSourceNode.js +5 -0
- package/lib/module/core/AudioScheduledSourceNode.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +0 -4
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/module/web-core/AudioBufferSourceNode.js +6 -0
- package/lib/module/web-core/AudioBufferSourceNode.js.map +1 -1
- package/lib/module/web-core/AudioScheduledSourceNode.js +8 -0
- package/lib/module/web-core/AudioScheduledSourceNode.js.map +1 -1
- package/lib/module/web-core/StretcherNode.js +24 -7
- package/lib/module/web-core/StretcherNode.js.map +1 -1
- package/lib/module/web-core/custom/signalsmithStretch/README.md +1 -1
- package/lib/module/web-core/custom/signalsmithStretch/SignalsmithStretch.js +1 -0
- package/lib/module/web-core/custom/signalsmithStretch/SignalsmithStretch.js.map +1 -1
- package/lib/typescript/api.d.ts +1 -2
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/api.web.d.ts +1 -1
- package/lib/typescript/api.web.d.ts.map +1 -1
- package/lib/typescript/core/AudioBufferSourceNode.d.ts +3 -0
- package/lib/typescript/core/AudioBufferSourceNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioScheduledSourceNode.d.ts +1 -0
- package/lib/typescript/core/AudioScheduledSourceNode.d.ts.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +0 -2
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +3 -6
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +1 -0
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioBufferSourceNode.d.ts +3 -0
- package/lib/typescript/web-core/AudioBufferSourceNode.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts +1 -0
- package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts.map +1 -1
- package/lib/typescript/web-core/StretcherNode.d.ts +3 -0
- package/lib/typescript/web-core/StretcherNode.d.ts.map +1 -1
- package/package.json +3 -4
- package/scripts/setup-rn-audio-api-web.js +58 -0
- package/src/api.ts +1 -1
- package/src/api.web.ts +1 -0
- package/src/core/AudioBufferSourceNode.ts +9 -0
- package/src/core/AudioScheduledSourceNode.ts +5 -0
- package/src/core/BaseAudioContext.ts +0 -5
- package/src/interfaces.ts +3 -6
- package/src/types.ts +2 -0
- package/src/web-core/AudioBufferSourceNode.tsx +11 -0
- package/src/web-core/AudioScheduledSourceNode.tsx +9 -0
- package/src/web-core/StretcherNode.tsx +28 -8
- package/src/web-core/custom/signalsmithStretch/README.md +1 -1
- package/src/web-core/custom/signalsmithStretch/SignalsmithStretch.js +1 -0
- package/common/cpp/audioapi/HostObjects/StretcherNodeHostObject.h +0 -35
- package/common/cpp/audioapi/core/effects/StretcherNode.cpp +0 -94
- package/common/cpp/audioapi/core/effects/StretcherNode.h +0 -35
- package/common/cpp/audioapi/dsp/FFTFrame.cpp +0 -100
- package/common/cpp/audioapi/dsp/FFTFrame.h +0 -74
- package/common/cpp/audioapi/libs/dsp/common.h +0 -47
- package/common/cpp/audioapi/libs/dsp/windows.h +0 -219
- package/lib/module/core/StretcherNode.js +0 -12
- package/lib/module/core/StretcherNode.js.map +0 -1
- package/lib/typescript/core/StretcherNode.d.ts +0 -10
- package/lib/typescript/core/StretcherNode.d.ts.map +0 -1
- package/scripts/setup-custom-wasm.js +0 -104
- package/src/core/StretcherNode.ts +0 -15
- /package/common/cpp/audioapi/libs/{miniaudio.h → miniaudio/miniaudio.h} +0 -0
- /package/{android/src/main/cpp/audioapi/android/libs → common/cpp/audioapi/libs/pffft}/pffft.h +0 -0
- /package/common/cpp/audioapi/{core/utils → utils}/AudioArray.h +0 -0
- /package/common/cpp/audioapi/{core/utils → utils}/AudioBus.h +0 -0

package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp

@@ -1,17 +1,18 @@
 #include <audioapi/core/BaseAudioContext.h>
 #include <audioapi/core/analysis/AnalyserNode.h>
-#include <audioapi/core/utils/AudioArray.h>
-#include <audioapi/core/utils/AudioBus.h>
 #include <audioapi/dsp/AudioUtils.h>
 #include <audioapi/dsp/VectorMath.h>
+#include <audioapi/dsp/Windows.h>
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
 
 namespace audioapi {
 AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
     : AudioNode(context),
-      fftSize_(
-      minDecibels_(
-      maxDecibels_(
-      smoothingTimeConstant_(
+      fftSize_(2048),
+      minDecibels_(-100),
+      maxDecibels_(-30),
+      smoothingTimeConstant_(0.8),
       windowType_(WindowType::BLACKMAN),
       vWriteIndex_(0) {
   inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
@@ -19,9 +20,10 @@ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
   downMixBus_ = std::make_unique<AudioBus>(
       RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());
 
-
-
-
+  fft_ = std::make_unique<dsp::FFT>(fftSize_);
+  complexData_ = std::vector<std::complex<float>>(fftSize_);
+
+  setWindowData(windowType_, fftSize_);
 
   isInitialized_ = true;
 }
@@ -56,10 +58,10 @@ void AnalyserNode::setFftSize(int fftSize) {
   }
 
   fftSize_ = fftSize;
-
-
-  imaginaryData_ = std::make_shared<AudioArray>(fftSize_);
+  fft_ = std::make_unique<dsp::FFT>(fftSize_);
+  complexData_ = std::vector<std::complex<float>>(fftSize_);
   magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
+  setWindowData(windowType_, fftSize_);
 }
 
 void AnalyserNode::setMinDecibels(float minDecibels) {
@@ -75,6 +77,7 @@ void AnalyserNode::setSmoothingTimeConstant(float smoothingTimeConstant) {
 }
 
 void AnalyserNode::setWindowType(const std::string &type) {
+  setWindowData(windowType_, fftSize_);
   windowType_ = AnalyserNode::fromString(type);
 }
 
@@ -82,7 +85,7 @@ void AnalyserNode::getFloatFrequencyData(float *data, int length) {
   doFFTAnalysis();
 
   length = std::min(static_cast<int>(magnitudeBuffer_->getSize()), length);
-
+  dsp::linearToDecibels(magnitudeBuffer_->getData(), data, length);
 }
 
 void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) {
@@ -97,7 +100,7 @@ void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) {
   for (int i = 0; i < length; i++) {
     auto dbMag = magnitudeBufferData[i] == 0
         ? minDecibels_
-        :
+        : dsp::linearToDecibels(magnitudeBufferData[i]);
     auto scaledValue = UINT8_MAX * (dbMag - minDecibels_) * rangeScaleFactor;
 
     if (scaledValue < 0) {
@@ -202,55 +205,49 @@ void AnalyserNode::doFFTAnalysis() {
     tempBuffer.copy(inputBuffer_.get(), vWriteIndex_ - fftSize_, 0, fftSize_);
   }
 
-
-
-
-
-
-      AnalyserNode::applyHannWindow(tempBuffer.getData(), fftSize_);
-      break;
-  }
-
-  auto *realFFTFrameData = realData_->getData();
-  auto *imaginaryFFTFrameData = imaginaryData_->getData();
+  dsp::multiply(
+      tempBuffer.getData(),
+      windowData_->getData(),
+      tempBuffer.getData(),
+      fftSize_);
 
   // do fft analysis - get frequency domain data
-
-      tempBuffer.getData(), realFFTFrameData, imaginaryFFTFrameData);
+  fft_->doFFT(tempBuffer.getData(), complexData_);
 
   // Zero out nquist component
-
+  complexData_[0] = std::complex<float>(complexData_[0].real(), 0);
 
   const float magnitudeScale = 1.0f / static_cast<float>(fftSize_);
   auto magnitudeBufferData = magnitudeBuffer_->getData();
 
   for (int i = 0; i < magnitudeBuffer_->getSize(); i++) {
-    std::
-    auto scalarMagnitude = std::abs(c) * magnitudeScale;
+    auto scalarMagnitude = std::abs(complexData_[i]) * magnitudeScale;
     magnitudeBufferData[i] = static_cast<float>(
         smoothingTimeConstant_ * magnitudeBufferData[i] +
         (1 - smoothingTimeConstant_) * scalarMagnitude);
   }
 }
 
-void AnalyserNode::
-
-
+void AnalyserNode::setWindowData(
+    audioapi::AnalyserNode::WindowType type,
+    int size) {
+  if (windowType_ == type && windowData_ && windowData_->getSize() == size) {
+    return;
+  }
 
-
-
-    auto window = 0.42f - 0.5f * cos(2 * PI * x) + 0.08f * cos(4 * PI * x);
-    data[i] *= window;
+  if (!windowData_ || windowData_->getSize() != size) {
+    windowData_ = std::make_shared<AudioArray>(size);
   }
-}
 
-
-
-
-
-
-
+  switch (windowType_) {
+    case WindowType::BLACKMAN:
+      dsp::Blackman().apply(
+          windowData_->getData(), static_cast<int>(windowData_->getSize()));
+      break;
+    case WindowType::HANN:
+      dsp::Hann().apply(
+          windowData_->getData(), static_cast<int>(windowData_->getSize()));
+      break;
  }
 }
 } // namespace audioapi
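
The getFloatFrequencyData and getByteFrequencyData paths now route the magnitude-to-decibel conversion through a `dsp::linearToDecibels` helper instead of inlining it. The conversion itself is the standard one for magnitudes; a minimal sketch with the conversion and the byte-scaling step written out explicitly — the helper names here are illustrative stand-ins, not the package's exact API:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Standard magnitude-to-decibel conversion: 20 * log10(|x|).
inline float linearToDecibels(float linear) {
  return 20.0f * std::log10(linear);
}

// Mirrors the scaling step in getByteFrequencyData(): map the dB value into
// [minDecibels, maxDecibels] and quantize to 0..255.
inline uint8_t scaleToByte(float magnitude, float minDecibels, float maxDecibels) {
  const float dbMag =
      magnitude == 0.0f ? minDecibels : linearToDecibels(magnitude);
  const float rangeScaleFactor = 1.0f / (maxDecibels - minDecibels);
  const float scaled = 255.0f * (dbMag - minDecibels) * rangeScaleFactor;
  return static_cast<uint8_t>(std::clamp(scaled, 0.0f, 255.0f));
}
```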

package/common/cpp/audioapi/HostObjects/../../core/analysis/AnalyserNode.h

@@ -1,12 +1,13 @@
 #pragma once
 
 #include <audioapi/core/AudioNode.h>
-#include <audioapi/dsp/
+#include <audioapi/dsp/FFT.h>
 
 #include <memory>
 #include <cstddef>
 #include <string>
 #include <complex>
+#include <vector>
 
 namespace audioapi {
 
@@ -44,15 +45,16 @@ class AnalyserNode : public AudioNode {
   float minDecibels_;
   float maxDecibels_;
   float smoothingTimeConstant_;
+
   WindowType windowType_;
+  std::shared_ptr<AudioArray> windowData_;
 
   std::unique_ptr<AudioArray> inputBuffer_;
   std::unique_ptr<AudioBus> downMixBus_;
   int vWriteIndex_;
 
-  std::unique_ptr<
-  std::
-  std::shared_ptr<AudioArray> imaginaryData_;
+  std::unique_ptr<dsp::FFT> fft_;
+  std::vector<std::complex<float>> complexData_;
   std::unique_ptr<AudioArray> magnitudeBuffer_;
   bool shouldDoFFTAnalysis_ { true };
 
@@ -82,8 +84,8 @@ class AnalyserNode : public AudioNode {
   }
 
   void doFFTAnalysis();
-
-
+
+  void setWindowData(WindowType type, int size);
 };
 
 } // namespace audioapi
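
The analyser change replaces the per-call window loop (the old `0.42 - 0.5*cos + 0.08*cos` Blackman formula) with a cached window buffer that is multiplied into the analysis frame before the FFT, plus exponential smoothing of the resulting magnitudes. A minimal standalone sketch of that pattern follows; `blackmanWindow`, `applyWindow`, and `smoothMagnitudes` are illustrative stand-ins, not the package's actual `dsp::` API:

```cpp
#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

constexpr float kPi = 3.14159265358979f;

// Precompute a Blackman window once; the coefficients match the formula the
// old inline loop used.
std::vector<float> blackmanWindow(std::size_t size) {
  std::vector<float> window(size);
  for (std::size_t i = 0; i < size; ++i) {
    const float x = static_cast<float>(i) / static_cast<float>(size);
    window[i] = 0.42f - 0.5f * std::cos(2.0f * kPi * x) +
        0.08f * std::cos(4.0f * kPi * x);
  }
  return window;
}

// Element-wise multiply of the analysis frame by the cached window, mirroring
// what dsp::multiply appears to do in the diff.
void applyWindow(std::vector<float> &frame, const std::vector<float> &window) {
  for (std::size_t i = 0; i < frame.size() && i < window.size(); ++i) {
    frame[i] *= window[i];
  }
}

// Exponential smoothing of FFT magnitudes, as in doFFTAnalysis():
// new = k * previous + (1 - k) * |X[i]| / fftSize.
void smoothMagnitudes(
    const std::vector<std::complex<float>> &spectrum,
    std::vector<float> &magnitudes,  // half the FFT size
    float smoothingTimeConstant) {
  const float scale = 1.0f / static_cast<float>(2 * magnitudes.size());
  for (std::size_t i = 0; i < magnitudes.size(); ++i) {
    const float mag = std::abs(spectrum[i]) * scale;
    magnitudes[i] = smoothingTimeConstant * magnitudes[i] +
        (1.0f - smoothingTimeConstant) * mag;
  }
}
```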

package/common/cpp/audioapi/core/AudioNode.cpp

@@ -1,8 +1,8 @@
 #include <audioapi/core/AudioNode.h>
 #include <audioapi/core/BaseAudioContext.h>
 #include <audioapi/core/destinations/AudioDestinationNode.h>
-#include <audioapi/core/utils/AudioBus.h>
 #include <audioapi/core/utils/AudioNodeManager.h>
+#include <audioapi/utils/AudioBus.h>
 
 namespace audioapi {
 

package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp

@@ -1,7 +1,7 @@
 #include <audioapi/core/BaseAudioContext.h>
 #include <audioapi/core/effects/BiquadFilterNode.h>
-#include <audioapi/
-#include <audioapi/
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
 
 // https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html - math
 // formulas for filters
@@ -10,12 +10,16 @@ namespace audioapi {
 
 BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context)
     : AudioNode(context) {
-  frequencyParam_ =
-      350.0,
-  detuneParam_ = std::make_shared<AudioParam>(
-
-
-
+  frequencyParam_ =
+      std::make_shared<AudioParam>(350.0, 0.0f, context->getNyquistFrequency());
+  detuneParam_ = std::make_shared<AudioParam>(
+      0.0,
+      -1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT,
+      1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT);
+  QParam_ = std::make_shared<AudioParam>(
+      1.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT);
+  gainParam_ = std::make_shared<AudioParam>(
+      0.0, MOST_NEGATIVE_SINGLE_FLOAT, 40 * LOG10_MOST_POSITIVE_SINGLE_FLOAT);
   type_ = BiquadFilterType::LOWPASS;
   isInitialized_ = true;
 }
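
The biquad constructor now passes explicit min/max bounds to each AudioParam. The detune and gain limits in the diff are derived from the largest finite single-precision float: ±1200·log2(FLT_MAX) cents for detune and 40·log10(FLT_MAX) dB for gain. A small sketch of how those limits work out numerically, assuming the constants are defined as below (their exact definitions live in Constants.h, which this diff does not show):

```cpp
#include <cfloat>
#include <cmath>
#include <cstdio>

int main() {
  // Assumed definitions; the package keeps these in core/Constants.h.
  const float mostPositive = FLT_MAX;
  const float log2MostPositive = std::log2(mostPositive);   // ~128
  const float log10MostPositive = std::log10(mostPositive); // ~38.5

  // Bounds matching the values passed to the AudioParam constructors above.
  const float detuneLimitCents = 1200.0f * log2MostPositive; // ~153600 cents
  const float gainLimitDb = 40.0f * log10MostPositive;       // ~1541 dB

  std::printf("detune range: +/- %.0f cents\n", detuneLimitCents);
  std::printf("gain upper bound: %.1f dB\n", gainLimitDb);
  return 0;
}
```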

package/common/cpp/audioapi/core/effects/GainNode.cpp

@@ -1,12 +1,13 @@
 #include <audioapi/core/BaseAudioContext.h>
 #include <audioapi/core/effects/GainNode.h>
-#include <audioapi/
-#include <audioapi/
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
 
 namespace audioapi {
 
 GainNode::GainNode(BaseAudioContext *context) : AudioNode(context) {
-  gainParam_ = std::make_shared<AudioParam>(
+  gainParam_ = std::make_shared<AudioParam>(
+      1.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT);
   isInitialized_ = true;
 }
 

package/common/cpp/audioapi/core/effects/PeriodicWave.cpp

@@ -28,7 +28,6 @@
 
 #include <audioapi/core/Constants.h>
 #include <audioapi/core/effects/PeriodicWave.h>
-#include <audioapi/dsp/FFTFrame.h>
 #include <audioapi/dsp/VectorMath.h>
 
 constexpr unsigned NumberOfOctaveBands = 3;
@@ -47,6 +46,8 @@ PeriodicWave::PeriodicWave(float sampleRate, bool disableNormalization)
   scale_ = static_cast<float>(getPeriodicWaveSize()) /
       static_cast<float>(sampleRate_);
   bandLimitedTables_ = new float *[numberOfRanges_];
+
+  fft_ = std::make_unique<dsp::FFT>(getPeriodicWaveSize());
 }
 
 PeriodicWave::PeriodicWave(
@@ -59,12 +60,18 @@ PeriodicWave::PeriodicWave(
 
 PeriodicWave::PeriodicWave(
     float sampleRate,
-    float
-    float *imaginary,
+    const std::vector<std::complex<float>> &complexData,
     int length,
     bool disableNormalization)
     : PeriodicWave(sampleRate, disableNormalization) {
-  createBandLimitedTables(
+  createBandLimitedTables(complexData, length);
+}
+
+PeriodicWave::~PeriodicWave() {
+  for (int i = 0; i < numberOfRanges_; i++) {
+    delete[] bandLimitedTables_[i];
+  }
+  delete[] bandLimitedTables_;
 }
 
 int PeriodicWave::getPeriodicWaveSize() const {
@@ -130,15 +137,9 @@ void PeriodicWave::generateBasicWaveForm(OscillatorType type) {
  * real and imaginary can finely shape which harmonic content is retained or
  * discarded.
  */
-  auto halfSize = fftSize / 2;
-
-  auto *real = new float[halfSize];
-  auto *imaginary = new float[halfSize];
 
-
-
-  real[0] = 0.0f;
-  imaginary[0] = 0.0f;
+  auto halfSize = fftSize / 2;
+  auto complexData = std::vector<std::complex<float>>(halfSize);
 
   for (int i = 1; i < halfSize; i++) {
     // All waveforms are odd functions with a positive slope at time 0.
@@ -180,16 +181,14 @@ void PeriodicWave::generateBasicWaveForm(OscillatorType type) {
       throw std::invalid_argument("Custom waveforms are not supported.");
   }
 
-
-    imaginary[i] = b;
+    complexData[i] = std::complex<float>(0.0f, b);
  }
 
-  createBandLimitedTables(
+  createBandLimitedTables(complexData, halfSize);
 }
 
 void PeriodicWave::createBandLimitedTables(
-    const float
-    const float *imaginaryData,
+    const std::vector<std::complex<float>> &complexData,
    int size) {
  float normalizationFactor = 0.5f;
 
@@ -199,19 +198,7 @@ void PeriodicWave::createBandLimitedTables(
   size = std::min(size, halfSize);
 
   for (int rangeIndex = 0; rangeIndex < numberOfRanges_; rangeIndex++) {
-
-
-    auto *realFFTFrameData = new float[fftSize];
-    auto *imaginaryFFTFrameData = new float[fftSize];
-
-    // copy real and imaginary data to the FFT frame and scale it
-    VectorMath::multiplyByScalar(
-        realData, static_cast<float>(fftSize), realFFTFrameData, size);
-    VectorMath::multiplyByScalar(
-        imaginaryData,
-        -static_cast<float>(fftSize),
-        imaginaryFFTFrameData,
-        size);
+    auto complexFFTData = std::vector<std::complex<float>>(halfSize);
 
     // Find the starting partial where we should start culling.
     // We need to clear out the highest frequencies to band-limit the waveform.
@@ -219,41 +206,37 @@ void PeriodicWave::createBandLimitedTables(
 
     // Clamp the size to the number of partials.
     auto clampedSize = std::min(size, numberOfPartials);
-
-
-
-
-
-
-
-
+
+    // copy real and imaginary data to the FFT frame, scale it and set the
+    // higher frequencies to zero.
+    for (int i = 0; i < size; i++) {
+      if (i >= clampedSize && i < halfSize) {
+        complexFFTData[i] = std::complex<float>(0.0f, 0.0f);
+      } else {
+        complexFFTData[i] = {
+            complexData[i].real() * static_cast<float>(fftSize),
+            complexData[i].imag() * -static_cast<float>(fftSize)};
+      }
     }
 
     // Zero out the DC and nquist components.
-
-    imaginaryFFTFrameData[0] = 0.0f;
+    complexFFTData[0] = {0.0f, 0.0f};
 
     bandLimitedTables_[rangeIndex] = new float[fftSize];
 
     // Perform the inverse FFT to get the time domain representation of the
    // band-limited waveform.
-
-        bandLimitedTables_[rangeIndex],
-        realFFTFrameData,
-        imaginaryFFTFrameData);
+    fft_->doInverseFFT(complexFFTData, bandLimitedTables_[rangeIndex]);
 
     if (!disableNormalization_ && rangeIndex == 0) {
       float maxValue =
-
+          dsp::maximumMagnitude(bandLimitedTables_[rangeIndex], fftSize);
       if (maxValue != 0) {
         normalizationFactor = 1.0f / maxValue;
       }
     }
 
-
-    delete[] imaginaryFFTFrameData;
-
-    VectorMath::multiplyByScalar(
+    dsp::multiplyByScalar(
        bandLimitedTables_[rangeIndex],
        normalizationFactor,
        bandLimitedTables_[rangeIndex],
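
The wavetable construction keeps the same structure as before — scale the harmonic coefficients, zero everything above the cut-off partial, then inverse-FFT into a time-domain table — but now works on a single `std::complex` vector instead of split real/imaginary arrays. The sketch below illustrates the band-limiting idea with a naive sine-sum synthesis standing in for the package's `dsp::FFT`, so it is an illustration of the technique rather than the actual implementation:

```cpp
#include <algorithm>
#include <cmath>
#include <complex>
#include <vector>

// Build one band-limited wavetable from harmonic coefficients.
// complexData[i].imag() holds the amplitude of partial i (odd waveform),
// matching how generateBasicWaveForm() fills the vector in the diff.
std::vector<float> bandLimitedTable(
    const std::vector<std::complex<float>> &complexData,
    int tableSize,
    int clampedSize) {
  constexpr float kPi = 3.14159265358979f;
  const int halfSize = tableSize / 2;
  const int partials =
      std::min<int>(clampedSize, static_cast<int>(complexData.size()));

  std::vector<float> table(tableSize, 0.0f);
  // Sum only the partials that survive the band limit; everything at or above
  // clampedSize is treated as zeroed out, like complexFFTData above.
  for (int i = 1; i < partials && i < halfSize; ++i) {
    const float amplitude = complexData[i].imag();
    for (int n = 0; n < tableSize; ++n) {
      table[n] += amplitude *
          std::sin(2.0f * kPi * static_cast<float>(i) *
                   static_cast<float>(n) / static_cast<float>(tableSize));
    }
  }
  return table;
}
```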

package/common/cpp/audioapi/core/effects/PeriodicWave.h

@@ -29,10 +29,13 @@
 #pragma once
 
 #include <audioapi/core/types/OscillatorType.h>
+#include <audioapi/dsp/FFT.h>
 
 #include <algorithm>
 #include <cmath>
 #include <memory>
+#include <vector>
+#include <complex>
 
 namespace audioapi {
 class PeriodicWave {
@@ -43,10 +46,10 @@ class PeriodicWave {
       bool disableNormalization);
   explicit PeriodicWave(
       float sampleRate,
-      float
-      float *imaginary,
+      const std::vector<std::complex<float>> &complexData,
       int length,
       bool disableNormalization);
+  ~PeriodicWave();
 
   [[nodiscard]] int getPeriodicWaveSize() const;
   [[nodiscard]] float getScale() const;
@@ -78,7 +81,7 @@ class PeriodicWave {
   // For each range, the inverse FFT is performed to get the time domain
   // representation of the band-limited waveform.
   void
-  createBandLimitedTables(const float
+  createBandLimitedTables(const std::vector<std::complex<float>> &complexData, int size);
 
   // This function returns the interpolation factor between the lower and higher
   // range data and sets the lower and higher wave data for the given
@@ -111,6 +114,8 @@ class PeriodicWave {
   float scale_;
   // array of band-limited waveforms.
   float **bandLimitedTables_;
+  //
+  std::unique_ptr<dsp::FFT> fft_;
   // if true, the waveTable is not normalized.
   bool disableNormalization_;
 };

package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp

@@ -1,8 +1,8 @@
 #include <audioapi/core/BaseAudioContext.h>
 #include <audioapi/core/Constants.h>
 #include <audioapi/core/effects/StereoPannerNode.h>
-#include <audioapi/
-#include <audioapi/
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
 
 // https://webaudio.github.io/web-audio-api/#stereopanner-algorithm
 
@@ -11,7 +11,7 @@ namespace audioapi {
 StereoPannerNode::StereoPannerNode(BaseAudioContext *context)
     : AudioNode(context) {
   channelCountMode_ = ChannelCountMode::EXPLICIT;
-  panParam_ = std::make_shared<AudioParam>(0.0,
+  panParam_ = std::make_shared<AudioParam>(0.0, -1.0f, 1.0f);
   isInitialized_ = true;
 }
 

package/common/cpp/audioapi/core/sources/AudioBuffer.cpp

@@ -1,6 +1,6 @@
 #include <audioapi/core/sources/AudioBuffer.h>
-#include <audioapi/
-#include <audioapi/
+#include <audioapi/utils/AudioArray.h>
+#include <audioapi/utils/AudioBus.h>
 
 #include <utility>
 
@@ -11,10 +11,17 @@ AudioBuffer::AudioBuffer(
     size_t length,
     float sampleRate) {
   bus_ = std::make_shared<AudioBus>(length, numberOfChannels, sampleRate);
+  stretch_ =
+      std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
+  stretch_->presetDefault(numberOfChannels, sampleRate);
 }
 
 AudioBuffer::AudioBuffer(std::shared_ptr<AudioBus> bus) {
   bus_ = std::move(bus);
+
+  stretch_ =
+      std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
+  stretch_->presetDefault(bus_->getNumberOfChannels(), bus_->getSampleRate());
 }
 
 size_t AudioBuffer::getLength() const {

package/common/cpp/audioapi/core/sources/AudioBuffer.h

@@ -1,5 +1,7 @@
 #pragma once
 
+#include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>
+
 #include <algorithm>
 #include <memory>
 #include <string>
@@ -34,9 +36,10 @@ class AudioBuffer : public std::enable_shared_from_this<AudioBuffer> {
       size_t startInChannel);
 
  private:
-  std::shared_ptr<AudioBus> bus_;
-
   friend class AudioBufferSourceNode;
+
+  std::shared_ptr<AudioBus> bus_;
+  std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
 };
 
 } // namespace audioapi
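
Each AudioBuffer now carries a signalsmith-stretch instance alongside its AudioBus, which is what the new time-stretching path (see TimeStretchType.h and the AudioBufferSourceNode changes) builds on. Below is a minimal standalone sketch of how that library is typically driven, independent of this package's wiring and based on the upstream signalsmith-stretch API as generally documented; treat the exact call pattern as an assumption, not this package's implementation:

```cpp
// The header is vendored in this release under
// common/cpp/audioapi/libs/signalsmith-stretch/.
#include "signalsmith-stretch.h"

#include <vector>

void stretchExample(
    const std::vector<std::vector<float>> &input,  // [channel][sample]
    std::vector<std::vector<float>> &output,       // [channel][sample]
    float sampleRate) {
  const int channels = static_cast<int>(input.size());
  const int inputSamples = static_cast<int>(input[0].size());
  const int outputSamples = static_cast<int>(output[0].size());

  signalsmith::stretch::SignalsmithStretch<float> stretch;
  // Same call the AudioBuffer constructors make in the diff.
  stretch.presetDefault(channels, sampleRate);

  // Gather per-channel pointers; process() accepts anything indexable as
  // buffer[channel][sample].
  std::vector<const float *> inPtrs(channels);
  std::vector<float *> outPtrs(channels);
  for (int c = 0; c < channels; ++c) {
    inPtrs[c] = input[c].data();
    outPtrs[c] = output[c].data();
  }

  // Producing more (or fewer) output samples than input samples is what
  // changes playback duration without changing pitch.
  stretch.process(inPtrs, inputSamples, outPtrs, outputSamples);
}
```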