react-native-audio-api 0.4.13 → 0.4.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. package/android/src/main/cpp/audioapi/CMakeLists.txt +3 -2
  2. package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp +3 -3
  3. package/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp +13 -12
  4. package/android/src/main/cpp/audioapi/android/core/AudioPlayer.h +0 -1
  5. package/{common/cpp/audioapi/libs/pffft → android/src/main/cpp/audioapi/android/libs}/pffft.c +1 -1
  6. package/android/src/main/java/com/swmansion/audioapi/AudioAPIPackage.kt +1 -0
  7. package/common/cpp/audioapi/AudioAPIModuleInstaller.h +3 -1
  8. package/common/cpp/audioapi/HostObjects/AnalyserNodeHostObject.h +16 -24
  9. package/common/cpp/audioapi/HostObjects/AudioBufferHostObject.h +0 -4
  10. package/common/cpp/audioapi/HostObjects/AudioBufferSourceNodeHostObject.h +4 -20
  11. package/common/cpp/audioapi/HostObjects/AudioContextHostObject.h +2 -3
  12. package/common/cpp/audioapi/HostObjects/AudioScheduledSourceNodeHostObject.h +2 -32
  13. package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +21 -14
  14. package/common/cpp/audioapi/HostObjects/OscillatorNodeHostObject.h +2 -4
  15. package/common/cpp/audioapi/HostObjects/StretcherNodeHostObject.h +35 -0
  16. package/common/cpp/audioapi/core/AudioNode.cpp +2 -2
  17. package/common/cpp/audioapi/core/AudioParam.cpp +1 -1
  18. package/common/cpp/audioapi/core/BaseAudioContext.cpp +12 -4
  19. package/common/cpp/audioapi/core/BaseAudioContext.h +4 -2
  20. package/common/cpp/audioapi/core/Constants.h +33 -8
  21. package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp +45 -42
  22. package/common/cpp/audioapi/core/analysis/AnalyserNode.h +6 -8
  23. package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp +1 -1
  24. package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +8 -12
  25. package/common/cpp/audioapi/core/effects/GainNode.cpp +3 -4
  26. package/common/cpp/audioapi/core/effects/PeriodicWave.cpp +49 -32
  27. package/common/cpp/audioapi/core/effects/PeriodicWave.h +3 -8
  28. package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp +3 -3
  29. package/common/cpp/audioapi/core/effects/StretcherNode.cpp +94 -0
  30. package/common/cpp/audioapi/core/effects/StretcherNode.h +35 -0
  31. package/common/cpp/audioapi/core/sources/AudioBuffer.cpp +2 -9
  32. package/common/cpp/audioapi/core/sources/AudioBuffer.h +2 -5
  33. package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +35 -72
  34. package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +8 -41
  35. package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +6 -18
  36. package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +0 -7
  37. package/common/cpp/audioapi/core/sources/OscillatorNode.cpp +3 -12
  38. package/common/cpp/audioapi/core/sources/OscillatorNode.h +0 -1
  39. package/common/cpp/audioapi/{utils → core/utils}/AudioArray.cpp +5 -5
  40. package/common/cpp/audioapi/{utils → core/utils}/AudioBus.cpp +29 -29
  41. package/common/cpp/audioapi/dsp/AudioUtils.cpp +2 -2
  42. package/common/cpp/audioapi/dsp/AudioUtils.h +2 -2
  43. package/common/cpp/audioapi/dsp/FFTFrame.cpp +100 -0
  44. package/common/cpp/audioapi/dsp/FFTFrame.h +74 -0
  45. package/common/cpp/audioapi/dsp/VectorMath.cpp +3 -3
  46. package/common/cpp/audioapi/dsp/VectorMath.h +2 -2
  47. package/common/cpp/audioapi/libs/dsp/common.h +47 -0
  48. package/common/cpp/audioapi/libs/{signalsmith-stretch → dsp}/delay.h +11 -9
  49. package/common/cpp/audioapi/libs/{signalsmith-stretch → dsp}/fft.h +7 -6
  50. package/common/cpp/audioapi/libs/{signalsmith-stretch → dsp}/perf.h +2 -0
  51. package/common/cpp/audioapi/libs/{signalsmith-stretch → dsp}/spectral.h +13 -10
  52. package/common/cpp/audioapi/libs/dsp/windows.h +219 -0
  53. package/common/cpp/audioapi/libs/{signalsmith-stretch/signalsmith-stretch.h → signalsmith-stretch.h} +4 -3
  54. package/ios/audioapi/ios/core/AudioDecoder.mm +3 -3
  55. package/ios/audioapi/ios/core/AudioPlayer.h +2 -5
  56. package/ios/audioapi/ios/core/AudioPlayer.m +5 -9
  57. package/ios/audioapi/ios/core/IOSAudioPlayer.h +0 -1
  58. package/ios/audioapi/ios/core/IOSAudioPlayer.mm +10 -12
  59. package/lib/module/api.js +2 -1
  60. package/lib/module/api.js.map +1 -1
  61. package/lib/module/api.web.js +1 -1
  62. package/lib/module/api.web.js.map +1 -1
  63. package/lib/module/core/AudioBufferSourceNode.js +0 -6
  64. package/lib/module/core/AudioBufferSourceNode.js.map +1 -1
  65. package/lib/module/core/AudioScheduledSourceNode.js +0 -5
  66. package/lib/module/core/AudioScheduledSourceNode.js.map +1 -1
  67. package/lib/module/core/BaseAudioContext.js +4 -0
  68. package/lib/module/core/BaseAudioContext.js.map +1 -1
  69. package/lib/module/core/StretcherNode.js +12 -0
  70. package/lib/module/core/StretcherNode.js.map +1 -0
  71. package/lib/module/web-core/AudioBufferSourceNode.js +0 -6
  72. package/lib/module/web-core/AudioBufferSourceNode.js.map +1 -1
  73. package/lib/module/web-core/AudioScheduledSourceNode.js +0 -8
  74. package/lib/module/web-core/AudioScheduledSourceNode.js.map +1 -1
  75. package/lib/module/web-core/StretcherNode.js +7 -24
  76. package/lib/module/web-core/StretcherNode.js.map +1 -1
  77. package/lib/module/web-core/custom/signalsmithStretch/README.md +1 -1
  78. package/lib/module/web-core/custom/signalsmithStretch/SignalsmithStretch.js +0 -1
  79. package/lib/module/web-core/custom/signalsmithStretch/SignalsmithStretch.js.map +1 -1
  80. package/lib/typescript/api.d.ts +2 -1
  81. package/lib/typescript/api.d.ts.map +1 -1
  82. package/lib/typescript/api.web.d.ts +1 -1
  83. package/lib/typescript/api.web.d.ts.map +1 -1
  84. package/lib/typescript/core/AudioBufferSourceNode.d.ts +0 -3
  85. package/lib/typescript/core/AudioBufferSourceNode.d.ts.map +1 -1
  86. package/lib/typescript/core/AudioScheduledSourceNode.d.ts +0 -1
  87. package/lib/typescript/core/AudioScheduledSourceNode.d.ts.map +1 -1
  88. package/lib/typescript/core/BaseAudioContext.d.ts +2 -0
  89. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  90. package/lib/typescript/core/StretcherNode.d.ts +10 -0
  91. package/lib/typescript/core/StretcherNode.d.ts.map +1 -0
  92. package/lib/typescript/interfaces.d.ts +6 -3
  93. package/lib/typescript/interfaces.d.ts.map +1 -1
  94. package/lib/typescript/types.d.ts +0 -1
  95. package/lib/typescript/types.d.ts.map +1 -1
  96. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts +0 -3
  97. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts.map +1 -1
  98. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts +0 -1
  99. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts.map +1 -1
  100. package/lib/typescript/web-core/StretcherNode.d.ts +0 -3
  101. package/lib/typescript/web-core/StretcherNode.d.ts.map +1 -1
  102. package/package.json +4 -3
  103. package/scripts/setup-custom-wasm.js +104 -0
  104. package/src/api.ts +1 -1
  105. package/src/api.web.ts +0 -1
  106. package/src/core/AudioBufferSourceNode.ts +0 -9
  107. package/src/core/AudioScheduledSourceNode.ts +0 -5
  108. package/src/core/BaseAudioContext.ts +5 -0
  109. package/src/core/StretcherNode.ts +15 -0
  110. package/src/interfaces.ts +6 -3
  111. package/src/types.ts +0 -2
  112. package/src/web-core/AudioBufferSourceNode.tsx +0 -11
  113. package/src/web-core/AudioScheduledSourceNode.tsx +0 -9
  114. package/src/web-core/StretcherNode.tsx +8 -28
  115. package/src/web-core/custom/signalsmithStretch/README.md +1 -1
  116. package/src/web-core/custom/signalsmithStretch/SignalsmithStretch.js +0 -1
  117. package/common/cpp/audioapi/core/types/TimeStretchType.h +0 -7
  118. package/common/cpp/audioapi/dsp/FFT.cpp +0 -41
  119. package/common/cpp/audioapi/dsp/FFT.h +0 -29
  120. package/common/cpp/audioapi/dsp/Windows.cpp +0 -80
  121. package/common/cpp/audioapi/dsp/Windows.h +0 -95
  122. package/scripts/setup-rn-audio-api-web.js +0 -58
  123. /package/{common/cpp/audioapi/libs/pffft → android/src/main/cpp/audioapi/android/libs}/pffft.h +0 -0
  124. /package/common/cpp/audioapi/{utils → core/utils}/AudioArray.h +0 -0
  125. /package/common/cpp/audioapi/{utils → core/utils}/AudioBus.h +0 -0
  126. /package/common/cpp/audioapi/libs/{miniaudio/miniaudio.h → miniaudio.h} +0 -0
package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp

@@ -1,18 +1,17 @@
  #include <audioapi/core/BaseAudioContext.h>
  #include <audioapi/core/analysis/AnalyserNode.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>
  #include <audioapi/dsp/AudioUtils.h>
  #include <audioapi/dsp/VectorMath.h>
- #include <audioapi/dsp/Windows.h>
- #include <audioapi/utils/AudioArray.h>
- #include <audioapi/utils/AudioBus.h>

  namespace audioapi {
  AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
      : AudioNode(context),
-       fftSize_(2048),
-       minDecibels_(-100),
-       maxDecibels_(-30),
-       smoothingTimeConstant_(0.8),
+       fftSize_(DEFAULT_FFT_SIZE),
+       minDecibels_(DEFAULT_MIN_DECIBELS),
+       maxDecibels_(DEFAULT_MAX_DECIBELS),
+       smoothingTimeConstant_(DEFAULT_SMOOTHING_TIME_CONSTANT),
        windowType_(WindowType::BLACKMAN),
        vWriteIndex_(0) {
    inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
@@ -20,10 +19,9 @@ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
    downMixBus_ = std::make_unique<AudioBus>(
        RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());

-   fft_ = std::make_unique<dsp::FFT>(fftSize_);
-   complexData_ = std::vector<std::complex<float>>(fftSize_);
-
-   setWindowData(windowType_, fftSize_);
+   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+   realData_ = std::make_shared<AudioArray>(fftSize_);
+   imaginaryData_ = std::make_shared<AudioArray>(fftSize_);

    isInitialized_ = true;
  }
@@ -58,10 +56,10 @@ void AnalyserNode::setFftSize(int fftSize) {
    }

    fftSize_ = fftSize;
-   fft_ = std::make_unique<dsp::FFT>(fftSize_);
-   complexData_ = std::vector<std::complex<float>>(fftSize_);
+   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+   realData_ = std::make_shared<AudioArray>(fftSize_);
+   imaginaryData_ = std::make_shared<AudioArray>(fftSize_);
    magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
-   setWindowData(windowType_, fftSize_);
  }

  void AnalyserNode::setMinDecibels(float minDecibels) {
@@ -77,7 +75,6 @@ void AnalyserNode::setSmoothingTimeConstant(float smoothingTimeConstant) {
  }

  void AnalyserNode::setWindowType(const std::string &type) {
-   setWindowData(windowType_, fftSize_);
    windowType_ = AnalyserNode::fromString(type);
  }

@@ -85,7 +82,7 @@ void AnalyserNode::getFloatFrequencyData(float *data, int length) {
    doFFTAnalysis();

    length = std::min(static_cast<int>(magnitudeBuffer_->getSize()), length);
-   dsp::linearToDecibels(magnitudeBuffer_->getData(), data, length);
+   VectorMath::linearToDecibels(magnitudeBuffer_->getData(), data, length);
  }

  void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) {
@@ -100,7 +97,7 @@ void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) {
    for (int i = 0; i < length; i++) {
      auto dbMag = magnitudeBufferData[i] == 0
          ? minDecibels_
-         : dsp::linearToDecibels(magnitudeBufferData[i]);
+         : AudioUtils::linearToDecibels(magnitudeBufferData[i]);
      auto scaledValue = UINT8_MAX * (dbMag - minDecibels_) * rangeScaleFactor;

      if (scaledValue < 0) {
@@ -205,49 +202,55 @@ void AnalyserNode::doFFTAnalysis() {
      tempBuffer.copy(inputBuffer_.get(), vWriteIndex_ - fftSize_, 0, fftSize_);
    }

-   dsp::multiply(
-       tempBuffer.getData(),
-       windowData_->getData(),
-       tempBuffer.getData(),
-       fftSize_);
+   switch (windowType_) {
+     case WindowType::BLACKMAN:
+       AnalyserNode::applyBlackManWindow(tempBuffer.getData(), fftSize_);
+       break;
+     case WindowType::HANN:
+       AnalyserNode::applyHannWindow(tempBuffer.getData(), fftSize_);
+       break;
+   }
+
+   auto *realFFTFrameData = realData_->getData();
+   auto *imaginaryFFTFrameData = imaginaryData_->getData();

    // do fft analysis - get frequency domain data
-   fft_->doFFT(tempBuffer.getData(), complexData_);
+   fftFrame_->doFFT(
+       tempBuffer.getData(), realFFTFrameData, imaginaryFFTFrameData);

    // Zero out nquist component
-   complexData_[0] = std::complex<float>(complexData_[0].real(), 0);
+   imaginaryFFTFrameData[0] = 0.0f;

    const float magnitudeScale = 1.0f / static_cast<float>(fftSize_);
    auto magnitudeBufferData = magnitudeBuffer_->getData();

    for (int i = 0; i < magnitudeBuffer_->getSize(); i++) {
-     auto scalarMagnitude = std::abs(complexData_[i]) * magnitudeScale;
+     std::complex<float> c(realFFTFrameData[i], imaginaryFFTFrameData[i]);
+     auto scalarMagnitude = std::abs(c) * magnitudeScale;
      magnitudeBufferData[i] = static_cast<float>(
          smoothingTimeConstant_ * magnitudeBufferData[i] +
          (1 - smoothingTimeConstant_) * scalarMagnitude);
    }
  }

- void AnalyserNode::setWindowData(
-     audioapi::AnalyserNode::WindowType type,
-     int size) {
-   if (windowType_ == type && windowData_ && windowData_->getSize() == size) {
-     return;
-   }
+ void AnalyserNode::applyBlackManWindow(float *data, int length) {
+   // https://www.sciencedirect.com/topics/engineering/blackman-window
+   // https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.blackman.html#scipy.signal.windows.blackman

-   if (!windowData_ || windowData_->getSize() != size) {
-     windowData_ = std::make_shared<AudioArray>(size);
+   for (int i = 0; i < length; ++i) {
+     auto x = static_cast<float>(i) / static_cast<float>(length);
+     auto window = 0.42f - 0.5f * cos(2 * PI * x) + 0.08f * cos(4 * PI * x);
+     data[i] *= window;
    }
+ }

-   switch (windowType_) {
-     case WindowType::BLACKMAN:
-       dsp::Blackman().apply(
-           windowData_->getData(), static_cast<int>(windowData_->getSize()));
-       break;
-     case WindowType::HANN:
-       dsp::Hann().apply(
-           windowData_->getData(), static_cast<int>(windowData_->getSize()));
-       break;
+ void AnalyserNode::applyHannWindow(float *data, int length) {
+   // https://www.sciencedirect.com/topics/engineering/hanning-window
+   // https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.hann.html#scipy.signal.windows.hann
+   for (int i = 0; i < length; ++i) {
+     auto x = static_cast<float>(i) / static_cast<float>(length - 1);
+     auto window = 0.5f - 0.5f * cos(2 * PI * x);
+     data[i] *= window;
    }
  }
  } // namespace audioapi
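For reference, the per-sample windows applied inline in doFFTAnalysis above are the standard Blackman and Hann windows, with the coefficients taken directly from the loops in this hunk:

  w_blackman(x) = 0.42 - 0.5 cos(2\pi x) + 0.08 cos(4\pi x),  x = i / length
  w_hann(x)     = 0.5  - 0.5 cos(2\pi x),                     x = i / (length - 1)

The previous implementation precomputed the same windows into windowData_ via dsp::Blackman / dsp::Hann and multiplied them in with dsp::multiply; 0.4.15 applies them in place for each analysis frame instead.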
package/common/cpp/audioapi/core/analysis/AnalyserNode.h

@@ -1,13 +1,12 @@
  #pragma once

  #include <audioapi/core/AudioNode.h>
- #include <audioapi/dsp/FFT.h>
+ #include <audioapi/dsp/FFTFrame.h>

  #include <memory>
  #include <cstddef>
  #include <string>
  #include <complex>
- #include <vector>

  namespace audioapi {

@@ -45,16 +44,15 @@ class AnalyserNode : public AudioNode {
    float minDecibels_;
    float maxDecibels_;
    float smoothingTimeConstant_;
-
    WindowType windowType_;
-   std::shared_ptr<AudioArray> windowData_;

    std::unique_ptr<AudioArray> inputBuffer_;
    std::unique_ptr<AudioBus> downMixBus_;
    int vWriteIndex_;

-   std::unique_ptr<dsp::FFT> fft_;
-   std::vector<std::complex<float>> complexData_;
+   std::unique_ptr<FFTFrame> fftFrame_;
+   std::shared_ptr<AudioArray> realData_;
+   std::shared_ptr<AudioArray> imaginaryData_;
    std::unique_ptr<AudioArray> magnitudeBuffer_;
    bool shouldDoFFTAnalysis_ { true };

@@ -84,8 +82,8 @@ class AnalyserNode : public AudioNode {
    }

    void doFFTAnalysis();
-
-   void setWindowData(WindowType type, int size);
+   static void applyBlackManWindow(float *data, int length);
+   static void applyHannWindow(float *data, int length);
  };

  } // namespace audioapi
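The new split-complex FFT wrapper used above lives in package/common/cpp/audioapi/dsp/FFTFrame.cpp/.h (added in this release, +100/+74 lines, not shown in this excerpt). A minimal sketch of the interface implied by the call sites in AnalyserNode and PeriodicWave — parameter names here are illustrative, not copied from the header:

// Sketch only: the shape of FFTFrame as used above
// (FFTFrame(size), doFFT(input, real, imag), doInverseFFT(output, real, imag)).
// The actual declaration is in audioapi/dsp/FFTFrame.h.
namespace audioapi {
class FFTFrame {
 public:
  explicit FFTFrame(int size);
  // forward FFT: time-domain input -> split real/imaginary spectrum
  void doFFT(float *input, float *realData, float *imaginaryData);
  // inverse FFT: split real/imaginary spectrum -> time-domain output
  void doInverseFFT(float *output, float *realData, float *imaginaryData);
};
} // namespace audioapi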
package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp

@@ -1,8 +1,8 @@
  #include <audioapi/core/AudioNode.h>
  #include <audioapi/core/BaseAudioContext.h>
  #include <audioapi/core/destinations/AudioDestinationNode.h>
+ #include <audioapi/core/utils/AudioBus.h>
  #include <audioapi/core/utils/AudioNodeManager.h>
- #include <audioapi/utils/AudioBus.h>

  namespace audioapi {

package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp

@@ -1,7 +1,7 @@
  #include <audioapi/core/BaseAudioContext.h>
  #include <audioapi/core/effects/BiquadFilterNode.h>
- #include <audioapi/utils/AudioArray.h>
- #include <audioapi/utils/AudioBus.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>

  // https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html - math
  // formulas for filters
@@ -10,16 +10,12 @@ namespace audioapi {

  BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context)
      : AudioNode(context) {
-   frequencyParam_ =
-       std::make_shared<AudioParam>(350.0, 0.0f, context->getNyquistFrequency());
-   detuneParam_ = std::make_shared<AudioParam>(
-       0.0,
-       -1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT,
-       1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT);
-   QParam_ = std::make_shared<AudioParam>(
-       1.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT);
-   gainParam_ = std::make_shared<AudioParam>(
-       0.0, MOST_NEGATIVE_SINGLE_FLOAT, 40 * LOG10_MOST_POSITIVE_SINGLE_FLOAT);
+   frequencyParam_ = std::make_shared<AudioParam>(
+       350.0, MIN_FILTER_FREQUENCY, context->getNyquistFrequency());
+   detuneParam_ = std::make_shared<AudioParam>(0.0, -MAX_DETUNE, MAX_DETUNE);
+   QParam_ = std::make_shared<AudioParam>(1.0, MIN_FILTER_Q, MAX_FILTER_Q);
+   gainParam_ =
+       std::make_shared<AudioParam>(0.0, MIN_FILTER_GAIN, MAX_FILTER_GAIN);
    type_ = BiquadFilterType::LOWPASS;
    isInitialized_ = true;
  }
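The magic-number bounds are replaced here by named constants from the reworked core/Constants.h (item 20 in the file list, +33 -8, not shown in this excerpt). Judging only from the literals they replace in this hunk, the new names presumably map onto the old bounds roughly as in the illustrative sketch below — inferred, not copied from the header:

// Illustrative only: inferred from the literals replaced above; the real
// definitions live in audioapi/core/Constants.h, which this diff changes
// but does not display here.
constexpr float MIN_FILTER_FREQUENCY = 0.0f;
constexpr float MAX_DETUNE = 1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT;
constexpr float MIN_FILTER_Q = MOST_NEGATIVE_SINGLE_FLOAT;
constexpr float MAX_FILTER_Q = MOST_POSITIVE_SINGLE_FLOAT;
constexpr float MIN_FILTER_GAIN = MOST_NEGATIVE_SINGLE_FLOAT;
constexpr float MAX_FILTER_GAIN = 40 * LOG10_MOST_POSITIVE_SINGLE_FLOAT;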
package/common/cpp/audioapi/core/effects/GainNode.cpp

@@ -1,13 +1,12 @@
  #include <audioapi/core/BaseAudioContext.h>
  #include <audioapi/core/effects/GainNode.h>
- #include <audioapi/utils/AudioArray.h>
- #include <audioapi/utils/AudioBus.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>

  namespace audioapi {

  GainNode::GainNode(BaseAudioContext *context) : AudioNode(context) {
-   gainParam_ = std::make_shared<AudioParam>(
-       1.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT);
+   gainParam_ = std::make_shared<AudioParam>(1.0, MIN_GAIN, MAX_GAIN);
    isInitialized_ = true;
  }

package/common/cpp/audioapi/core/effects/PeriodicWave.cpp

@@ -28,6 +28,7 @@

  #include <audioapi/core/Constants.h>
  #include <audioapi/core/effects/PeriodicWave.h>
+ #include <audioapi/dsp/FFTFrame.h>
  #include <audioapi/dsp/VectorMath.h>

  constexpr unsigned NumberOfOctaveBands = 3;
@@ -46,8 +47,6 @@ PeriodicWave::PeriodicWave(float sampleRate, bool disableNormalization)
    scale_ = static_cast<float>(getPeriodicWaveSize()) /
        static_cast<float>(sampleRate_);
    bandLimitedTables_ = new float *[numberOfRanges_];
-
-   fft_ = std::make_unique<dsp::FFT>(getPeriodicWaveSize());
  }

  PeriodicWave::PeriodicWave(
@@ -60,18 +59,12 @@ PeriodicWave::PeriodicWave(

  PeriodicWave::PeriodicWave(
      float sampleRate,
-     const std::vector<std::complex<float>> &complexData,
+     float *real,
+     float *imaginary,
      int length,
      bool disableNormalization)
      : PeriodicWave(sampleRate, disableNormalization) {
-   createBandLimitedTables(complexData, length);
- }
-
- PeriodicWave::~PeriodicWave() {
-   for (int i = 0; i < numberOfRanges_; i++) {
-     delete[] bandLimitedTables_[i];
-   }
-   delete[] bandLimitedTables_;
+   createBandLimitedTables(real, imaginary, length);
  }

  int PeriodicWave::getPeriodicWaveSize() const {
@@ -137,9 +130,15 @@ void PeriodicWave::generateBasicWaveForm(OscillatorType type) {
   * real and imaginary can finely shape which harmonic content is retained or
   * discarded.
   */
-
    auto halfSize = fftSize / 2;
-   auto complexData = std::vector<std::complex<float>>(halfSize);
+
+   auto *real = new float[halfSize];
+   auto *imaginary = new float[halfSize];
+
+   // Reset Direct Current (DC) component. First element of frequency domain
+   // representation - c0. https://math24.net/complex-form-fourier-series.html
+   real[0] = 0.0f;
+   imaginary[0] = 0.0f;

    for (int i = 1; i < halfSize; i++) {
      // All waveforms are odd functions with a positive slope at time 0.
@@ -181,14 +180,16 @@ void PeriodicWave::generateBasicWaveForm(OscillatorType type) {
          throw std::invalid_argument("Custom waveforms are not supported.");
      }

-     complexData[i] = std::complex<float>(0.0f, b);
+     real[i] = 0.0f;
+     imaginary[i] = b;
    }

-   createBandLimitedTables(complexData, halfSize);
+   createBandLimitedTables(real, imaginary, halfSize);
  }

  void PeriodicWave::createBandLimitedTables(
-     const std::vector<std::complex<float>> &complexData,
+     const float *realData,
+     const float *imaginaryData,
      int size) {
    float normalizationFactor = 0.5f;

@@ -198,7 +199,19 @@ void PeriodicWave::createBandLimitedTables(
    size = std::min(size, halfSize);

    for (int rangeIndex = 0; rangeIndex < numberOfRanges_; rangeIndex++) {
-     auto complexFFTData = std::vector<std::complex<float>>(halfSize);
+     FFTFrame fftFrame(fftSize);
+
+     auto *realFFTFrameData = new float[fftSize];
+     auto *imaginaryFFTFrameData = new float[fftSize];
+
+     // copy real and imaginary data to the FFT frame and scale it
+     VectorMath::multiplyByScalar(
+         realData, static_cast<float>(fftSize), realFFTFrameData, size);
+     VectorMath::multiplyByScalar(
+         imaginaryData,
+         -static_cast<float>(fftSize),
+         imaginaryFFTFrameData,
+         size);

      // Find the starting partial where we should start culling.
      // We need to clear out the highest frequencies to band-limit the waveform.
@@ -206,37 +219,41 @@ void PeriodicWave::createBandLimitedTables(

      // Clamp the size to the number of partials.
      auto clampedSize = std::min(size, numberOfPartials);
-
-     // copy real and imaginary data to the FFT frame, scale it and set the
-     // higher frequencies to zero.
-     for (int i = 0; i < size; i++) {
-       if (i >= clampedSize && i < halfSize) {
-         complexFFTData[i] = std::complex<float>(0.0f, 0.0f);
-       } else {
-         complexFFTData[i] = {
-             complexData[i].real() * static_cast<float>(fftSize),
-             complexData[i].imag() * -static_cast<float>(fftSize)};
-       }
+     if (clampedSize < halfSize) {
+       // Zero out the higher frequencies for certain range.
+       std::fill(
+           realFFTFrameData + clampedSize, realFFTFrameData + halfSize, 0.0f);
+       std::fill(
+           imaginaryFFTFrameData + clampedSize,
+           imaginaryFFTFrameData + halfSize,
+           0.0f);
      }

      // Zero out the DC and nquist components.
-     complexFFTData[0] = {0.0f, 0.0f};
+     realFFTFrameData[0] = 0.0f;
+     imaginaryFFTFrameData[0] = 0.0f;

      bandLimitedTables_[rangeIndex] = new float[fftSize];

      // Perform the inverse FFT to get the time domain representation of the
      // band-limited waveform.
-     fft_->doInverseFFT(complexFFTData, bandLimitedTables_[rangeIndex]);
+     fftFrame.doInverseFFT(
+         bandLimitedTables_[rangeIndex],
+         realFFTFrameData,
+         imaginaryFFTFrameData);

      if (!disableNormalization_ && rangeIndex == 0) {
        float maxValue =
-           dsp::maximumMagnitude(bandLimitedTables_[rangeIndex], fftSize);
+           VectorMath::maximumMagnitude(bandLimitedTables_[rangeIndex], fftSize);
        if (maxValue != 0) {
          normalizationFactor = 1.0f / maxValue;
        }
      }

-     dsp::multiplyByScalar(
+     delete[] realFFTFrameData;
+     delete[] imaginaryFFTFrameData;
+
+     VectorMath::multiplyByScalar(
          bandLimitedTables_[rangeIndex],
          normalizationFactor,
          bandLimitedTables_[rangeIndex],
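The rewritten table construction follows the same math as before, just on split real/imaginary arrays: every basic waveform is an odd function, so its Fourier series contains only sine terms,

  f(t) = \sum_{k=1}^{N/2 - 1} b_k sin(2\pi k t / P),

which is why real[i] stays 0 and imaginary[i] carries the per-harmonic coefficient b. Band-limiting a range then reduces to zeroing every bin at or above clampedSize (plus the DC/Nyquist bin) before the inverse FFT that fills bandLimitedTables_[rangeIndex].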
package/common/cpp/audioapi/core/effects/PeriodicWave.h

@@ -29,13 +29,10 @@
  #pragma once

  #include <audioapi/core/types/OscillatorType.h>
- #include <audioapi/dsp/FFT.h>

  #include <algorithm>
  #include <cmath>
  #include <memory>
- #include <vector>
- #include <complex>

  namespace audioapi {
  class PeriodicWave {
@@ -46,10 +43,10 @@ class PeriodicWave {
        bool disableNormalization);
    explicit PeriodicWave(
        float sampleRate,
-       const std::vector<std::complex<float>> &complexData,
+       float *real,
+       float *imaginary,
        int length,
        bool disableNormalization);
-   ~PeriodicWave();

    [[nodiscard]] int getPeriodicWaveSize() const;
    [[nodiscard]] float getScale() const;
@@ -81,7 +78,7 @@ class PeriodicWave {
    // For each range, the inverse FFT is performed to get the time domain
    // representation of the band-limited waveform.
    void
-   createBandLimitedTables(const std::vector<std::complex<float>> &complexData, int size);
+   createBandLimitedTables(const float *real, const float *imaginary, int size);

    // This function returns the interpolation factor between the lower and higher
    // range data and sets the lower and higher wave data for the given
@@ -114,8 +111,6 @@ class PeriodicWave {
    float scale_;
    // array of band-limited waveforms.
    float **bandLimitedTables_;
-   //
-   std::unique_ptr<dsp::FFT> fft_;
    // if true, the waveTable is not normalized.
    bool disableNormalization_;
  };
package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp

@@ -1,8 +1,8 @@
  #include <audioapi/core/BaseAudioContext.h>
  #include <audioapi/core/Constants.h>
  #include <audioapi/core/effects/StereoPannerNode.h>
- #include <audioapi/utils/AudioArray.h>
- #include <audioapi/utils/AudioBus.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>

  // https://webaudio.github.io/web-audio-api/#stereopanner-algorithm

@@ -11,7 +11,7 @@ namespace audioapi {
  StereoPannerNode::StereoPannerNode(BaseAudioContext *context)
      : AudioNode(context) {
    channelCountMode_ = ChannelCountMode::EXPLICIT;
-   panParam_ = std::make_shared<AudioParam>(0.0, -1.0f, 1.0f);
+   panParam_ = std::make_shared<AudioParam>(0.0, MIN_PAN, MAX_PAN);
    isInitialized_ = true;
  }

package/common/cpp/audioapi/core/effects/StretcherNode.cpp (new file)

@@ -0,0 +1,94 @@
+ #include <audioapi/core/BaseAudioContext.h>
+ #include <audioapi/core/effects/StretcherNode.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>
+
+ namespace audioapi {
+
+ StretcherNode::StretcherNode(BaseAudioContext *context) : AudioNode(context) {
+   channelCountMode_ = ChannelCountMode::EXPLICIT;
+   rate_ = std::make_shared<AudioParam>(1.0, 0.0, 3.0);
+   semitones_ = std::make_shared<AudioParam>(0.0, -12.0, 12.0);
+
+   stretch_ =
+       std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
+   stretch_->presetDefault(channelCount_, context->getSampleRate());
+   playbackRateBus_ = std::make_shared<AudioBus>(
+       RENDER_QUANTUM_SIZE * 3, channelCount_, context_->getSampleRate());
+
+   isInitialized_ = true;
+ }
+
+ std::shared_ptr<AudioParam> StretcherNode::getRateParam() const {
+   return rate_;
+ }
+
+ std::shared_ptr<AudioParam> StretcherNode::getSemitonesParam() const {
+   return semitones_;
+ }
+
+ void StretcherNode::processNode(
+     const std::shared_ptr<AudioBus> &processingBus,
+     int framesToProcess) {
+   auto time = context_->getCurrentTime();
+   auto semitones = semitones_->getValueAtTime(time);
+
+   stretch_->setTransposeSemitones(semitones);
+   stretch_->process(
+       playbackRateBus_.get()[0],
+       framesNeededToStretch_,
+       audioBus_.get()[0],
+       framesToProcess);
+ }
+
+ std::shared_ptr<AudioBus> StretcherNode::processAudio(
+     std::shared_ptr<AudioBus> outputBus,
+     int framesToProcess,
+     bool checkIsAlreadyProcessed) {
+   if (!isInitialized_) {
+     return outputBus;
+   }
+
+   if (isAlreadyProcessed()) {
+     return audioBus_;
+   }
+
+   auto time = context_->getCurrentTime();
+
+   auto rate = rate_->getValueAtTime(time);
+   framesNeededToStretch_ =
+       static_cast<int>(rate * static_cast<float>(framesToProcess));
+
+   playbackRateBus_->zero();
+   auto writeIndex = 0;
+   auto framesNeededToStretch = framesNeededToStretch_;
+
+   // Collecting frames needed to stretch
+   while (framesNeededToStretch > 0) {
+     auto framesToCopy = std::min(framesNeededToStretch, framesToProcess);
+
+     // Process inputs and return the bus with the most channels. We must not
+     // check if the node has already been processed, cause we need to process it
+     // multiple times in this case.
+     auto processingBus = processInputs(outputBus, framesToCopy, false);
+
+     // Apply channel count mode.
+     processingBus = applyChannelCountMode(processingBus);
+
+     // Mix all input buses into the processing bus.
+     mixInputsBuses(processingBus);
+
+     assert(processingBus != nullptr);
+
+     playbackRateBus_->copy(processingBus.get(), 0, writeIndex, framesToCopy);
+
+     writeIndex += framesToCopy;
+     framesNeededToStretch -= framesToCopy;
+   }
+
+   processNode(audioBus_, framesToProcess);
+
+   return audioBus_;
+ }
+
+ } // namespace audioapi
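Frame accounting in processAudio above: the node gathers framesNeededToStretch_ = static_cast<int>(rate * framesToProcess) input frames into playbackRateBus_ and lets SignalsmithStretch squeeze them into framesToProcess output frames, while semitones only transposes pitch via setTransposeSemitones. As a worked example, with the usual 128-frame render quantum and rate = 2.0, 256 input frames are collected per callback and stretched down to 128 output frames; because rate is clamped to [0, 3] by its AudioParam, the bus allocated at RENDER_QUANTUM_SIZE * 3 in the constructor covers the worst case.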
package/common/cpp/audioapi/core/effects/StretcherNode.h (new file)

@@ -0,0 +1,35 @@
+ #pragma once
+
+ #include <audioapi/libs/signalsmith-stretch.h>
+ #include <audioapi/core/AudioNode.h>
+ #include <audioapi/core/AudioParam.h>
+
+ #include <memory>
+ #include <string>
+ #include <cassert>
+
+ namespace audioapi {
+ class AudioBus;
+
+ class StretcherNode : public AudioNode {
+  public:
+   explicit StretcherNode(BaseAudioContext *context);
+
+   [[nodiscard]] std::shared_ptr<AudioParam> getRateParam() const;
+   [[nodiscard]] std::shared_ptr<AudioParam> getSemitonesParam() const;
+
+  protected:
+   void processNode(const std::shared_ptr<AudioBus>& processingBus, int framesToProcess) override;
+   std::shared_ptr<AudioBus> processAudio(std::shared_ptr<AudioBus> outputBus, int framesToProcess, bool checkIsAlreadyProcessed) override;
+
+  private:
+   // k-rate params
+   std::shared_ptr<AudioParam> rate_;
+   std::shared_ptr<AudioParam> semitones_;
+
+   std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
+   std::shared_ptr<AudioBus> playbackRateBus_;
+   int framesNeededToStretch_ = RENDER_QUANTUM_SIZE;
+ };
+
+ } // namespace audioapi
package/common/cpp/audioapi/core/sources/AudioBuffer.cpp

@@ -1,6 +1,6 @@
  #include <audioapi/core/sources/AudioBuffer.h>
- #include <audioapi/utils/AudioArray.h>
- #include <audioapi/utils/AudioBus.h>
+ #include <audioapi/core/utils/AudioArray.h>
+ #include <audioapi/core/utils/AudioBus.h>

  #include <utility>

@@ -11,17 +11,10 @@ AudioBuffer::AudioBuffer(
      size_t length,
      float sampleRate) {
    bus_ = std::make_shared<AudioBus>(length, numberOfChannels, sampleRate);
-   stretch_ =
-       std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
-   stretch_->presetDefault(numberOfChannels, sampleRate);
  }

  AudioBuffer::AudioBuffer(std::shared_ptr<AudioBus> bus) {
    bus_ = std::move(bus);
-
-   stretch_ =
-       std::make_shared<signalsmith::stretch::SignalsmithStretch<float>>();
-   stretch_->presetDefault(bus_->getNumberOfChannels(), bus_->getSampleRate());
  }

  size_t AudioBuffer::getLength() const {
package/common/cpp/audioapi/core/sources/AudioBuffer.h

@@ -1,7 +1,5 @@
  #pragma once

- #include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>
-
  #include <algorithm>
  #include <memory>
  #include <string>
@@ -36,10 +34,9 @@ class AudioBuffer : public std::enable_shared_from_this<AudioBuffer> {
      size_t startInChannel);

   private:
-   friend class AudioBufferSourceNode;
-
    std::shared_ptr<AudioBus> bus_;
-   std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
+
+   friend class AudioBufferSourceNode;
  };

  } // namespace audioapi