react-native-audio-api 0.4.8-rc2 → 0.4.9

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (147)
  1. package/android/CMakeLists.txt +2 -5
  2. package/android/build.gradle +25 -2
  3. package/android/src/main/cpp/core/AudioDecoder.cpp +10 -1
  4. package/android/src/main/cpp/core/AudioPlayer.cpp +6 -3
  5. package/android/src/main/cpp/libs/pffft.c +1906 -0
  6. package/android/src/main/cpp/libs/pffft.h +198 -0
  7. package/common/cpp/core/AnalyserNode.cpp +11 -10
  8. package/common/cpp/core/AnalyserNode.h +2 -0
  9. package/common/cpp/core/AudioBuffer.cpp +1 -1
  10. package/common/cpp/core/AudioBufferSourceNode.cpp +26 -16
  11. package/common/cpp/core/AudioBus.cpp +105 -13
  12. package/common/cpp/core/AudioBus.h +6 -4
  13. package/common/cpp/core/AudioContext.cpp +4 -3
  14. package/common/cpp/core/AudioContext.h +4 -4
  15. package/common/cpp/core/AudioDestinationNode.cpp +2 -3
  16. package/common/cpp/core/AudioNode.cpp +78 -58
  17. package/common/cpp/core/AudioNode.h +10 -1
  18. package/common/cpp/core/AudioNodeManager.cpp +13 -1
  19. package/common/cpp/core/AudioNodeManager.h +2 -0
  20. package/common/cpp/core/AudioScheduledSourceNode.cpp +5 -1
  21. package/common/cpp/core/BaseAudioContext.cpp +4 -1
  22. package/common/cpp/core/BaseAudioContext.h +4 -2
  23. package/common/cpp/core/PeriodicWave.cpp +9 -3
  24. package/common/cpp/core/StereoPannerNode.cpp +9 -12
  25. package/common/cpp/utils/FFTFrame.cpp +44 -37
  26. package/common/cpp/utils/FFTFrame.h +5 -14
  27. package/ios/core/AudioDecoder.mm +10 -1
  28. package/ios/core/AudioPlayer.m +23 -23
  29. package/ios/core/IOSAudioPlayer.mm +3 -3
  30. package/lib/module/core/AudioBufferSourceNode.js +2 -2
  31. package/lib/module/core/AudioBufferSourceNode.js.map +1 -1
  32. package/lib/module/index.js +19 -335
  33. package/lib/module/index.js.map +1 -1
  34. package/lib/module/index.web.js +18 -0
  35. package/lib/module/index.web.js.map +1 -0
  36. package/lib/module/types.js.map +1 -0
  37. package/lib/module/web-core/AnalyserNode.js +48 -0
  38. package/lib/module/web-core/AnalyserNode.js.map +1 -0
  39. package/lib/module/web-core/AudioBuffer.js +43 -0
  40. package/lib/module/web-core/AudioBuffer.js.map +1 -0
  41. package/lib/module/web-core/AudioBufferSourceNode.js +62 -0
  42. package/lib/module/web-core/AudioBufferSourceNode.js.map +1 -0
  43. package/lib/module/web-core/AudioContext.js +69 -0
  44. package/lib/module/web-core/AudioContext.js.map +1 -0
  45. package/lib/module/web-core/AudioDestinationNode.js +5 -0
  46. package/lib/module/web-core/AudioDestinationNode.js.map +1 -0
  47. package/lib/module/web-core/AudioNode.js +27 -0
  48. package/lib/module/web-core/AudioNode.js.map +1 -0
  49. package/lib/module/web-core/AudioParam.js +60 -0
  50. package/lib/module/web-core/AudioParam.js.map +1 -0
  51. package/lib/module/web-core/AudioScheduledSourceNode.js +27 -0
  52. package/lib/module/web-core/AudioScheduledSourceNode.js.map +1 -0
  53. package/lib/module/web-core/BaseAudioContext.js +2 -0
  54. package/lib/module/{core/types.js.map → web-core/BaseAudioContext.js.map} +1 -1
  55. package/lib/module/web-core/BiquadFilterNode.js +35 -0
  56. package/lib/module/web-core/BiquadFilterNode.js.map +1 -0
  57. package/lib/module/web-core/GainNode.js +11 -0
  58. package/lib/module/web-core/GainNode.js.map +1 -0
  59. package/lib/module/web-core/OscillatorNode.js +25 -0
  60. package/lib/module/web-core/OscillatorNode.js.map +1 -0
  61. package/lib/module/web-core/PeriodicWave.js +10 -0
  62. package/lib/module/web-core/PeriodicWave.js.map +1 -0
  63. package/lib/module/web-core/StereoPannerNode.js +11 -0
  64. package/lib/module/web-core/StereoPannerNode.js.map +1 -0
  65. package/lib/typescript/core/AnalyserNode.d.ts +1 -1
  66. package/lib/typescript/core/AnalyserNode.d.ts.map +1 -1
  67. package/lib/typescript/core/AudioNode.d.ts +1 -1
  68. package/lib/typescript/core/AudioNode.d.ts.map +1 -1
  69. package/lib/typescript/core/BaseAudioContext.d.ts +1 -1
  70. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  71. package/lib/typescript/core/BiquadFilterNode.d.ts +1 -1
  72. package/lib/typescript/core/BiquadFilterNode.d.ts.map +1 -1
  73. package/lib/typescript/core/OscillatorNode.d.ts +1 -1
  74. package/lib/typescript/core/OscillatorNode.d.ts.map +1 -1
  75. package/lib/typescript/index.d.ts +15 -126
  76. package/lib/typescript/index.d.ts.map +1 -1
  77. package/lib/typescript/index.web.d.ts +16 -0
  78. package/lib/typescript/index.web.d.ts.map +1 -0
  79. package/lib/typescript/interfaces.d.ts +1 -1
  80. package/lib/typescript/interfaces.d.ts.map +1 -1
  81. package/lib/typescript/types.d.ts.map +1 -0
  82. package/lib/typescript/web-core/AnalyserNode.d.ts +18 -0
  83. package/lib/typescript/web-core/AnalyserNode.d.ts.map +1 -0
  84. package/lib/typescript/web-core/AudioBuffer.d.ts +13 -0
  85. package/lib/typescript/web-core/AudioBuffer.d.ts.map +1 -0
  86. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts +19 -0
  87. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts.map +1 -0
  88. package/lib/typescript/web-core/AudioContext.d.ts +30 -0
  89. package/lib/typescript/web-core/AudioContext.d.ts.map +1 -0
  90. package/lib/typescript/web-core/AudioDestinationNode.d.ts +4 -0
  91. package/lib/typescript/web-core/AudioDestinationNode.d.ts.map +1 -0
  92. package/lib/typescript/web-core/AudioNode.d.ts +15 -0
  93. package/lib/typescript/web-core/AudioNode.d.ts.map +1 -0
  94. package/lib/typescript/web-core/AudioParam.d.ts +17 -0
  95. package/lib/typescript/web-core/AudioParam.d.ts.map +1 -0
  96. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts +7 -0
  97. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts.map +1 -0
  98. package/lib/typescript/web-core/BaseAudioContext.d.ts +27 -0
  99. package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -0
  100. package/lib/typescript/web-core/BiquadFilterNode.d.ts +15 -0
  101. package/lib/typescript/web-core/BiquadFilterNode.d.ts.map +1 -0
  102. package/lib/typescript/web-core/GainNode.d.ts +8 -0
  103. package/lib/typescript/web-core/GainNode.d.ts.map +1 -0
  104. package/lib/typescript/web-core/OscillatorNode.d.ts +14 -0
  105. package/lib/typescript/web-core/OscillatorNode.d.ts.map +1 -0
  106. package/lib/typescript/web-core/PeriodicWave.d.ts +6 -0
  107. package/lib/typescript/web-core/PeriodicWave.d.ts.map +1 -0
  108. package/lib/typescript/web-core/StereoPannerNode.d.ts +8 -0
  109. package/lib/typescript/web-core/StereoPannerNode.d.ts.map +1 -0
  110. package/package.json +1 -1
  111. package/src/core/AnalyserNode.ts +1 -1
  112. package/src/core/AudioBufferSourceNode.ts +2 -2
  113. package/src/core/AudioNode.ts +1 -1
  114. package/src/core/BaseAudioContext.ts +1 -1
  115. package/src/core/BiquadFilterNode.ts +1 -1
  116. package/src/core/OscillatorNode.ts +1 -1
  117. package/src/index.ts +30 -568
  118. package/src/index.web.ts +30 -0
  119. package/src/interfaces.ts +1 -1
  120. package/src/web-core/AnalyserNode.tsx +69 -0
  121. package/src/web-core/AudioBuffer.tsx +79 -0
  122. package/src/web-core/AudioBufferSourceNode.tsx +94 -0
  123. package/src/web-core/AudioContext.tsx +114 -0
  124. package/src/web-core/AudioDestinationNode.tsx +3 -0
  125. package/src/web-core/AudioNode.tsx +40 -0
  126. package/src/web-core/AudioParam.tsx +106 -0
  127. package/src/web-core/AudioScheduledSourceNode.tsx +37 -0
  128. package/src/web-core/BaseAudioContext.tsx +37 -0
  129. package/src/web-core/BiquadFilterNode.tsx +62 -0
  130. package/src/web-core/GainNode.tsx +12 -0
  131. package/src/web-core/OscillatorNode.tsx +36 -0
  132. package/src/web-core/PeriodicWave.tsx +8 -0
  133. package/src/web-core/StereoPannerNode.tsx +12 -0
  134. package/android/libs/arm64-v8a/libfftw3.a +0 -0
  135. package/android/libs/armeabi-v7a/libfftw3.a +0 -0
  136. package/android/libs/include/fftw3.h +0 -413
  137. package/android/libs/x86/libfftw3.a +0 -0
  138. package/android/libs/x86_64/libfftw3.a +0 -0
  139. package/lib/module/index.native.js +0 -21
  140. package/lib/module/index.native.js.map +0 -1
  141. package/lib/typescript/core/types.d.ts.map +0 -1
  142. package/lib/typescript/index.native.d.ts +0 -15
  143. package/lib/typescript/index.native.d.ts.map +0 -1
  144. package/src/index.native.ts +0 -27
  145. /package/lib/module/{core/types.js → types.js} +0 -0
  146. /package/lib/typescript/{core/types.d.ts → types.d.ts} +0 -0
  147. /package/src/{core/types.ts → types.ts} +0 -0
package/common/cpp/core/AudioNode.cpp

@@ -1,3 +1,4 @@
+#include <cassert>
 #include <memory>
 
 #include "AudioBus.h"
@@ -9,11 +10,11 @@ namespace audioapi {
 
 AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
   audioBus_ = std::make_shared<AudioBus>(
-      context->getSampleRate(), RENDER_QUANTUM_SIZE, channelCount_);
+      RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
 }
 
 AudioNode::~AudioNode() {
-  cleanup();
+  isInitialized_ = false;
 }
 
 int AudioNode::getNumberOfInputs() const {
@@ -101,87 +102,91 @@ AudioBus *AudioNode::processAudio(AudioBus *outputBus, int framesToProcess) {
     return outputBus;
   }
 
+  if (isAlreadyProcessed()) {
+    return audioBus_.get();
+  }
+
+  // Process inputs and return the bus with the most channels.
+  AudioBus *processingBus = processInputs(outputBus, framesToProcess);
+
+  // Apply channel count mode.
+  processingBus = applyChannelCountMode(processingBus);
+
+  // Mix all input buses into the processing bus.
+  mixInputsBuses(processingBus);
+
+  assert(processingBus != nullptr);
+  // Finally, process the node itself.
+  processNode(processingBus, framesToProcess);
+
+  return processingBus;
+}
+
+bool AudioNode::isAlreadyProcessed() {
+  assert(context_ != nullptr);
+
   std::size_t currentSampleFrame = context_->getCurrentSampleFrame();
 
   // check if the node has already been processed for this rendering quantum
-  bool isAlreadyProcessed = currentSampleFrame == lastRenderedFrame_;
-
-  // Node can't use output bus if:
-  // - outputBus is not provided, which means that next node is doing a
-  // multi-node summing.
-  // - it has more than one input, which means that it has to sum all inputs
-  // using internal bus.
-  // - it has more than one output, so each output node can get the processed
-  // data without re-calculating the node.
-  bool canUseOutputBus =
-      outputBus != nullptr && inputNodes_.size() < 2 && outputNodes_.size() < 2;
-
-  if (isAlreadyProcessed) {
-    // If it was already processed in the rendering quantum, return it.
-    return audioBus_.get();
+  if (currentSampleFrame == lastRenderedFrame_) {
+    return true;
   }
 
   // Update the last rendered frame before processing node and its inputs.
   lastRenderedFrame_ = currentSampleFrame;
 
-  AudioBus *processingBus = canUseOutputBus ? outputBus : audioBus_.get();
+  return false;
+}
 
-  if (!canUseOutputBus) {
-    // Clear the bus before summing all connected nodes.
-    processingBus->zero();
-  }
+AudioBus *AudioNode::processInputs(AudioBus *outputBus, int framesToProcess) {
+  AudioBus *processingBus = audioBus_.get();
+  processingBus->zero();
 
-  if (inputNodes_.empty()) {
-    // If there are no connected inputs, if processing node is the source node,
-    // it will fill processing bus with the audio data, otherwise it will return
-    // silence.
-    processNode(processingBus, framesToProcess);
-    return processingBus;
-  }
+  int maxNumberOfChannels = 0;
+  for (auto inputNode : inputNodes_) {
+    assert(inputNode != nullptr);
 
-  for (auto it = inputNodes_.begin(); it != inputNodes_.end(); ++it) {
-    if (!(*it)->isEnabled()) {
+    if (!inputNode->isEnabled()) {
       continue;
     }
 
-    // Process first connected node, it can be directly connected to the
-    // processingBus, resulting in one less summing operation.
-    if (it == inputNodes_.begin()) {
-      AudioBus *inputBus = (*it)->processAudio(processingBus, framesToProcess);
-
-      if (inputBus != processingBus) {
-        // add assert
-        processingBus->sum(inputBus);
-      }
-    } else {
-      // Enforce the summing to be done using the internal bus.
-      AudioBus *inputBus = (*it)->processAudio(nullptr, framesToProcess);
-      if (inputBus) {
-        // add assert
-        processingBus->sum(inputBus);
-      }
+    auto inputBus = inputNode->processAudio(outputBus, framesToProcess);
+    inputBuses_.push_back(inputBus);
+
+    if (maxNumberOfChannels < inputBus->getNumberOfChannels()) {
+      maxNumberOfChannels = inputBus->getNumberOfChannels();
+      processingBus = inputBus;
     }
   }
 
-  // Finally, process the node itself.
-  processNode(processingBus, framesToProcess);
-
   return processingBus;
 }
 
-void AudioNode::cleanup() {
-  isInitialized_ = false;
+AudioBus *AudioNode::applyChannelCountMode(AudioBus *processingBus) {
+  // If the channelCountMode is EXPLICIT, the node should output the number of
+  // channels specified by the channelCount.
+  if (channelCountMode_ == ChannelCountMode::EXPLICIT) {
+    return audioBus_.get();
+  }
 
-  for (const auto &outputNode : outputNodes_) {
-    outputNode->onInputDisconnected(this);
+  // If the channelCountMode is CLAMPED_MAX, the node should output the maximum
+  // number of channels clamped to channelCount.
+  if (channelCountMode_ == ChannelCountMode::CLAMPED_MAX &&
+      processingBus->getNumberOfChannels() >= channelCount_) {
+    return audioBus_.get();
   }
 
-  for (const auto &inputNode : inputNodes_) {
-    inputNode->disconnectNode(shared_from_this());
+  return processingBus;
+}
+
+void AudioNode::mixInputsBuses(AudioBus *processingBus) {
+  assert(processingBus != nullptr);
+
+  for (auto inputBus : inputBuses_) {
+    processingBus->sum(inputBus, channelInterpretation_);
   }
 
-  outputNodes_.clear();
-  inputNodes_.clear();
+  inputBuses_.clear();
 }
 
 void AudioNode::connectNode(const std::shared_ptr<AudioNode> &node) {
@@ -234,4 +239,19 @@ void AudioNode::onInputDisconnected(AudioNode *node) {
   }
 }
 
+void AudioNode::cleanup() {
+  isInitialized_ = false;
+
+  for (const auto &outputNode : outputNodes_) {
+    outputNode->onInputDisconnected(this);
+  }
+
+  for (const auto &inputNode : inputNodes_) {
+    inputNode->disconnectNode(shared_from_this());
+  }
+
+  outputNodes_.clear();
+  inputNodes_.clear();
+}
+
 } // namespace audioapi
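
Note: the hunk above splits the monolithic processAudio body into four helpers (isAlreadyProcessed, processInputs, applyChannelCountMode, mixInputsBuses) before processNode runs. The following is a simplified, hypothetical sketch of that control flow; Bus and Node are stand-in types for illustration only, not the library's classes.

    // Simplified sketch of the per-render-quantum flow introduced above.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Bus {
      int channels = 2;
      void zero() {}
      void sum(const Bus & /*other*/) {}
    };

    struct Node {
      std::size_t lastRenderedFrame = SIZE_MAX;
      Bus ownBus;
      std::vector<Node *> inputs;
      std::vector<Bus *> inputBuses;

      Bus *process(std::size_t currentFrame, int frames) {
        if (currentFrame == lastRenderedFrame) {
          return &ownBus;  // already rendered in this quantum, reuse the result
        }
        lastRenderedFrame = currentFrame;

        // 1. Process inputs, remembering the widest bus seen.
        Bus *processingBus = &ownBus;
        ownBus.zero();
        for (Node *in : inputs) {
          Bus *b = in->process(currentFrame, frames);
          inputBuses.push_back(b);
          if (b->channels > processingBus->channels) {
            processingBus = b;
          }
        }

        // 2. A channel-count-mode step (EXPLICIT / CLAMPED_MAX) would clamp
        //    back to ownBus here.

        // 3. Mix the remaining input buses into the chosen processing bus.
        for (Bus *b : inputBuses) {
          if (b != processingBus) {
            processingBus->sum(*b);
          }
        }
        inputBuses.clear();

        // 4. Node-specific DSP (processNode) would run on processingBus here.
        return processingBus;
      }
    };

    int main() {
      Node source, gain;
      gain.inputs.push_back(&source);
      gain.process(/*currentFrame=*/0, /*frames=*/128);
    }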
package/common/cpp/core/AudioNode.h

@@ -4,6 +4,7 @@
 #include <string>
 #include <unordered_set>
 #include <cstddef>
+#include <vector>
 
 #include "ChannelCountMode.h"
 #include "ChannelInterpretation.h"
@@ -56,13 +57,19 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   std::size_t lastRenderedFrame_{SIZE_MAX};
 
  private:
+  std::vector<AudioBus*> inputBuses_;
+
   static std::string toString(ChannelCountMode mode);
   static std::string toString(ChannelInterpretation interpretation);
 
-  void cleanup();
   AudioBus *processAudio(AudioBus *outputBus, int framesToProcess);
   virtual void processNode(AudioBus *processingBus, int framesToProcess) = 0;
 
+  bool isAlreadyProcessed();
+  AudioBus *processInputs(AudioBus *outputBus, int framesToProcess);
+  AudioBus *applyChannelCountMode(AudioBus *processingBus);
+  void mixInputsBuses(AudioBus *processingBus);
+
   void connectNode(const std::shared_ptr<AudioNode> &node);
   void disconnectNode(const std::shared_ptr<AudioNode> &node);
 
@@ -70,6 +77,8 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   void onInputDisabled();
   void onInputConnected(AudioNode *node);
   void onInputDisconnected(AudioNode *node);
+
+  void cleanup();
 };
 
 } // namespace audioapi
package/common/cpp/core/AudioNodeManager.cpp

@@ -43,7 +43,9 @@ void AudioNodeManager::settlePendingConnections() {
     std::shared_ptr<AudioNode> to = std::get<1>(connection);
     ConnectionType type = std::get<2>(connection);
 
-    // add assert to check if from and to are neither null nor uninitialized
+    if (!to || !from) {
+      continue;
+    }
 
     if (type == ConnectionType::CONNECT) {
       from->connectNode(to);
@@ -66,4 +68,14 @@ void AudioNodeManager::prepareNodesForDestruction() {
   }
 }
 
+void AudioNodeManager::cleanup() {
+  Locker lock(getGraphLock());
+
+  for (auto &node : nodes_) {
+    node->cleanup();
+  }
+
+  nodes_.clear();
+}
+
 } // namespace audioapi
package/common/cpp/core/AudioNodeManager.h

@@ -27,6 +27,8 @@ class AudioNodeManager {
 
   void addNode(const std::shared_ptr<AudioNode> &node);
 
+  void cleanup();
+
  private:
   std::mutex graphLock_;
 
package/common/cpp/core/AudioScheduledSourceNode.cpp

@@ -1,7 +1,9 @@
-#include "AudioScheduledSourceNode.h"
+#include <cassert>
+
 #include "AudioArray.h"
 #include "AudioBus.h"
 #include "AudioNodeManager.h"
+#include "AudioScheduledSourceNode.h"
 #include "AudioUtils.h"
 #include "BaseAudioContext.h"
 
@@ -51,6 +53,8 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
     return;
   }
 
+  assert(context_ != nullptr);
+
   auto sampleRate = context_->getSampleRate();
 
   size_t firstFrame = context_->getCurrentSampleFrame();
package/common/cpp/core/BaseAudioContext.cpp

@@ -1,4 +1,4 @@
-#include "BaseAudioContext.h"
+#include <cassert>
 
 #include "AnalyserNode.h"
 #include "AudioArray.h"
@@ -8,6 +8,7 @@
 #include "AudioDecoder.h"
 #include "AudioDestinationNode.h"
 #include "AudioNodeManager.h"
+#include "BaseAudioContext.h"
 #include "BiquadFilterNode.h"
 #include "ContextState.h"
 #include "GainNode.h"
@@ -30,10 +31,12 @@ float BaseAudioContext::getSampleRate() const {
 }
 
 std::size_t BaseAudioContext::getCurrentSampleFrame() const {
+  assert(destination_ != nullptr);
   return destination_->getCurrentSampleFrame();
 }
 
 double BaseAudioContext::getCurrentTime() const {
+  assert(destination_ != nullptr);
   return destination_->getCurrentTime();
 }
 
package/common/cpp/core/BaseAudioContext.h

@@ -49,20 +49,22 @@ class BaseAudioContext {
       bool disableNormalization,
       int length);
   std::shared_ptr<AnalyserNode> createAnalyser();
+
   std::shared_ptr<AudioBuffer> decodeAudioDataSource(const std::string &path);
 
   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
+  [[nodiscard]] float getNyquistFrequency() const;
   AudioNodeManager *getNodeManager();
+
   [[nodiscard]] bool isRunning() const;
   [[nodiscard]] bool isClosed() const;
-  [[nodiscard]] float getNyquistFrequency() const;
 
  protected:
   static std::string toString(ContextState state);
+
   std::shared_ptr<AudioDestinationNode> destination_;
   // init in AudioContext or OfflineContext constructor
   std::shared_ptr<AudioDecoder> audioDecoder_ {};
-
   // init in AudioContext or OfflineContext constructor
   float sampleRate_ {};
   ContextState state_ = ContextState::RUNNING;
package/common/cpp/core/PeriodicWave.cpp

@@ -199,8 +199,8 @@ void PeriodicWave::createBandLimitedTables(
   for (int rangeIndex = 0; rangeIndex < numberOfRanges_; rangeIndex++) {
     FFTFrame fftFrame(fftSize);
 
-    auto *realFFTFrameData = fftFrame.getRealData();
-    auto *imaginaryFFTFrameData = fftFrame.getImaginaryData();
+    auto *realFFTFrameData = new float[fftSize];
+    auto *imaginaryFFTFrameData = new float[fftSize];
 
     // copy real and imaginary data to the FFT frame and scale it
     VectorMath::multiplyByScalar(
@@ -235,7 +235,10 @@
 
     // Perform the inverse FFT to get the time domain representation of the
     // band-limited waveform.
-    fftFrame.doInverseFFT(bandLimitedTables_[rangeIndex]);
+    fftFrame.doInverseFFT(
+        bandLimitedTables_[rangeIndex],
+        realFFTFrameData,
+        imaginaryFFTFrameData);
 
     if (!disableNormalization_ && rangeIndex == 0) {
       float maxValue =
@@ -245,6 +248,9 @@
       }
     }
 
+    delete[] realFFTFrameData;
+    delete[] imaginaryFFTFrameData;
+
     VectorMath::multiplyByScalar(
         bandLimitedTables_[rangeIndex],
        normalizationFactor,
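
Note: with the reworked FFTFrame API, the caller now owns the real/imaginary scratch buffers that doFFT/doInverseFFT read and write, as the PeriodicWave hunk above shows. A hedged usage sketch of that calling convention follows; renderBandLimitedTable is a hypothetical helper, and std::vector stands in for the raw new[]/delete[] used in the diff.

    // Sketch of the caller-owned scratch buffer convention for FFTFrame.
    #include <vector>
    #include "FFTFrame.h"

    void renderBandLimitedTable(float *table, int fftSize) {
      audioapi::FFTFrame fftFrame(fftSize);

      // Caller-owned work buffers, zero-initialized.
      std::vector<float> real(fftSize, 0.0f);
      std::vector<float> imag(fftSize, 0.0f);

      // ...fill real/imag with the scaled Fourier coefficients for this range...

      // Inverse-transform the coefficients into the time-domain table.
      fftFrame.doInverseFFT(table, real.data(), imag.data());
    }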
package/common/cpp/core/StereoPannerNode.cpp

@@ -1,8 +1,10 @@
-#include "StereoPannerNode.h"
+#include <cassert>
+
 #include "AudioArray.h"
 #include "AudioBus.h"
 #include "BaseAudioContext.h"
 #include "Constants.h"
+#include "StereoPannerNode.h"
 
 // https://webaudio.github.io/web-audio-api/#stereopanner-algorithm
 
@@ -10,7 +12,7 @@ namespace audioapi {
 
 StereoPannerNode::StereoPannerNode(BaseAudioContext *context)
     : AudioNode(context) {
-  channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
+  channelCountMode_ = ChannelCountMode::EXPLICIT;
   panParam_ = std::make_shared<AudioParam>(0.0, MIN_PAN, MAX_PAN);
   isInitialized_ = true;
 }
@@ -22,12 +24,6 @@ std::shared_ptr<AudioParam> StereoPannerNode::getPanParam() const {
 void StereoPannerNode::processNode(
     AudioBus *processingBus,
     int framesToProcess) {
-  // TODO: Currently assumed channelCount is 2
-  // it should:
-  // - support mono-channel buses
-  // - throw errors when trying to setup stereo panner with more than 2
-  // channels
-
   double time = context_->getCurrentTime();
   double deltaTime = 1.0 / context_->getSampleRate();
 
@@ -35,11 +31,12 @@ void StereoPannerNode::processNode(
   AudioArray *right = processingBus->getChannelByType(AudioBus::ChannelRight);
 
   for (int i = 0; i < framesToProcess; i += 1) {
-    float pan = panParam_->getValueAtTime(time);
-    float x = (pan <= 0 ? pan + 1 : pan) * PI / 2;
+    auto pan = panParam_->getValueAtTime(time);
+
+    auto x = (pan <= 0 ? pan + 1 : pan);
 
-    auto gainL = static_cast<float>(cos(x));
-    auto gainR = static_cast<float>(sin(x));
+    auto gainL = static_cast<float>(cos(x * PI / 2));
+    auto gainR = static_cast<float>(sin(x * PI / 2));
 
     float inputL = (*left)[i];
     float inputR = (*right)[i];
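
Note: the per-sample math above is the equal-power pan law from the Web Audio stereo-panner algorithm linked in the file. The sketch below shows the stereo case end to end; the output cross-mixing step follows that spec and is not part of the hunk shown above, and panStereoFrame is an illustrative helper, not the package's code.

    // Equal-power stereo panning for a stereo input frame.
    #include <cmath>
    #include <cstdio>

    constexpr float kPi = 3.14159265358979323846f;

    void panStereoFrame(float pan, float inL, float inR, float &outL, float &outR) {
      // pan in [-1, 1]; for pan <= 0 map it to [0, 1], otherwise use it directly.
      float x = pan <= 0 ? pan + 1 : pan;
      float gainL = std::cos(x * kPi / 2);
      float gainR = std::sin(x * kPi / 2);

      if (pan <= 0) {
        // Panning left: part of the right channel bleeds into the left.
        outL = inL + inR * gainL;
        outR = inR * gainR;
      } else {
        // Panning right: part of the left channel bleeds into the right.
        outL = inL * gainL;
        outR = inR + inL * gainR;
      }
    }

    int main() {
      float l, r;
      panStereoFrame(0.0f, 0.5f, 0.5f, l, r);   // center: signal passes through
      std::printf("center    L=%.3f R=%.3f\n", l, r);
      panStereoFrame(-1.0f, 0.5f, 0.5f, l, r);  // hard left: everything on L
      std::printf("hard left L=%.3f R=%.3f\n", l, r);
    }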
package/common/cpp/utils/FFTFrame.cpp

@@ -5,7 +5,8 @@
 #endif
 
 #if defined(ANDROID)
-#include <fftw3.h>
+#include <pffft.h>
+#include <complex>
 #endif
 
 namespace audioapi {
@@ -13,19 +14,11 @@ namespace audioapi {
 static std::unordered_map<size_t, FFTSetup> fftSetups_;
 
 FFTFrame::FFTFrame(int size)
-    : size_(size),
-      log2Size_(static_cast<int>(log2(size))),
-      realData_(new float[size]),
-      imaginaryData_(new float[size]) {
+    : size_(size), log2Size_(static_cast<int>(log2(size))) {
   fftSetup_ = getFFTSetupForSize(log2Size_);
-  frame_.realp = realData_;
-  frame_.imagp = imaginaryData_;
 }
 
-FFTFrame::~FFTFrame() {
-  delete[] realData_;
-  delete[] imaginaryData_;
-}
+FFTFrame::~FFTFrame() {}
 
 FFTSetup FFTFrame::getFFTSetupForSize(size_t log2FFTSize) {
   if (!fftSetups_.contains(log2FFTSize)) {
@@ -36,15 +29,22 @@ FFTSetup FFTFrame::getFFTSetupForSize(size_t log2FFTSize) {
   return fftSetups_.at(log2FFTSize);
 }
 
-void FFTFrame::doFFT(float *data) {
+void FFTFrame::doFFT(float *data, float *realData, float *imaginaryData) {
+  frame_.realp = realData;
+  frame_.imagp = imaginaryData;
   vDSP_ctoz(reinterpret_cast<DSPComplex *>(data), 2, &frame_, 1, size_ / 2);
   vDSP_fft_zrip(fftSetup_, &frame_, 1, log2Size_, FFT_FORWARD);
 
-  VectorMath::multiplyByScalar(realData_, 0.5f, realData_, size_ / 2);
-  VectorMath::multiplyByScalar(imaginaryData_, 0.5f, imaginaryData_, size_ / 2);
+  VectorMath::multiplyByScalar(realData, 0.5f, realData, size_ / 2);
+  VectorMath::multiplyByScalar(imaginaryData, 0.5f, imaginaryData, size_ / 2);
 }
 
-void FFTFrame::doInverseFFT(float *data) {
+void FFTFrame::doInverseFFT(
+    float *data,
+    float *realData,
+    float *imaginaryData) {
+  frame_.realp = realData;
+  frame_.imagp = imaginaryData;
   vDSP_fft_zrip(fftSetup_, &frame_, 1, log2Size_, FFT_INVERSE);
   vDSP_ztoc(&frame_, 1, reinterpret_cast<DSPComplex *>(data), 2, size_ / 2);
 
@@ -57,42 +57,49 @@ void FFTFrame::doInverseFFT(float *data) {
 #elif defined(ANDROID)
 
 FFTFrame::FFTFrame(int size)
-    : size_(size),
-      log2Size_(static_cast<int>(log2(size))),
-      realData_(new float[size]),
-      imaginaryData_(new float[size]) {
-  frame_ = fftwf_alloc_complex(size / 2);
+    : size_(size), log2Size_(static_cast<int>(log2(size))) {
+  pffftSetup_ = pffft_new_setup(size_, PFFFT_REAL);
+  work_ = (float *)pffft_aligned_malloc(size_ * sizeof(float));
 }
 
 FFTFrame::~FFTFrame() {
-  delete[] realData_;
-  delete[] imaginaryData_;
-  fftwf_free(frame_);
+  pffft_destroy_setup(pffftSetup_);
+  pffft_aligned_free(work_);
 }
 
-void FFTFrame::doFFT(float *data) {
-  auto plan = fftwf_plan_dft_r2c_1d(size_, data, frame_, FFTW_ESTIMATE);
-  fftwf_execute(plan);
-  fftwf_destroy_plan(plan);
+void FFTFrame::doFFT(float *data, float *realData, float *imaginaryData) {
+  std::vector<std::complex<float>> out(size_);
+  pffft_transform_ordered(
+      pffftSetup_,
+      data,
+      reinterpret_cast<float *>(&out[0]),
+      work_,
+      PFFFT_FORWARD);
 
   for (int i = 0; i < size_ / 2; ++i) {
-    realData_[i] = frame_[i][0];
-    imaginaryData_[i] = frame_[i][1];
+    realData[i] = out[i].real();
+    imaginaryData[i] = out[i].imag();
   }
 
-  VectorMath::multiplyByScalar(realData_, 0.5f, realData_, size_ / 2);
-  VectorMath::multiplyByScalar(imaginaryData_, 0.5f, imaginaryData_, size_ / 2);
+  VectorMath::multiplyByScalar(realData, 0.5f, realData, size_ / 2);
+  VectorMath::multiplyByScalar(imaginaryData, 0.5f, imaginaryData, size_ / 2);
 }
 
-void FFTFrame::doInverseFFT(float *data) {
+void FFTFrame::doInverseFFT(
+    float *data,
+    float *realData,
+    float *imaginaryData) {
+  std::vector<std::complex<float>> out(size_ / 2);
   for (int i = 0; i < size_ / 2; i++) {
-    frame_[i][0] = realData_[i];
-    frame_[i][1] = imaginaryData_[i];
+    out[i] = {realData[i], imaginaryData[i]};
   }
 
-  auto plan = fftwf_plan_dft_c2r_1d(size_, frame_, data, FFTW_ESTIMATE);
-  fftwf_execute(plan);
-  fftwf_destroy_plan(plan);
+  pffft_transform_ordered(
+      pffftSetup_,
+      reinterpret_cast<float *>(&out[0]),
+      data,
+      work_,
+      PFFFT_BACKWARD);
 
   VectorMath::multiplyByScalar(
      data, 1.0f / static_cast<float>(size_), data, size_);
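
Note: the Android backend now uses pffft instead of FFTW. Below is a minimal round-trip sketch built from the same pffft calls as the code above (pffft_new_setup, pffft_transform_ordered, aligned alloc/free); the 1/N scaling mirrors the diff, and N must satisfy pffft's size constraints (for real transforms, a power of two of at least 32 is a safe choice).

    // pffft real FFT round trip: forward, then backward, then scale by 1/N.
    // Error handling omitted; assumes pffft.h and pffft.c are built into the target.
    #include <cmath>
    #include <cstdio>
    #include <pffft.h>

    int main() {
      const int N = 512;  // must meet pffft's real-transform size requirements

      PFFFT_Setup *setup = pffft_new_setup(N, PFFFT_REAL);
      float *in   = (float *)pffft_aligned_malloc(N * sizeof(float));
      float *freq = (float *)pffft_aligned_malloc(N * sizeof(float));
      float *out  = (float *)pffft_aligned_malloc(N * sizeof(float));
      float *work = (float *)pffft_aligned_malloc(N * sizeof(float));

      // Test signal: a sine wave with 4 cycles across the buffer.
      for (int i = 0; i < N; ++i)
        in[i] = std::sin(2.0f * 3.14159265f * 4.0f * i / N);

      pffft_transform_ordered(setup, in, freq, work, PFFFT_FORWARD);
      pffft_transform_ordered(setup, freq, out, work, PFFFT_BACKWARD);

      // pffft transforms are unnormalized, so the round trip returns N * input.
      float maxErr = 0.0f;
      for (int i = 0; i < N; ++i)
        maxErr = std::fmax(maxErr, std::fabs(out[i] / N - in[i]));
      std::printf("max round-trip error: %g\n", maxErr);

      pffft_aligned_free(work);
      pffft_aligned_free(out);
      pffft_aligned_free(freq);
      pffft_aligned_free(in);
      pffft_destroy_setup(setup);
    }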
package/common/cpp/utils/FFTFrame.h

@@ -40,7 +40,7 @@
 #endif
 
 #if defined(ANDROID)
-#include <fftw3.h>
+#include <pffft.h>
 #endif
 
 namespace audioapi {
@@ -50,22 +50,12 @@ class FFTFrame {
   explicit FFTFrame(int size);
   ~FFTFrame();
 
-  [[nodiscard]] float *getRealData() const {
-    return realData_;
-  }
-  [[nodiscard]] float *getImaginaryData() const {
-    return imaginaryData_;
-  }
-
-  void doFFT(float *data);
-
-  void doInverseFFT(float *data);
+  void doFFT(float *data, float *realData, float *imaginaryData);
+  void doInverseFFT(float *data, float *realData, float *imaginaryData);
 
  private:
   int size_;
   int log2Size_;
-  float *realData_;
-  float *imaginaryData_;
 
 #if defined(HAVE_ACCELERATE)
   FFTSetup fftSetup_;
@@ -75,7 +65,8 @@ class FFTFrame {
 #endif
 
 #if defined(ANDROID)
-  fftwf_complex *frame_;
+  PFFFT_Setup *pffftSetup_;
+  float *work_;
 #endif
 };
 
package/ios/core/AudioDecoder.mm

@@ -14,19 +14,28 @@ AudioBus *AudioDecoder::decodeWithFilePath(const std::string &path) const
   ma_result result = ma_decoder_init_file(path.c_str(), &config, &decoder);
   if (result != MA_SUCCESS) {
     NSLog(@"Failed to initialize decoder for file: %s", path.c_str());
+
+    ma_decoder_uninit(&decoder);
+
     return nullptr;
   }
 
   ma_uint64 totalFrameCount;
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);
 
-  auto *audioBus = new AudioBus(sampleRate_, static_cast<int>(totalFrameCount), 2);
+  auto *audioBus = new AudioBus(static_cast<int>(totalFrameCount), 2, sampleRate_);
   auto *buffer = new float[totalFrameCount * 2];
 
   ma_uint64 framesDecoded;
   ma_decoder_read_pcm_frames(&decoder, buffer, totalFrameCount, &framesDecoded);
   if (framesDecoded == 0) {
     NSLog(@"Failed to decode audio file: %s", path.c_str());
+
+    delete[] buffer;
+    delete audioBus;
+    ma_decoder_uninit(&decoder);
+
+    return nullptr;
   }
 
   for (int i = 0; i < decoder.outputChannels; ++i) {