react-native-audio-api 0.3.2 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +17 -11
  2. package/common/cpp/HostObjects/AnalyserNodeHostObject.h +151 -0
  3. package/common/cpp/HostObjects/BaseAudioContextHostObject.h +8 -0
  4. package/common/cpp/core/AnalyserNode.cpp +237 -0
  5. package/common/cpp/core/AnalyserNode.h +54 -0
  6. package/common/cpp/core/AudioNode.cpp +6 -5
  7. package/common/cpp/core/AudioScheduledSourceNode.cpp +1 -2
  8. package/common/cpp/core/BaseAudioContext.cpp +5 -0
  9. package/common/cpp/core/BaseAudioContext.h +2 -0
  10. package/common/cpp/core/Constants.h +9 -1
  11. package/common/cpp/core/PeriodicWave.cpp +2 -2
  12. package/common/cpp/utils/AudioUtils.cpp +7 -0
  13. package/common/cpp/utils/AudioUtils.h +5 -1
  14. package/common/cpp/utils/FFTFrame.cpp +69 -23
  15. package/common/cpp/utils/FFTFrame.h +25 -10
  16. package/common/cpp/utils/VectorMath.cpp +10 -0
  17. package/common/cpp/utils/VectorMath.h +2 -0
  18. package/lib/module/core/AnalyserNode.js +59 -0
  19. package/lib/module/core/AnalyserNode.js.map +1 -0
  20. package/lib/module/core/BaseAudioContext.js +8 -0
  21. package/lib/module/core/BaseAudioContext.js.map +1 -1
  22. package/lib/module/index.js +25 -0
  23. package/lib/module/index.js.map +1 -1
  24. package/lib/module/index.native.js +1 -0
  25. package/lib/module/index.native.js.map +1 -1
  26. package/lib/typescript/core/AnalyserNode.d.ts +18 -0
  27. package/lib/typescript/core/AnalyserNode.d.ts.map +1 -0
  28. package/lib/typescript/core/BaseAudioContext.d.ts +2 -0
  29. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  30. package/lib/typescript/index.d.ts +13 -0
  31. package/lib/typescript/index.d.ts.map +1 -1
  32. package/lib/typescript/index.native.d.ts +1 -0
  33. package/lib/typescript/index.native.d.ts.map +1 -1
  34. package/lib/typescript/interfaces.d.ts +12 -0
  35. package/lib/typescript/interfaces.d.ts.map +1 -1
  36. package/package.json +4 -2
  37. package/src/core/AnalyserNode.ts +85 -0
  38. package/src/core/BaseAudioContext.ts +10 -0
  39. package/src/index.native.ts +1 -0
  40. package/src/index.ts +46 -0
  41. package/src/interfaces.ts +14 -0
package/README.md CHANGED
@@ -17,11 +17,25 @@ allowing developers to generate and modify audio in exact same way it is possibl
  yarn add react-native-audio-api
  ```
 
+ ## Usage with Expo
+
+ `react-native-audio-api` contains custom native code and is not part of the Expo Go application. To use it in an Expo managed project, you need an Expo development build. The simplest way to run local Expo dev builds is:
+
+ ```bash
+ # Build native iOS project
+ npx expo run:ios
+ # Build native Android project
+ npx expo run:android
+ ```
+
+ To learn more about Expo development builds, check out the [development builds documentation](https://docs.expo.dev/develop/development-builds/introduction/).
+
  ## Documentation
 
- `react-native-audio-api` tries to strictly follow the Web Audi API specification, which can be found at [https://www.w3.org/TR/webaudio/](https://www.w3.org/TR/webaudio/).
- <br />
- [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) is useful resource to get familiar with audio processing basics.
+ Check out our dedicated documentation page for information about this library, the API reference and more:
+ [https://software-mansion-labs.github.io/react-native-audio-api/](https://software-mansion-labs.github.io/react-native-audio-api/)
+
+ You can also check out [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) for a fully detailed description of the Web Audio API.
 
  ## Coverage
 
@@ -29,14 +43,6 @@ Our current coverage of Web Audio API specification can be found here: [Web Audi
 
  ## Examples
 
- <div align="center">
-
- <a href="https://www.youtube.com/watch?v=npALr9IIDkI" target="_blank" rel="noopener noreferrer">
- <img src="./docs/assets/rnaa-example-01-thumbnail.png" width="640" />
- </a>
-
- </div>
-
  The source code for the example application is under the [`/apps/common-app`](./apps/common-app/) directory. If you want to play with the API but don't feel like trying it on a real app, you can run the example project. Check [Example README](./apps/fabric-example/README.md) for installation instructions.
 
  ## Your feedback
package/common/cpp/HostObjects/AnalyserNodeHostObject.h ADDED
@@ -0,0 +1,151 @@
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "AnalyserNode.h"
+ #include "AudioNodeHostObject.h"
+
+ namespace audioapi {
+ using namespace facebook;
+
+ class AnalyserNodeHostObject : public AudioNodeHostObject {
+  public:
+   explicit AnalyserNodeHostObject(const std::shared_ptr<AnalyserNode> &node)
+       : AudioNodeHostObject(node) {
+     addGetters(
+         JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, fftSize),
+         JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, frequencyBinCount),
+         JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, minDecibels),
+         JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, maxDecibels),
+         JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, smoothingTimeConstant));
+
+     addFunctions(
+         JSI_EXPORT_FUNCTION(AnalyserNodeHostObject, getFloatFrequencyData),
+         JSI_EXPORT_FUNCTION(AnalyserNodeHostObject, getByteFrequencyData),
+         JSI_EXPORT_FUNCTION(AnalyserNodeHostObject, getFloatTimeDomainData),
+         JSI_EXPORT_FUNCTION(AnalyserNodeHostObject, getByteTimeDomainData));
+
+     addSetters(
+         JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, fftSize),
+         JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, minDecibels),
+         JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, maxDecibels),
+         JSI_EXPORT_PROPERTY_SETTER(
+             AnalyserNodeHostObject, smoothingTimeConstant));
+   }
+
+   JSI_PROPERTY_GETTER(fftSize) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     return {static_cast<int>(analyserNode->getFftSize())};
+   }
+
+   JSI_PROPERTY_GETTER(frequencyBinCount) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     return {static_cast<int>(analyserNode->getFrequencyBinCount())};
+   }
+
+   JSI_PROPERTY_GETTER(minDecibels) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     return {analyserNode->getMinDecibels()};
+   }
+
+   JSI_PROPERTY_GETTER(maxDecibels) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     return {analyserNode->getMaxDecibels()};
+   }
+
+   JSI_PROPERTY_GETTER(smoothingTimeConstant) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     return {analyserNode->getSmoothingTimeConstant()};
+   }
+
+   JSI_HOST_FUNCTION(getFloatFrequencyData) {
+     auto destination = args[0].getObject(runtime).asArray(runtime);
+     auto length = static_cast<size_t>(
+         destination.getProperty(runtime, "length").asNumber());
+     // Use a std::vector so the temporary buffer is released on return.
+     std::vector<float> data(length);
+
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->getFloatFrequencyData(data.data(), length);
+
+     for (size_t i = 0; i < length; i++) {
+       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+     }
+
+     return jsi::Value::undefined();
+   }
+
+   JSI_HOST_FUNCTION(getByteFrequencyData) {
+     auto destination = args[0].getObject(runtime).asArray(runtime);
+     auto length = static_cast<size_t>(
+         destination.getProperty(runtime, "length").asNumber());
+     std::vector<uint8_t> data(length);
+
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->getByteFrequencyData(data.data(), length);
+
+     for (size_t i = 0; i < length; i++) {
+       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+     }
+
+     return jsi::Value::undefined();
+   }
+
+   JSI_HOST_FUNCTION(getFloatTimeDomainData) {
+     auto destination = args[0].getObject(runtime).asArray(runtime);
+     auto length = static_cast<size_t>(
+         destination.getProperty(runtime, "length").asNumber());
+     std::vector<float> data(length);
+
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->getFloatTimeDomainData(data.data(), length);
+
+     for (size_t i = 0; i < length; i++) {
+       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+     }
+
+     return jsi::Value::undefined();
+   }
+
+   JSI_HOST_FUNCTION(getByteTimeDomainData) {
+     auto destination = args[0].getObject(runtime).asArray(runtime);
+     auto length = static_cast<size_t>(
+         destination.getProperty(runtime, "length").asNumber());
+     std::vector<uint8_t> data(length);
+
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->getByteTimeDomainData(data.data(), length);
+
+     for (size_t i = 0; i < length; i++) {
+       destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+     }
+
+     return jsi::Value::undefined();
+   }
+
+   JSI_PROPERTY_SETTER(fftSize) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->setFftSize(static_cast<size_t>(value.getNumber()));
+   }
+
+   JSI_PROPERTY_SETTER(minDecibels) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     // Pass the double through; decibel limits need not be integers.
+     analyserNode->setMinDecibels(value.getNumber());
+   }
+
+   JSI_PROPERTY_SETTER(maxDecibels) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     analyserNode->setMaxDecibels(value.getNumber());
+   }
+
+   JSI_PROPERTY_SETTER(smoothingTimeConstant) {
+     auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+     // The smoothing constant lies in [0, 1]; truncating it to an int would
+     // collapse every valid value except 1 to 0.
+     analyserNode->setSmoothingTimeConstant(value.getNumber());
+   }
+ };
+ } // namespace audioapi
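The host object above mirrors the Web Audio `AnalyserNode` surface on the JS side. A minimal sketch of the calling convention it implies — the caller allocates the destination array, and the native side reads its `length` and writes the results back in place — might look like this in the package's TypeScript layer. The `AudioContext` import is an assumption for illustration, not verified against `package/src/index.ts`:

```typescript
import { AudioContext } from 'react-native-audio-api';

const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();

// Setters exposed by AnalyserNodeHostObject.
analyser.fftSize = 2048;
analyser.smoothingTimeConstant = 0.8;

// getByteFrequencyData fills the caller-allocated array in place; the
// host function reads `length` from the argument. It calls asArray() at
// the JSI boundary, so a plain JS array (not a Uint8Array) is assumed here.
const bins = new Array<number>(analyser.frequencyBinCount).fill(0);
analyser.getByteFrequencyData(bins);
```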
package/common/cpp/HostObjects/BaseAudioContextHostObject.h CHANGED
@@ -17,6 +17,7 @@
  #include "OscillatorNodeHostObject.h"
  #include "PeriodicWaveHostObject.h"
  #include "StereoPannerNodeHostObject.h"
+ #include "AnalyserNodeHostObject.h"
 
  namespace audioapi {
  using namespace facebook;
@@ -41,6 +42,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
         JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferSource),
         JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBuffer),
         JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
+        JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser),
         JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, decodeAudioDataSource));
   }
 
@@ -131,6 +133,12 @@ class BaseAudioContextHostObject : public JsiHostObject {
     return jsi::Object::createFromHostObject(runtime, periodicWaveHostObject);
   }
 
+  JSI_HOST_FUNCTION(createAnalyser) {
+    auto analyser = context_->createAnalyser();
+    auto analyserHostObject = std::make_shared<AnalyserNodeHostObject>(analyser);
+    return jsi::Object::createFromHostObject(runtime, analyserHostObject);
+  }
+
   JSI_HOST_FUNCTION(decodeAudioDataSource) {
     auto sourcePath = args[0].getString(runtime).utf8(runtime);
 
package/common/cpp/core/AnalyserNode.cpp ADDED
@@ -0,0 +1,237 @@
+ #include "AnalyserNode.h"
+
+ #include <complex>
+ #include <cstring>
+
+ #include "AudioArray.h"
+ #include "AudioBus.h"
+ #include "AudioUtils.h"
+ #include "BaseAudioContext.h"
+ #include "FFTFrame.h"
+ #include "VectorMath.h"
+
+ namespace audioapi {
+ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
+     : AudioNode(context),
+       fftSize_(DEFAULT_FFT_SIZE),
+       minDecibels_(DEFAULT_MIN_DECIBELS),
+       maxDecibels_(DEFAULT_MAX_DECIBELS),
+       smoothingTimeConstant_(DEFAULT_SMOOTHING_TIME_CONSTANT),
+       vWriteIndex_(0) {
+   inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
+   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+   magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
+   isInitialized_ = true;
+ }
+
+ size_t AnalyserNode::getFftSize() const {
+   return fftSize_;
+ }
+
+ size_t AnalyserNode::getFrequencyBinCount() const {
+   return fftSize_ / 2;
+ }
+
+ double AnalyserNode::getMinDecibels() const {
+   return minDecibels_;
+ }
+
+ double AnalyserNode::getMaxDecibels() const {
+   return maxDecibels_;
+ }
+
+ double AnalyserNode::getSmoothingTimeConstant() const {
+   return smoothingTimeConstant_;
+ }
+
+ void AnalyserNode::setFftSize(size_t fftSize) {
+   if (fftSize_ == fftSize) {
+     return;
+   }
+
+   fftSize_ = fftSize;
+   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+   magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
+ }
+
+ void AnalyserNode::setMinDecibels(double minDecibels) {
+   minDecibels_ = minDecibels;
+ }
+
+ void AnalyserNode::setMaxDecibels(double maxDecibels) {
+   maxDecibels_ = maxDecibels;
+ }
+
+ void AnalyserNode::setSmoothingTimeConstant(double smoothingTimeConstant) {
+   smoothingTimeConstant_ = smoothingTimeConstant;
+ }
+
+ void AnalyserNode::getFloatFrequencyData(float *data, size_t length) {
+   doFFTAnalysis();
+
+   length = std::min<size_t>(magnitudeBuffer_->getSize(), length);
+   VectorMath::linearToDecibels(magnitudeBuffer_->getData(), data, length);
+ }
+
+ void AnalyserNode::getByteFrequencyData(uint8_t *data, size_t length) {
+   doFFTAnalysis();
+
+   auto magnitudeBufferData = magnitudeBuffer_->getData();
+   length = std::min<size_t>(magnitudeBuffer_->getSize(), length);
+
+   const auto rangeScaleFactor =
+       maxDecibels_ == minDecibels_ ? 1 : 1 / (maxDecibels_ - minDecibels_);
+
+   for (size_t i = 0; i < length; i++) {
+     auto dbMag = magnitudeBufferData[i] == 0
+         ? minDecibels_
+         : AudioUtils::linearToDecibels(magnitudeBufferData[i]);
+     auto scaledValue = UINT8_MAX * (dbMag - minDecibels_) * rangeScaleFactor;
+
+     if (scaledValue < 0) {
+       scaledValue = 0;
+     }
+     if (scaledValue > UINT8_MAX) {
+       scaledValue = UINT8_MAX;
+     }
+
+     data[i] = static_cast<uint8_t>(scaledValue);
+   }
+ }
+
+ void AnalyserNode::getFloatTimeDomainData(float *data, size_t length) {
+   auto size = std::min(fftSize_, length);
+
+   // Read the most recent fftSize_ samples preceding vWriteIndex_ in the
+   // circular input buffer.
+   for (size_t i = 0; i < size; i++) {
+     data[i] = inputBuffer_->getData()
+         [(vWriteIndex_ + i - fftSize_ + inputBuffer_->getSize()) %
+          inputBuffer_->getSize()];
+   }
+ }
+
+ void AnalyserNode::getByteTimeDomainData(uint8_t *data, size_t length) {
+   auto size = std::min(fftSize_, length);
+
+   for (size_t i = 0; i < size; i++) {
+     auto value = inputBuffer_->getData()
+         [(vWriteIndex_ + i - fftSize_ + inputBuffer_->getSize()) %
+          inputBuffer_->getSize()];
+
+     // Map [-1, 1] to [0, 255].
+     float scaledValue = 128 * (value + 1);
+
+     if (scaledValue < 0) {
+       scaledValue = 0;
+     }
+     if (scaledValue > UINT8_MAX) {
+       scaledValue = UINT8_MAX;
+     }
+
+     data[i] = static_cast<uint8_t>(scaledValue);
+   }
+ }
+
+ void AnalyserNode::processNode(
+     audioapi::AudioBus *processingBus,
+     int framesToProcess) {
+   if (!isInitialized_) {
+     processingBus->zero();
+     return;
+   }
+
+   // The analyser behaves like a sniffer node: it must not modify the
+   // processingBus, it only copies its data into the node's own input buffer.
+   if (downMixBus_ == nullptr) {
+     downMixBus_ = std::make_unique<AudioBus>(
+         context_->getSampleRate(), processingBus->getSize(), 1);
+   }
+
+   downMixBus_->copy(processingBus);
+
+   auto *downMixData = downMixBus_->getChannel(0)->getData();
+
+   if (vWriteIndex_ + framesToProcess > inputBuffer_->getSize()) {
+     // The write wraps around the circular buffer: fill it up to the end,
+     // then continue from the start with the remaining frames.
+     auto framesToCopy = inputBuffer_->getSize() - vWriteIndex_;
+     memcpy(
+         inputBuffer_->getData() + vWriteIndex_,
+         downMixData,
+         framesToCopy * sizeof(float));
+
+     vWriteIndex_ = 0;
+     framesToProcess -= framesToCopy;
+     downMixData += framesToCopy;
+   }
+
+   memcpy(
+       inputBuffer_->getData() + vWriteIndex_,
+       downMixData,
+       framesToProcess * sizeof(float));
+
+   vWriteIndex_ += framesToProcess;
+   if (vWriteIndex_ >= inputBuffer_->getSize()) {
+     vWriteIndex_ = 0;
+   }
+
+   shouldDoFFTAnalysis_ = true;
+ }
+
+ void AnalyserNode::doFFTAnalysis() {
+   if (!shouldDoFFTAnalysis_) {
+     return;
+   }
+
+   shouldDoFFTAnalysis_ = false;
+
+   // Copy fftSize_ elements from the input buffer into a temporary buffer so
+   // the window can be applied without touching the input data.
+   AudioArray tempBuffer(this->fftSize_);
+
+   // We want the last fftSize_ elements written to the input buffer (the
+   // fftSize_ elements before vWriteIndex_). Since inputBuffer_ is a circular
+   // buffer, there are two cases to consider.
+   if (vWriteIndex_ < fftSize_) {
+     tempBuffer.copy(
+         inputBuffer_.get(),
+         vWriteIndex_ - fftSize_ + inputBuffer_->getSize(),
+         0,
+         fftSize_ - vWriteIndex_);
+     tempBuffer.copy(
+         inputBuffer_.get(), 0, fftSize_ - vWriteIndex_, vWriteIndex_);
+   } else {
+     tempBuffer.copy(inputBuffer_.get(), vWriteIndex_ - fftSize_, 0, fftSize_);
+   }
+
+   AnalyserNode::applyWindow(tempBuffer.getData(), fftSize_);
+
+   // Do the FFT analysis to obtain the frequency-domain data.
+   fftFrame_->doFFT(tempBuffer.getData());
+
+   auto *realFFTFrameData = fftFrame_->getRealData();
+   auto *imaginaryFFTFrameData = fftFrame_->getImaginaryData();
+
+   // Zero out the Nyquist component.
+   imaginaryFFTFrameData[0] = 0.0f;
+
+   const float magnitudeScale = 1.0f / static_cast<float>(fftSize_);
+   auto magnitudeBufferData = magnitudeBuffer_->getData();
+
+   for (size_t i = 0; i < magnitudeBuffer_->getSize(); i++) {
+     std::complex<float> c(realFFTFrameData[i], imaginaryFFTFrameData[i]);
+     auto scalarMagnitude = std::abs(c) * magnitudeScale;
+     magnitudeBufferData[i] = static_cast<float>(
+         smoothingTimeConstant_ * magnitudeBufferData[i] +
+         (1 - smoothingTimeConstant_) * scalarMagnitude);
+   }
+ }
+
+ void AnalyserNode::applyWindow(float *data, size_t length) {
+   // Blackman window, see:
+   // https://www.sciencedirect.com/topics/engineering/blackman-window
+   auto alpha = 0.16f;
+   auto a0 = 0.5f * (1 - alpha);
+   auto a1 = 0.5f;
+   auto a2 = 0.5f * alpha;
+
+   for (size_t i = 0; i < length; ++i) {
+     auto x = static_cast<float>(i) / static_cast<float>(length);
+     auto window = a0 - a1 * cos(2 * static_cast<float>(M_PI) * x) +
+         a2 * cos(4 * static_cast<float>(M_PI) * x);
+     data[i] *= window;
+   }
+ }
+ } // namespace audioapi
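Summarizing the analysis pipeline implemented above as formulas (a restatement of the code, with N = `fftSize_`, tau = `smoothingTimeConstant_`, and X[k] the FFT of the windowed block):

```latex
% Blackman window applied to sample n of the N-sample block (\alpha = 0.16):
w(n) = \tfrac{1-\alpha}{2}
     - \tfrac{1}{2}\cos\!\left(\tfrac{2\pi n}{N}\right)
     + \tfrac{\alpha}{2}\cos\!\left(\tfrac{4\pi n}{N}\right)

% Exponentially smoothed magnitude per bin (doFFTAnalysis):
\hat{X}[k] \leftarrow \tau\,\hat{X}[k] + (1-\tau)\,\frac{\lvert X[k]\rvert}{N}

% Byte scaling in getByteFrequencyData, clamped to [0, 255]:
b[k] = 255 \cdot \frac{20\log_{10}\hat{X}[k] - \mathrm{dB}_{\min}}
                      {\mathrm{dB}_{\max} - \mathrm{dB}_{\min}}
```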
package/common/cpp/core/AnalyserNode.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <memory>
+
+ #include "AudioNode.h"
+
+ namespace audioapi {
+
+ class AudioBus;
+ class AudioArray;
+ class FFTFrame;
+
+ class AnalyserNode : public AudioNode {
+  public:
+   explicit AnalyserNode(BaseAudioContext *context);
+
+   size_t getFftSize() const;
+   size_t getFrequencyBinCount() const;
+   double getMinDecibels() const;
+   double getMaxDecibels() const;
+   double getSmoothingTimeConstant() const;
+
+   void setFftSize(size_t fftSize);
+   void setMinDecibels(double minDecibels);
+   void setMaxDecibels(double maxDecibels);
+   void setSmoothingTimeConstant(double smoothingTimeConstant);
+
+   void getFloatFrequencyData(float *data, size_t length);
+   void getByteFrequencyData(uint8_t *data, size_t length);
+   void getFloatTimeDomainData(float *data, size_t length);
+   void getByteTimeDomainData(uint8_t *data, size_t length);
+
+  protected:
+   void processNode(AudioBus *processingBus, int framesToProcess) override;
+
+  private:
+   size_t fftSize_;
+   double minDecibels_;
+   double maxDecibels_;
+   double smoothingTimeConstant_;
+
+   std::unique_ptr<AudioArray> inputBuffer_;
+   std::unique_ptr<AudioBus> downMixBus_;
+   int vWriteIndex_;
+
+   std::unique_ptr<FFTFrame> fftFrame_;
+   std::unique_ptr<AudioArray> magnitudeBuffer_;
+   bool shouldDoFFTAnalysis_{true};
+
+   void doFFTAnalysis();
+   static void applyWindow(float *data, size_t length);
+ };
+
+ } // namespace audioapi
package/common/cpp/core/AudioNode.cpp CHANGED
@@ -126,7 +126,7 @@ AudioBus *AudioNode::processAudio(AudioBus *outputBus, int framesToProcess) {
   // - it has more than one output, so each output node can get the processed
   // data without re-calculating the node.
   bool canUseOutputBus =
-      outputBus != 0 && inputNodes_.size() < 2 && outputNodes_.size() < 2;
+      outputBus != nullptr && inputNodes_.size() < 2 && outputNodes_.size() < 2;
 
   if (isAlreadyProcessed) {
     // If it was already processed in the rendering quantum, return it.
@@ -144,8 +144,9 @@ AudioBus *AudioNode::processAudio(AudioBus *outputBus, int framesToProcess) {
   }
 
   if (inputNodes_.empty()) {
-    // If there are no connected inputs, process the node just to advance the
-    // audio params. The node will output silence anyway.
+    // If there are no connected inputs and the processing node is a source
+    // node, it will fill the processing bus with audio data; otherwise it
+    // will output silence.
     processNode(processingBus, framesToProcess);
     return processingBus;
   }
@@ -214,7 +215,7 @@ void AudioNode::onInputDisconnected(AudioNode *node) {
     inputNodes_.erase(position);
   }
 
-  if (inputNodes_.size() > 0) {
+  if (!inputNodes_.empty()) {
     return;
   }
 
@@ -222,7 +223,7 @@ void AudioNode::onInputDisconnected(AudioNode *node) {
     node->onInputDisabled();
   }
 
-  for (auto outputNode : outputNodes_) {
+  for (const auto &outputNode : outputNodes_) {
     disconnectNode(outputNode);
   }
 }
package/common/cpp/core/AudioScheduledSourceNode.cpp CHANGED
@@ -85,8 +85,7 @@ void AudioScheduledSourceNode::updatePlaybackInfo(
     startOffset = std::max(startFrame, firstFrame) - firstFrame > 0
         ? std::max(startFrame, firstFrame) - firstFrame
         : 0;
-    nonSilentFramesToProcess =
-        std::min(lastFrame, stopFrame) - startFrame;
+    nonSilentFramesToProcess = std::min(lastFrame, stopFrame) - startFrame;
     processingBus->zero(0, startOffset);
     return;
   }
package/common/cpp/core/BaseAudioContext.cpp CHANGED
@@ -6,6 +6,7 @@
 
  #include "BaseAudioContext.h"
 
+ #include "AnalyserNode.h"
  #include "AudioArray.h"
  #include "AudioBuffer.h"
  #include "AudioBufferSourceNode.h"
@@ -106,6 +107,10 @@ std::shared_ptr<PeriodicWave> BaseAudioContext::createPeriodicWave(
       sampleRate_, real, imag, length, disableNormalization);
  }
 
+ std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
+   return std::make_shared<AnalyserNode>(this);
+ }
+
  std::shared_ptr<AudioBuffer> BaseAudioContext::decodeAudioDataSource(
      const std::string &path) {
    auto audioBus = audioDecoder_->decodeWithFilePath(path);
package/common/cpp/core/BaseAudioContext.h CHANGED
@@ -22,6 +22,7 @@ class BiquadFilterNode;
  class AudioDestinationNode;
  class AudioBufferSourceNode;
  class AudioDecoder;
+ class AnalyserNode;
 
  #ifdef ANDROID
  class AudioPlayer;
@@ -53,6 +54,7 @@ class BaseAudioContext {
       float *imag,
       bool disableNormalization,
       int length);
+  std::shared_ptr<AnalyserNode> createAnalyser();
 
   std::shared_ptr<AudioBuffer> decodeAudioDataSource(const std::string &path);
   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
package/common/cpp/core/Constants.h CHANGED
@@ -6,10 +6,12 @@
  // https://webaudio.github.io/web-audio-api/
 
  namespace audioapi {
- constexpr int SAMPLE_RATE = 44100;
+ constexpr int SAMPLE_RATE = 48000;
  constexpr int CHANNEL_COUNT = 2;
+
  constexpr float MOST_POSITIVE_SINGLE_FLOAT = static_cast<float>(std::numeric_limits<float>::max());
  constexpr float MOST_NEGATIVE_SINGLE_FLOAT = static_cast<float>(std::numeric_limits<float>::lowest());
+
  constexpr float NYQUIST_FREQUENCY = SAMPLE_RATE / 2.0;
  static float MAX_DETUNE = 1200 * std::log2(MOST_POSITIVE_SINGLE_FLOAT);
  constexpr float MAX_GAIN = MOST_POSITIVE_SINGLE_FLOAT;
@@ -19,4 +21,10 @@ constexpr float MAX_FILTER_FREQUENCY = NYQUIST_FREQUENCY;
  constexpr float MIN_FILTER_FREQUENCY = 0.0;
  static float MAX_FILTER_GAIN = 40 * std::log10(MOST_POSITIVE_SINGLE_FLOAT);
  constexpr float MIN_FILTER_GAIN = -MAX_GAIN;
+
+ constexpr int MAX_FFT_SIZE = 32768;
+ constexpr int DEFAULT_FFT_SIZE = 2048;
+ constexpr double DEFAULT_MAX_DECIBELS = -30;
+ constexpr double DEFAULT_MIN_DECIBELS = -100;
+ const double DEFAULT_SMOOTHING_TIME_CONSTANT = 0.8;
  } // namespace audioapi
package/common/cpp/core/PeriodicWave.cpp CHANGED
@@ -226,7 +226,7 @@ void PeriodicWave::createBandLimitedTables(
         0.0f);
   }
 
-  // Zero out the nquist and DC components.
+  // Zero out the DC and Nyquist components.
   realFFTFrameData[0] = 0.0f;
   imaginaryFFTFrameData[0] = 0.0f;
 
@@ -234,7 +234,7 @@ void PeriodicWave::createBandLimitedTables(
 
   // Perform the inverse FFT to get the time domain representation of the
   // band-limited waveform.
-  fftFrame.inverse(bandLimitedTables_[rangeIndex]);
+  fftFrame.doInverseFFT(bandLimitedTables_[rangeIndex]);
 
   if (!disableNormalization_ && rangeIndex == 0) {
     float maxValue =
package/common/cpp/utils/AudioUtils.cpp CHANGED
@@ -23,4 +23,11 @@ float linearInterpolate(
       factor * (source[secondIndex] - source[firstIndex]);
  }
 
+ float linearToDecibels(float value) {
+   return 20 * log10f(value);
+ }
+
+ float decibelsToLinear(float value) {
+   return powf(10, value / 20);
+ }
  } // namespace audioapi::AudioUtils
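The two helpers added here are the standard amplitude-to-decibel conversions, exact inverses of each other:

```latex
L(x) = 20\log_{10} x, \qquad x(L) = 10^{L/20}, \qquad x\bigl(L(x)\bigr) = x \quad (x > 0)
```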
package/common/cpp/utils/AudioUtils.h CHANGED
@@ -1,12 +1,16 @@
  #pragma once
 
  #include <cstddef>
+ #include <cstdint>
+ #include <cmath>
 
  namespace audioapi::AudioUtils {
- size_t timeToSampleFrame(double time, int sampleRate);
 
+ size_t timeToSampleFrame(double time, int sampleRate);
  double sampleFrameToTime(int sampleFrame, int sampleRate);
 
  float linearInterpolate(const float *source, size_t firstIndex, size_t secondIndex, float factor);
 
+ float linearToDecibels(float value);
+ float decibelsToLinear(float value);
  } // namespace audioapi::AudioUtils