react-native-audio-api 0.3.2 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/README.md +17 -11
  2. package/android/src/main/cpp/core/AudioPlayer.cpp +38 -11
  3. package/android/src/main/cpp/core/AudioPlayer.h +5 -1
  4. package/common/cpp/HostObjects/AnalyserNodeHostObject.h +151 -0
  5. package/common/cpp/HostObjects/AudioAPIInstallerHostObject.h +11 -3
  6. package/common/cpp/HostObjects/AudioNodeHostObject.h +5 -0
  7. package/common/cpp/HostObjects/BaseAudioContextHostObject.h +8 -0
  8. package/common/cpp/core/AnalyserNode.cpp +237 -0
  9. package/common/cpp/core/AnalyserNode.h +54 -0
  10. package/common/cpp/core/AudioContext.cpp +41 -1
  11. package/common/cpp/core/AudioContext.h +17 -0
  12. package/common/cpp/core/AudioNode.cpp +21 -8
  13. package/common/cpp/core/AudioNode.h +1 -0
  14. package/common/cpp/core/AudioScheduledSourceNode.cpp +1 -2
  15. package/common/cpp/core/BaseAudioContext.cpp +5 -40
  16. package/common/cpp/core/BaseAudioContext.h +7 -18
  17. package/common/cpp/core/Constants.h +11 -2
  18. package/common/cpp/core/PeriodicWave.cpp +2 -2
  19. package/common/cpp/jsi/JsiHostObject.h +1 -1
  20. package/common/cpp/jsi/JsiPromise.h +1 -0
  21. package/common/cpp/utils/AudioUtils.cpp +7 -0
  22. package/common/cpp/utils/AudioUtils.h +5 -1
  23. package/common/cpp/utils/FFTFrame.cpp +69 -23
  24. package/common/cpp/utils/FFTFrame.h +25 -10
  25. package/common/cpp/utils/VectorMath.cpp +10 -0
  26. package/common/cpp/utils/VectorMath.h +2 -0
  27. package/ios/core/AudioPlayer.h +3 -2
  28. package/ios/core/AudioPlayer.m +49 -15
  29. package/ios/core/IOSAudioPlayer.h +3 -1
  30. package/ios/core/IOSAudioPlayer.mm +46 -10
  31. package/lib/module/core/AnalyserNode.js +59 -0
  32. package/lib/module/core/AnalyserNode.js.map +1 -0
  33. package/lib/module/core/AudioContext.js +2 -2
  34. package/lib/module/core/AudioContext.js.map +1 -1
  35. package/lib/module/core/AudioNode.js +5 -5
  36. package/lib/module/core/AudioNode.js.map +1 -1
  37. package/lib/module/core/BaseAudioContext.js +8 -0
  38. package/lib/module/core/BaseAudioContext.js.map +1 -1
  39. package/lib/module/index.js +35 -6
  40. package/lib/module/index.js.map +1 -1
  41. package/lib/module/index.native.js +1 -0
  42. package/lib/module/index.native.js.map +1 -1
  43. package/lib/typescript/core/AnalyserNode.d.ts +18 -0
  44. package/lib/typescript/core/AnalyserNode.d.ts.map +1 -0
  45. package/lib/typescript/core/AudioContext.d.ts +1 -1
  46. package/lib/typescript/core/AudioContext.d.ts.map +1 -1
  47. package/lib/typescript/core/AudioNode.d.ts +2 -2
  48. package/lib/typescript/core/AudioNode.d.ts.map +1 -1
  49. package/lib/typescript/core/BaseAudioContext.d.ts +2 -0
  50. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  51. package/lib/typescript/index.d.ts +16 -3
  52. package/lib/typescript/index.d.ts.map +1 -1
  53. package/lib/typescript/index.native.d.ts +1 -0
  54. package/lib/typescript/index.native.d.ts.map +1 -1
  55. package/lib/typescript/interfaces.d.ts +13 -1
  56. package/lib/typescript/interfaces.d.ts.map +1 -1
  57. package/package.json +4 -2
  58. package/src/core/AnalyserNode.ts +85 -0
  59. package/src/core/AudioContext.ts +2 -2
  60. package/src/core/AudioNode.ts +5 -5
  61. package/src/core/BaseAudioContext.ts +10 -0
  62. package/src/index.native.ts +1 -0
  63. package/src/index.ts +57 -6
  64. package/src/interfaces.ts +15 -1
  65. package/src/specs/global.d.ts +1 -1
package/README.md CHANGED
@@ -17,11 +17,25 @@ allowing developers to generate and modify audio in exact same way it is possibl
  yarn add react-native-audio-api
  ```

+ ## Usage with expo
+
+ `react-native-audio-api` contains native custom code and isn't part of the Expo Go application. In order to be available in expo managed builds, you have to use Expo development build. Simplest way on starting local expo dev builds, is to use:
+
+ ```bash
+ # Build native iOS project
+ npx expo run:ios
+ # Build native Android project
+ npx expo run:android
+ ```
+
+ To learn more about expo development builds, please check out [Development Builds Documentation](https://docs.expo.dev/develop/development-builds/introduction/).
+
  ## Documentation

- `react-native-audio-api` tries to strictly follow the Web Audi API specification, which can be found at [https://www.w3.org/TR/webaudio/](https://www.w3.org/TR/webaudio/).
- <br />
- [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) is useful resource to get familiar with audio processing basics.
+ Check out our dedicated documentation page for info about this library, API reference and more:
+ [https://software-mansion-labs.github.io/react-native-audio-api/](https://software-mansion-labs.github.io/react-native-audio-api/)
+
+ You can also check out [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) for fully detailed description of the audio api.

  ## Coverage

@@ -29,14 +43,6 @@ Our current coverage of Web Audio API specification can be found here: [Web Audi

  ## Examples

- <div align="center">
-
- <a href="https://www.youtube.com/watch?v=npALr9IIDkI" target="_blank" rel="noopener noreferrer">
-   <img src="./docs/assets/rnaa-example-01-thumbnail.png" width="640" />
- </a>
-
- </div>
-
  The source code for the example application is under the [`/apps/common-app`](./apps/common-app/) directory. If you want to play with the API but don't feel like trying it on a real app, you can run the example project. Check [Example README](./apps/fabric-example/README.md) for installation instructions.

  ## Your feedback

package/android/src/main/cpp/core/AudioPlayer.cpp CHANGED
@@ -21,17 +21,36 @@ AudioPlayer::AudioPlayer(
  ->setDataCallback(this)
  ->openStream(mStream_);

+ sampleRate_ = mStream_->getSampleRate();
  mBus_ = std::make_shared<AudioBus>(
- getSampleRate(), getBufferSizeInFrames(), CHANNEL_COUNT);
+ sampleRate_, RENDER_QUANTUM_SIZE, CHANNEL_COUNT);
  isInitialized_ = true;
  }

- int AudioPlayer::getSampleRate() const {
- return mStream_->getSampleRate();
+ AudioPlayer::AudioPlayer(
+ const std::function<void(AudioBus *, int)> &renderAudio,
+ int sampleRate)
+ : renderAudio_(renderAudio) {
+ AudioStreamBuilder builder;
+
+ builder.setSharingMode(SharingMode::Exclusive)
+ ->setFormat(AudioFormat::Float)
+ ->setFormatConversionAllowed(true)
+ ->setPerformanceMode(PerformanceMode::LowLatency)
+ ->setChannelCount(CHANNEL_COUNT)
+ ->setSampleRateConversionQuality(SampleRateConversionQuality::Medium)
+ ->setDataCallback(this)
+ ->setSampleRate(sampleRate)
+ ->openStream(mStream_);
+
+ sampleRate_ = sampleRate;
+ mBus_ = std::make_shared<AudioBus>(
+ sampleRate_, RENDER_QUANTUM_SIZE, CHANNEL_COUNT);
+ isInitialized_ = true;
  }

- int AudioPlayer::getBufferSizeInFrames() const {
- return mStream_->getBufferSizeInFrames();
+ int AudioPlayer::getSampleRate() const {
+ return sampleRate_;
  }

  void AudioPlayer::start() {
@@ -59,14 +78,22 @@ DataCallbackResult AudioPlayer::onAudioReady(
  }

  auto buffer = static_cast<float *>(audioData);
- renderAudio_(mBus_.get(), numFrames);
+ int processedFrames = 0;

- // TODO: optimize this with SIMD?
- for (int32_t i = 0; i < numFrames; i += 1) {
- for (int channel = 0; channel < CHANNEL_COUNT; channel += 1) {
- buffer[i * CHANNEL_COUNT + channel] =
- mBus_->getChannel(channel)->getData()[i];
+ while (processedFrames < numFrames) {
+ int framesToProcess =
+ std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE);
+ renderAudio_(mBus_.get(), framesToProcess);
+
+ // TODO: optimize this with SIMD?
+ for (int i = 0; i < framesToProcess; i++) {
+ for (int channel = 0; channel < CHANNEL_COUNT; channel += 1) {
+ buffer[(processedFrames + i) * CHANNEL_COUNT + channel] =
+ mBus_->getChannel(channel)->getData()[i];
+ }
  }
+
+ processedFrames += framesToProcess;
  }

  return DataCallbackResult::Continue;
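
Note on the change above: `onAudioReady` no longer renders `numFrames` in one call. It now walks the device buffer in `RENDER_QUANTUM_SIZE` chunks, so the render graph is asked for at most one render quantum at a time regardless of the device buffer size. A minimal, self-contained sketch of that chunking pattern follows (simplified to a single channel; `renderQuantum`, `renderCallback`, and `fillBuffer` are illustrative names, not the package's API):

```cpp
#include <algorithm>
#include <cstdio>
#include <functional>
#include <vector>

// Sketch of the chunking pattern: however many frames the device asks for,
// the render callback is only ever invoked with at most `renderQuantum`
// frames at a time.
void fillBuffer(
    std::vector<float> &out, // mono output buffer
    int numFrames,
    int renderQuantum,
    const std::function<void(float *, int)> &renderCallback) {
  std::vector<float> scratch(renderQuantum);
  int processed = 0;

  while (processed < numFrames) {
    int chunk = std::min(numFrames - processed, renderQuantum);
    renderCallback(scratch.data(), chunk); // render one quantum
    std::copy_n(scratch.begin(), chunk, out.begin() + processed);
    processed += chunk;
  }
}

int main() {
  std::vector<float> out(300);
  int calls = 0;
  fillBuffer(out, 300, 128, [&](float *dst, int frames) {
    ++calls;
    std::fill_n(dst, frames, 0.0f); // silence, just for the demo
  });
  std::printf("render callback invoked %d times\n", calls); // 128 + 128 + 44 -> 3
  return 0;
}
```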

package/android/src/main/cpp/core/AudioPlayer.h CHANGED
@@ -2,6 +2,7 @@
  #include <oboe/Oboe.h>
  #include <memory>
+ #include <functional>

  namespace audioapi {

@@ -13,9 +14,11 @@ class AudioBus;
  class AudioPlayer : public AudioStreamDataCallback {
  public:
  explicit AudioPlayer(const std::function<void(AudioBus *, int)> &renderAudio);
+ AudioPlayer(
+ const std::function<void(AudioBus *, int)> &renderAudio,
+ int sampleRate);

  [[nodiscard]] int getSampleRate() const;
- [[nodiscard]] int getBufferSizeInFrames() const;
  void start();
  void stop();

@@ -29,6 +32,7 @@ class AudioPlayer : public AudioStreamDataCallback {
  std::shared_ptr<AudioStream> mStream_;
  std::shared_ptr<AudioBus> mBus_;
  bool isInitialized_ = false;
+ int sampleRate_;
  };

  } // namespace audioapi

package/common/cpp/HostObjects/AnalyserNodeHostObject.h ADDED
@@ -0,0 +1,151 @@
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "AnalyserNode.h"
+ #include "AudioNodeHostObject.h"
+
+ namespace audioapi {
+ using namespace facebook;
+
+ class AnalyserNodeHostObject : public AudioNodeHostObject {
+ public:
+ explicit AnalyserNodeHostObject(const std::shared_ptr<AnalyserNode> &node)
+ : AudioNodeHostObject(node) {
+ addGetters(
+ JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, fftSize),
+ JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, frequencyBinCount),
+ JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, minDecibels),
+ JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, maxDecibels),
+ JSI_EXPORT_PROPERTY_GETTER(AnalyserNodeHostObject, smoothingTimeConstant));
+
+ addFunctions(
+ JSI_EXPORT_FUNCTION(
+ AnalyserNodeHostObject, getFloatFrequencyData),
+ JSI_EXPORT_FUNCTION(
+ AnalyserNodeHostObject, getByteFrequencyData),
+ JSI_EXPORT_FUNCTION(
+ AnalyserNodeHostObject, getFloatTimeDomainData),
+ JSI_EXPORT_FUNCTION(
+ AnalyserNodeHostObject, getByteTimeDomainData));
+
+ addSetters(
+ JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, fftSize),
+ JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, minDecibels),
+ JSI_EXPORT_PROPERTY_SETTER(AnalyserNodeHostObject, maxDecibels),
+ JSI_EXPORT_PROPERTY_SETTER(
+ AnalyserNodeHostObject, smoothingTimeConstant));
+ }
+
+ JSI_PROPERTY_GETTER(fftSize) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ return {static_cast<int>(analyserNode->getFftSize())};
+ }
+
+ JSI_PROPERTY_GETTER(frequencyBinCount) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ return {static_cast<int>(analyserNode->getFrequencyBinCount())};
+ }
+
+ JSI_PROPERTY_GETTER(minDecibels) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ return {analyserNode->getMinDecibels()};
+ }
+
+ JSI_PROPERTY_GETTER(maxDecibels) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ return {analyserNode->getMaxDecibels()};
+ }
+
+ JSI_PROPERTY_GETTER(smoothingTimeConstant) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ return {analyserNode->getSmoothingTimeConstant()};
+ }
+
+ JSI_HOST_FUNCTION(getFloatFrequencyData) {
+ auto destination = args[0].getObject(runtime).asArray(runtime);
+ auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+ auto data = new float[length];
+
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ analyserNode->getFloatFrequencyData(data, length);
+
+ for (int i = 0; i < length; i++) {
+ destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+ }
+
+ return jsi::Value::undefined();
+ }
+
+ JSI_HOST_FUNCTION(getByteFrequencyData) {
+ auto destination = args[0].getObject(runtime).asArray(runtime);
+ auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+ auto data = new uint8_t[length];
+
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ analyserNode->getByteFrequencyData(data, length);
+
+ for (int i = 0; i < length; i++) {
+ destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+ }
+
+ return jsi::Value::undefined();
+ }
+
+ JSI_HOST_FUNCTION(getFloatTimeDomainData) {
+ auto destination = args[0].getObject(runtime).asArray(runtime);
+ auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+ auto data = new float[length];
+
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ analyserNode->getFloatTimeDomainData(data, length);
+
+ for (int i = 0; i < length; i++) {
+ destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+ }
+
+ return jsi::Value::undefined();
+ }
+
+ JSI_HOST_FUNCTION(getByteTimeDomainData) {
+ auto destination = args[0].getObject(runtime).asArray(runtime);
+ auto length = static_cast<int>(destination.getProperty(runtime, "length").asNumber());
+ auto data = new uint8_t[length];
+
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ analyserNode->getByteTimeDomainData(data, length);
+
+ for (int i = 0; i < length; i++) {
+ destination.setValueAtIndex(runtime, i, jsi::Value(data[i]));
+ }
+
+ return jsi::Value::undefined();
+ }
+
+ JSI_PROPERTY_SETTER(fftSize) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ auto fftSize = static_cast<size_t>(value.getNumber());
+ analyserNode->setFftSize(fftSize);
+ }
+
+ JSI_PROPERTY_SETTER(minDecibels) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ auto minDecibels = static_cast<int>(value.getNumber());
+ analyserNode->setMinDecibels(minDecibels);
+ }
+
+ JSI_PROPERTY_SETTER(maxDecibels) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ auto maxDecibels = static_cast<int>(value.getNumber());
+ analyserNode->setMaxDecibels(maxDecibels);
+ }
+
+ JSI_PROPERTY_SETTER(smoothingTimeConstant) {
+ auto analyserNode = std::static_pointer_cast<AnalyserNode>(node_);
+ auto smoothingTimeConstant = static_cast<int>(value.getNumber());
+ analyserNode->setSmoothingTimeConstant(smoothingTimeConstant);
+ }
+ };
+ } // namespace audioapi
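
Each of the data-getting host functions above follows the same pattern: read the `length` of a pre-allocated JS array, fill a native scratch buffer from the `AnalyserNode`, then write the values back one by one with `setValueAtIndex`. For reference, here is a hedged, generic sketch of that pattern in isolation — not the package's code — using a `std::vector` scratch buffer (released automatically when the function returns) and a `fillData` callback as a stand-in for the analyser getters:

```cpp
#include <jsi/jsi.h>

#include <functional>
#include <vector>

using namespace facebook;

// Sketch only: copy native float data into a pre-allocated JS array passed
// as the first argument of a JSI host function.
jsi::Value copyFloatsIntoJsArray(
    jsi::Runtime &runtime,
    const jsi::Value *args,
    const std::function<void(float *, size_t)> &fillData) {
  auto destination = args[0].getObject(runtime).asArray(runtime);
  auto length = static_cast<size_t>(
      destination.getProperty(runtime, "length").asNumber());

  std::vector<float> data(length);
  fillData(data.data(), length); // e.g. forwards to an AnalyserNode getter

  for (size_t i = 0; i < length; i++) {
    destination.setValueAtIndex(
        runtime, i, jsi::Value(static_cast<double>(data[i])));
  }
  return jsi::Value::undefined();
}
```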

package/common/cpp/HostObjects/AudioAPIInstallerHostObject.h CHANGED
@@ -35,10 +35,18 @@ class AudioAPIInstallerHostObject
  }

  JSI_HOST_FUNCTION(createAudioContext) {
- auto audioContext = std::make_shared<AudioContext>();
- auto audioContextHostObject =
+ std::shared_ptr<AudioContext> audioContext;
+ if (args[0].isUndefined()) {
+ audioContext = std::make_shared<AudioContext>();
+ } else {
+ int sampleRate = static_cast<int>(args[0].getNumber());
+ audioContext = std::make_shared<AudioContext>(sampleRate);
+ }
+
+ auto audioContextHostObject =
  std::make_shared<AudioContextHostObject>(audioContext, promiseVendor_);
- return jsi::Object::createFromHostObject(runtime, audioContextHostObject);
+
+ return jsi::Object::createFromHostObject(runtime, audioContextHostObject);
  }

  private:

package/common/cpp/HostObjects/AudioNodeHostObject.h CHANGED
@@ -54,6 +54,11 @@ class AudioNodeHostObject : public JsiHostObject {
  }

  JSI_HOST_FUNCTION(disconnect) {
+ if(args[0].isUndefined()) {
+ node_->disconnect();
+ return jsi::Value::undefined();
+ }
+
  auto node =
  args[0].getObject(runtime).getHostObject<AudioNodeHostObject>(runtime);
  node_->disconnect(std::shared_ptr<AudioNodeHostObject>(node)->node_);

package/common/cpp/HostObjects/BaseAudioContextHostObject.h CHANGED
@@ -17,6 +17,7 @@
  #include "OscillatorNodeHostObject.h"
  #include "PeriodicWaveHostObject.h"
  #include "StereoPannerNodeHostObject.h"
+ #include "AnalyserNodeHostObject.h"

  namespace audioapi {
  using namespace facebook;
@@ -41,6 +42,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferSource),
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBuffer),
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
+ JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser),
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, decodeAudioDataSource));
  }

@@ -131,6 +133,12 @@ class BaseAudioContextHostObject : public JsiHostObject {
  return jsi::Object::createFromHostObject(runtime, periodicWaveHostObject);
  }

+ JSI_HOST_FUNCTION(createAnalyser) {
+ auto analyser = context_->createAnalyser();
+ auto analyserHostObject = std::make_shared<AnalyserNodeHostObject>(analyser);
+ return jsi::Object::createFromHostObject(runtime, analyserHostObject);
+ }
+
  JSI_HOST_FUNCTION(decodeAudioDataSource) {
  auto sourcePath = args[0].getString(runtime).utf8(runtime);


package/common/cpp/core/AnalyserNode.cpp ADDED
@@ -0,0 +1,237 @@
+ #include "AnalyserNode.h"
+
+ #include <complex>
+
+ #include "AudioArray.h"
+ #include "AudioBus.h"
+ #include "AudioUtils.h"
+ #include "BaseAudioContext.h"
+ #include "FFTFrame.h"
+ #include "VectorMath.h"
+
+ namespace audioapi {
+ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
+ : AudioNode(context),
+ fftSize_(DEFAULT_FFT_SIZE),
+ minDecibels_(DEFAULT_MIN_DECIBELS),
+ maxDecibels_(DEFAULT_MAX_DECIBELS),
+ smoothingTimeConstant_(DEFAULT_SMOOTHING_TIME_CONSTANT),
+ vWriteIndex_(0) {
+ inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
+ fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+ magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
+ isInitialized_ = true;
+ }
+
+ size_t AnalyserNode::getFftSize() const {
+ return fftSize_;
+ }
+
+ size_t AnalyserNode::getFrequencyBinCount() const {
+ return fftSize_ / 2;
+ }
+
+ double AnalyserNode::getMinDecibels() const {
+ return minDecibels_;
+ }
+
+ double AnalyserNode::getMaxDecibels() const {
+ return maxDecibels_;
+ }
+
+ double AnalyserNode::getSmoothingTimeConstant() const {
+ return smoothingTimeConstant_;
+ }
+
+ void AnalyserNode::setFftSize(size_t fftSize) {
+ if (fftSize_ == fftSize) {
+ return;
+ }
+
+ fftSize_ = fftSize;
+ fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+ magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
+ }
+
+ void AnalyserNode::setMinDecibels(double minDecibels) {
+ minDecibels_ = minDecibels;
+ }
+
+ void AnalyserNode::setMaxDecibels(double maxDecibels) {
+ maxDecibels_ = maxDecibels;
+ }
+
+ void AnalyserNode::setSmoothingTimeConstant(double smoothingTimeConstant) {
+ smoothingTimeConstant_ = smoothingTimeConstant;
+ }
+
+ void AnalyserNode::getFloatFrequencyData(float *data, size_t length) {
+ doFFTAnalysis();
+
+ length = std::min<size_t>(magnitudeBuffer_->getSize(), length);
+ VectorMath::linearToDecibels(magnitudeBuffer_->getData(), data, length);
+ }
+
+ void AnalyserNode::getByteFrequencyData(uint8_t *data, size_t length) {
+ doFFTAnalysis();
+
+ auto magnitudeBufferData = magnitudeBuffer_->getData();
+ length = std::min<size_t>(magnitudeBuffer_->getSize(), length);
+
+ const auto rangeScaleFactor =
+ maxDecibels_ == minDecibels_ ? 1 : 1 / (maxDecibels_ - minDecibels_);
+
+ for (size_t i = 0; i < length; i++) {
+ auto dbMag = magnitudeBufferData[i] == 0
+ ? minDecibels_
+ : AudioUtils::linearToDecibels(magnitudeBufferData[i]);
+ auto scaledValue = UINT8_MAX * (dbMag - minDecibels_) * rangeScaleFactor;
+
+ if (scaledValue < 0) {
+ scaledValue = 0;
+ }
+ if (scaledValue > UINT8_MAX) {
+ scaledValue = UINT8_MAX;
+ }
+
+ data[i] = static_cast<uint8_t>(scaledValue);
+ }
+ }
+
+ void AnalyserNode::getFloatTimeDomainData(float *data, size_t length) {
+ auto size = std::min(fftSize_, length);
+
+ for (size_t i = 0; i < size; i++) {
+ data[i] = inputBuffer_->getData()
+ [(vWriteIndex_ + i - fftSize_ + inputBuffer_->getSize()) %
+ inputBuffer_->getSize()];
+ }
+ }
+
+ void AnalyserNode::getByteTimeDomainData(uint8_t *data, size_t length) {
+ auto size = std::min(fftSize_, length);
+
+ for (size_t i = 0; i < size; i++) {
+ auto value = inputBuffer_->getData()
+ [(vWriteIndex_ + i - fftSize_ + inputBuffer_->getSize()) %
+ inputBuffer_->getSize()];
+
+ float scaledValue = 128 * (value + 1);
+
+ if (scaledValue < 0) {
+ scaledValue = 0;
+ }
+ if (scaledValue > UINT8_MAX) {
+ scaledValue = UINT8_MAX;
+ }
+
+ data[i] = static_cast<uint8_t>(scaledValue);
+ }
+ }
+
+ void AnalyserNode::processNode(
+ audioapi::AudioBus *processingBus,
+ int framesToProcess) {
+ if (!isInitialized_) {
+ processingBus->zero();
+ return;
+ }
+
+ // Analyser should behave like a sniffer node, it should not modify the
+ // processingBus but instead copy the data to its own input buffer.
+
+ if (downMixBus_ == nullptr) {
+ downMixBus_ = std::make_unique<AudioBus>(
+ context_->getSampleRate(), processingBus->getSize(), 1);
+ }
+
+ downMixBus_->copy(processingBus);
+
+ if (vWriteIndex_ + framesToProcess > inputBuffer_->getSize()) {
+ auto framesToCopy = inputBuffer_->getSize() - vWriteIndex_;
+ memcpy(
+ inputBuffer_->getData() + vWriteIndex_,
+ downMixBus_->getChannel(0)->getData(),
+ framesToCopy * sizeof(float));
+
+ vWriteIndex_ = 0;
+ framesToProcess -= framesToCopy;
+ }
+
+ memcpy(
+ inputBuffer_->getData() + vWriteIndex_,
+ downMixBus_->getChannel(0)->getData(),
+ framesToProcess * sizeof(float));
+
+ vWriteIndex_ += framesToProcess;
+ if (vWriteIndex_ >= inputBuffer_->getSize()) {
+ vWriteIndex_ = 0;
+ }
+
+ shouldDoFFTAnalysis_ = true;
+ }
+
+ void AnalyserNode::doFFTAnalysis() {
+ if (!shouldDoFFTAnalysis_) {
+ return;
+ }
+
+ shouldDoFFTAnalysis_ = false;
+
+ // We need to copy the fftSize elements from input buffer to a temporary
+ // buffer to apply the window.
+ AudioArray tempBuffer(this->fftSize_);
+
+ // We want to copy last fftSize_ elements added to the input buffer(fftSize_
+ // elements before vWriteIndex_). However inputBuffer_ works like a circular
+ // buffer so we have two cases to consider.
+ if (vWriteIndex_ < fftSize_) {
+ tempBuffer.copy(
+ inputBuffer_.get(),
+ vWriteIndex_ - fftSize_ + inputBuffer_->getSize(),
+ 0,
+ fftSize_ - vWriteIndex_);
+ tempBuffer.copy(
+ inputBuffer_.get(), 0, fftSize_ - vWriteIndex_, vWriteIndex_);
+ } else {
+ tempBuffer.copy(inputBuffer_.get(), vWriteIndex_ - fftSize_, 0, fftSize_);
+ }
+
+ AnalyserNode::applyWindow(tempBuffer.getData(), fftSize_);
+
+ // do fft analysis - get frequency domain data
+ fftFrame_->doFFT(tempBuffer.getData());
+
+ auto *realFFTFrameData = fftFrame_->getRealData();
+ auto *imaginaryFFTFrameData = fftFrame_->getImaginaryData();
+
+ // Zero out nquist component
+ imaginaryFFTFrameData[0] = 0.0f;
+
+ const float magnitudeScale = 1.0f / static_cast<float>(fftSize_);
+ auto magnitudeBufferData = magnitudeBuffer_->getData();
+
+ for (size_t i = 0; i < magnitudeBuffer_->getSize(); i++) {
+ std::complex<float> c(realFFTFrameData[i], imaginaryFFTFrameData[i]);
+ auto scalarMagnitude = std::abs(c) * magnitudeScale;
+ magnitudeBufferData[i] = static_cast<float>(
+ smoothingTimeConstant_ * magnitudeBufferData[i] +
+ (1 - smoothingTimeConstant_) * scalarMagnitude);
+ }
+ }
+
+ void AnalyserNode::applyWindow(float *data, size_t length) {
+ // https://www.sciencedirect.com/topics/engineering/blackman-window
+ auto alpha = 0.16f;
+ auto a0 = 0.5f * (1 - alpha);
+ auto a1 = 0.5f;
+ auto a2 = 0.5f * alpha;
+
+ for (int i = 0; i < length; ++i) {
+ auto x = static_cast<float>(i) / static_cast<float>(length);
+ auto window = a0 - a1 * cos(2 * static_cast<float>(M_PI) * x) +
+ a2 * cos(4 * static_cast<float>(M_PI) * x);
+ data[i] *= window;
+ }
+ }
+ } // namespace audioapi
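
The least obvious part of `doFFTAnalysis()` above is the two-case copy that pulls the most recent `fftSize_` samples out of the circular `inputBuffer_`: the window either sits entirely before `vWriteIndex_`, or it wraps around the end of the buffer. A standalone sketch of that read, with `std::vector` standing in for the package's `AudioArray` and illustrative names:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch of the circular-buffer read used by doFFTAnalysis(): copy the
// `fftSize` most recently written samples (the ones just before `writeIndex`)
// out of a ring buffer into a linear scratch buffer.
std::vector<float> lastSamples(
    const std::vector<float> &ring, size_t writeIndex, size_t fftSize) {
  std::vector<float> out(fftSize);

  if (writeIndex < fftSize) {
    // The window wraps: take the tail of the ring first, then the head.
    size_t fromTail = fftSize - writeIndex;
    size_t tailStart = ring.size() - fromTail;
    std::copy(ring.begin() + tailStart, ring.end(), out.begin());
    std::copy(ring.begin(), ring.begin() + writeIndex, out.begin() + fromTail);
  } else {
    // Contiguous case: the whole window sits right before writeIndex.
    std::copy(ring.begin() + (writeIndex - fftSize),
              ring.begin() + writeIndex,
              out.begin());
  }
  return out;
}
```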

package/common/cpp/core/AnalyserNode.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <memory>
+
+ #include "AudioNode.h"
+
+ namespace audioapi {
+
+ class AudioBus;
+ class AudioArray;
+ class FFTFrame;
+
+ class AnalyserNode : public AudioNode {
+ public:
+ explicit AnalyserNode(BaseAudioContext *context);
+
+ size_t getFftSize() const;
+ size_t getFrequencyBinCount() const;
+ double getMinDecibels() const;
+ double getMaxDecibels() const;
+
+ double getSmoothingTimeConstant() const;
+ void setFftSize(size_t fftSize);
+ void setMinDecibels(double minDecibels);
+ void setMaxDecibels(double maxDecibels);
+ void setSmoothingTimeConstant(double smoothingTimeConstant);
+
+ void getFloatFrequencyData(float *data, size_t length);
+ void getByteFrequencyData(uint8_t *data, size_t length);
+ void getFloatTimeDomainData(float *data, size_t length);
+ void getByteTimeDomainData(uint8_t *data, size_t length);
+
+ protected:
+ void processNode(AudioBus *processingBus, int framesToProcess) override;
+
+ private:
+ size_t fftSize_;
+ double minDecibels_;
+ double maxDecibels_;
+ double smoothingTimeConstant_;
+
+ std::unique_ptr<AudioArray> inputBuffer_;
+ std::unique_ptr<AudioBus> downMixBus_;
+ int vWriteIndex_;
+
+ std::unique_ptr<FFTFrame> fftFrame_;
+ std::unique_ptr<AudioArray> magnitudeBuffer_;
+ bool shouldDoFFTAnalysis_ { true };
+
+ void doFFTAnalysis();
+ static void applyWindow(float *data, size_t length);
+ };
+
+ } // namespace audioapi
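
As a worked illustration of the byte-frequency mapping declared here and implemented in `AnalyserNode::getByteFrequencyData`: each smoothed magnitude is converted to decibels and then placed linearly within the `[minDecibels, maxDecibels]` range, scaled to 0–255. The sketch below assumes the usual amplitude formula `20 * log10(x)` for `AudioUtils::linearToDecibels` and the Web Audio default range of -100 dB to -30 dB; both are assumptions, not values read from this diff:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Sketch of the byte-frequency-data mapping:
// magnitude -> decibels -> position inside [minDecibels, maxDecibels] -> 0..255.
uint8_t magnitudeToByte(float magnitude, double minDecibels, double maxDecibels) {
  double db = magnitude == 0 ? minDecibels : 20.0 * std::log10(magnitude);
  double scale = maxDecibels == minDecibels ? 1.0 : 1.0 / (maxDecibels - minDecibels);
  double scaled = 255.0 * (db - minDecibels) * scale;
  return static_cast<uint8_t>(std::clamp(scaled, 0.0, 255.0));
}

int main() {
  // With the Web Audio defaults (-100 dB, -30 dB), a magnitude of 0.01 (-40 dB)
  // lands about 86% of the way up the range.
  std::printf("%d\n", magnitudeToByte(0.01f, -100.0, -30.0)); // prints 218
  return 0;
}
```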