react-native-audio-api 0.10.0-nightly-dedc2a2-20251029 → 0.10.0-nightly-e16d7ff-20251031

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/RNAudioAPI.podspec +7 -0
  2. package/android/src/main/cpp/audioapi/CMakeLists.txt +6 -0
  3. package/android/src/main/java/com/swmansion/audioapi/system/MediaNotificationManager.kt +11 -0
  4. package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +18 -0
  5. package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +1 -0
  6. package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.cpp +47 -0
  7. package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.h +20 -0
  8. package/common/cpp/audioapi/core/AudioNode.h +3 -2
  9. package/common/cpp/audioapi/core/BaseAudioContext.cpp +10 -0
  10. package/common/cpp/audioapi/core/BaseAudioContext.h +2 -0
  11. package/common/cpp/audioapi/core/effects/ConvolverNode.cpp +210 -0
  12. package/common/cpp/audioapi/core/effects/ConvolverNode.h +55 -0
  13. package/common/cpp/audioapi/core/sources/StreamerNode.cpp +59 -58
  14. package/common/cpp/audioapi/core/sources/StreamerNode.h +37 -8
  15. package/common/cpp/audioapi/core/utils/AudioNodeManager.cpp +5 -0
  16. package/common/cpp/audioapi/dsp/AudioUtils.cpp +1 -1
  17. package/common/cpp/audioapi/dsp/Convolver.cpp +213 -0
  18. package/common/cpp/audioapi/dsp/Convolver.h +45 -0
  19. package/common/cpp/audioapi/dsp/FFT.cpp +0 -26
  20. package/common/cpp/audioapi/dsp/FFT.h +26 -2
  21. package/common/cpp/audioapi/utils/AlignedAllocator.hpp +50 -0
  22. package/common/cpp/audioapi/utils/AudioBus.cpp +28 -0
  23. package/common/cpp/audioapi/utils/AudioBus.h +3 -0
  24. package/common/cpp/test/CMakeLists.txt +16 -14
  25. package/lib/commonjs/api.js +7 -0
  26. package/lib/commonjs/api.js.map +1 -1
  27. package/lib/commonjs/api.web.js +8 -0
  28. package/lib/commonjs/api.web.js.map +1 -1
  29. package/lib/commonjs/core/BaseAudioContext.js +12 -0
  30. package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
  31. package/lib/commonjs/core/ConvolverNode.js +37 -0
  32. package/lib/commonjs/core/ConvolverNode.js.map +1 -0
  33. package/lib/commonjs/types.js +4 -0
  34. package/lib/commonjs/web-core/AudioContext.js +12 -0
  35. package/lib/commonjs/web-core/AudioContext.js.map +1 -1
  36. package/lib/commonjs/web-core/ConvolverNode.js +40 -0
  37. package/lib/commonjs/web-core/ConvolverNode.js.map +1 -0
  38. package/lib/commonjs/web-core/ConvolverNodeOptions.js +6 -0
  39. package/lib/commonjs/web-core/ConvolverNodeOptions.js.map +1 -0
  40. package/lib/commonjs/web-core/OfflineAudioContext.js +12 -0
  41. package/lib/commonjs/web-core/OfflineAudioContext.js.map +1 -1
  42. package/lib/module/api.js +1 -0
  43. package/lib/module/api.js.map +1 -1
  44. package/lib/module/api.web.js +1 -0
  45. package/lib/module/api.web.js.map +1 -1
  46. package/lib/module/core/BaseAudioContext.js +12 -0
  47. package/lib/module/core/BaseAudioContext.js.map +1 -1
  48. package/lib/module/core/ConvolverNode.js +31 -0
  49. package/lib/module/core/ConvolverNode.js.map +1 -0
  50. package/lib/module/types.js +2 -0
  51. package/lib/module/web-core/AudioContext.js +12 -0
  52. package/lib/module/web-core/AudioContext.js.map +1 -1
  53. package/lib/module/web-core/ConvolverNode.js +34 -0
  54. package/lib/module/web-core/ConvolverNode.js.map +1 -0
  55. package/lib/module/web-core/ConvolverNodeOptions.js +4 -0
  56. package/lib/module/web-core/ConvolverNodeOptions.js.map +1 -0
  57. package/lib/module/web-core/OfflineAudioContext.js +12 -0
  58. package/lib/module/web-core/OfflineAudioContext.js.map +1 -1
  59. package/lib/typescript/api.d.ts +1 -0
  60. package/lib/typescript/api.d.ts.map +1 -1
  61. package/lib/typescript/api.web.d.ts +1 -0
  62. package/lib/typescript/api.web.d.ts.map +1 -1
  63. package/lib/typescript/core/BaseAudioContext.d.ts +3 -1
  64. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  65. package/lib/typescript/core/ConvolverNode.d.ts +12 -0
  66. package/lib/typescript/core/ConvolverNode.d.ts.map +1 -0
  67. package/lib/typescript/interfaces.d.ts +5 -0
  68. package/lib/typescript/interfaces.d.ts.map +1 -1
  69. package/lib/typescript/types.d.ts +5 -0
  70. package/lib/typescript/types.d.ts.map +1 -1
  71. package/lib/typescript/web-core/AudioContext.d.ts +3 -0
  72. package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
  73. package/lib/typescript/web-core/BaseAudioContext.d.ts +2 -0
  74. package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -1
  75. package/lib/typescript/web-core/ConvolverNode.d.ts +11 -0
  76. package/lib/typescript/web-core/ConvolverNode.d.ts.map +1 -0
  77. package/lib/typescript/web-core/ConvolverNodeOptions.d.ts +6 -0
  78. package/lib/typescript/web-core/ConvolverNodeOptions.d.ts.map +1 -0
  79. package/lib/typescript/web-core/OfflineAudioContext.d.ts +3 -0
  80. package/lib/typescript/web-core/OfflineAudioContext.d.ts.map +1 -1
  81. package/package.json +1 -1
  82. package/src/api.ts +1 -0
  83. package/src/api.web.ts +1 -0
  84. package/src/core/BaseAudioContext.ts +23 -0
  85. package/src/core/ConvolverNode.ts +35 -0
  86. package/src/interfaces.ts +11 -0
  87. package/src/types.ts +7 -0
  88. package/src/web-core/AudioContext.tsx +25 -0
  89. package/src/web-core/BaseAudioContext.tsx +2 -0
  90. package/src/web-core/ConvolverNode.tsx +43 -0
  91. package/src/web-core/ConvolverNodeOptions.tsx +6 -0
  92. package/src/web-core/OfflineAudioContext.tsx +25 -0
@@ -32,6 +32,13 @@ Pod::Spec.new do |s|
       sss.header_dir = "audioapi"
       sss.header_mappings_dir = "ios/audioapi"
     end
+
+    ss.subspec "audioapi_dsp" do |sss|
+      sss.source_files = "common/cpp/audioapi/dsp/**/*.{cpp}"
+      sss.header_dir = "audioapi/dsp"
+      sss.header_mappings_dir = "common/cpp/audioapi/dsp"
+      sss.compiler_flags = "-O3"
+    end
   end
 
   s.ios.frameworks = 'CoreFoundation', 'CoreAudio', 'AudioToolbox', 'Accelerate', 'MediaPlayer', 'AVFoundation'
@@ -3,6 +3,12 @@ cmake_minimum_required(VERSION 3.12.0)
 file(GLOB_RECURSE ANDROID_CPP_SOURCES CONFIGURE_DEPENDS "${ANDROID_CPP_DIR}/audioapi/*.cpp")
 file(GLOB_RECURSE COMMON_CPP_SOURCES CONFIGURE_DEPENDS "${COMMON_CPP_DIR}/audioapi/*.cpp" "${COMMON_CPP_DIR}/audioapi/*.c")
 
+set_source_files_properties(
+  ${COMMON_CPP_SOURCES}/dsp/*.cpp
+  PROPERTIES
+  COMPILE_FLAGS "-O3"
+)
+
 set(INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/include)
 set(FFMPEG_INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/ffmpeg_include)
 set(EXTERNAL_DIR ${COMMON_CPP_DIR}/audioapi/external)
@@ -258,5 +258,16 @@ class MediaNotificationManager(
       }
       super.onDestroy()
     }
+
+    override fun onTimeout(startId: Int) {
+      stopForegroundService()
+    }
+
+    override fun onTimeout(
+      startId: Int,
+      fgsType: Int,
+    ) {
+      stopForegroundService()
+    }
   }
 }
@@ -5,6 +5,7 @@
 #include <audioapi/HostObjects/analysis/AnalyserNodeHostObject.h>
 #include <audioapi/HostObjects/destinations/AudioDestinationNodeHostObject.h>
 #include <audioapi/HostObjects/effects/BiquadFilterNodeHostObject.h>
+#include <audioapi/HostObjects/effects/ConvolverNodeHostObject.h>
 #include <audioapi/HostObjects/effects/GainNodeHostObject.h>
 #include <audioapi/HostObjects/effects/PeriodicWaveHostObject.h>
 #include <audioapi/HostObjects/effects/StereoPannerNodeHostObject.h>
@@ -49,6 +50,7 @@ BaseAudioContextHostObject::BaseAudioContextHostObject(
       JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferQueueSource),
       JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBuffer),
       JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
+      JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createConvolver),
       JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser));
 }
 
@@ -269,4 +271,20 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createAnalyser) {
   auto analyserHostObject = std::make_shared<AnalyserNodeHostObject>(analyser);
   return jsi::Object::createFromHostObject(runtime, analyserHostObject);
 }
+
+JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConvolver) {
+  auto disableNormalization = args[1].getBool();
+  std::shared_ptr<ConvolverNode> convolver;
+  if (args[0].isUndefined()) {
+    convolver = context_->createConvolver(nullptr, disableNormalization);
+  } else {
+    auto bufferHostObject =
+        args[0].getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
+    convolver = context_->createConvolver(
+        bufferHostObject->audioBuffer_, disableNormalization);
+  }
+  auto convolverHostObject =
+      std::make_shared<ConvolverNodeHostObject>(convolver);
+  return jsi::Object::createFromHostObject(runtime, convolverHostObject);
+}
 } // namespace audioapi
@@ -41,6 +41,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
   JSI_HOST_FUNCTION_DECL(createBuffer);
   JSI_HOST_FUNCTION_DECL(createPeriodicWave);
   JSI_HOST_FUNCTION_DECL(createAnalyser);
+  JSI_HOST_FUNCTION_DECL(createConvolver);
 
   std::shared_ptr<BaseAudioContext> context_;
 
@@ -0,0 +1,47 @@
+#include <audioapi/HostObjects/effects/ConvolverNodeHostObject.h>
+
+#include <audioapi/HostObjects/sources/AudioBufferHostObject.h>
+#include <audioapi/core/effects/ConvolverNode.h>
+
+namespace audioapi {
+
+ConvolverNodeHostObject::ConvolverNodeHostObject(
+    const std::shared_ptr<ConvolverNode> &node)
+    : AudioNodeHostObject(node) {
+  addGetters(
+      JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, normalize),
+      JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, buffer));
+  addSetters(
+      JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, normalize),
+      JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, buffer));
+}
+
+JSI_PROPERTY_GETTER_IMPL(ConvolverNodeHostObject, normalize) {
+  auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
+  return {convolverNode->getNormalize_()};
+}
+
+JSI_PROPERTY_GETTER_IMPL(ConvolverNodeHostObject, buffer) {
+  auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
+  auto buffer = convolverNode->getBuffer();
+  auto bufferHostObject = std::make_shared<AudioBufferHostObject>(buffer);
+  return jsi::Object::createFromHostObject(runtime, bufferHostObject);
+}
+
+JSI_PROPERTY_SETTER_IMPL(ConvolverNodeHostObject, normalize) {
+  auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
+  convolverNode->setNormalize(value.getBool());
+}
+
+JSI_PROPERTY_SETTER_IMPL(ConvolverNodeHostObject, buffer) {
+  auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
+  if (value.isNull()) {
+    convolverNode->setBuffer(nullptr);
+    return;
+  }
+
+  auto bufferHostObject =
+      value.getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
+  convolverNode->setBuffer(bufferHostObject->audioBuffer_);
+}
+} // namespace audioapi
@@ -0,0 +1,20 @@
+#pragma once
+
+#include <audioapi/HostObjects/AudioNodeHostObject.h>
+
+#include <memory>
+
+namespace audioapi {
+using namespace facebook;
+
+class ConvolverNode;
+
+class ConvolverNodeHostObject : public AudioNodeHostObject {
+ public:
+  explicit ConvolverNodeHostObject(const std::shared_ptr<ConvolverNode> &node);
+  JSI_PROPERTY_GETTER_DECL(normalize);
+  JSI_PROPERTY_GETTER_DECL(buffer);
+  JSI_PROPERTY_SETTER_DECL(normalize);
+  JSI_PROPERTY_SETTER_DECL(buffer);
+};
+} // namespace audioapi
@@ -41,6 +41,7 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
  protected:
   friend class AudioNodeManager;
   friend class AudioDestinationNode;
+  friend class ConvolverNode;
 
   BaseAudioContext *context_;
   std::shared_ptr<AudioBus> audioBus_;
@@ -68,10 +69,10 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   static std::string toString(ChannelCountMode mode);
   static std::string toString(ChannelInterpretation interpretation);
 
+  virtual std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus>& outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
   virtual std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus>&, int) = 0;
 
   bool isAlreadyProcessed();
-  std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus>& outputBus, int framesToProcess, bool checkIsAlreadyProcessed);
   std::shared_ptr<AudioBus> applyChannelCountMode(const std::shared_ptr<AudioBus> &processingBus);
   void mixInputsBuses(const std::shared_ptr<AudioBus>& processingBus);
 
@@ -81,7 +82,7 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
   void disconnectParam(const std::shared_ptr<AudioParam> &param);
 
   void onInputEnabled();
-  void onInputDisabled();
+  virtual void onInputDisabled();
   void onInputConnected(AudioNode *node);
   void onInputDisconnected(AudioNode *node);
 
@@ -2,6 +2,7 @@
 #include <audioapi/core/analysis/AnalyserNode.h>
 #include <audioapi/core/destinations/AudioDestinationNode.h>
 #include <audioapi/core/effects/BiquadFilterNode.h>
+#include <audioapi/core/effects/ConvolverNode.h>
 #include <audioapi/core/effects/GainNode.h>
 #include <audioapi/core/effects/StereoPannerNode.h>
 #include <audioapi/core/effects/WorkletNode.h>
@@ -182,6 +183,15 @@ std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
   return analyser;
 }
 
+std::shared_ptr<ConvolverNode> BaseAudioContext::createConvolver(
+    std::shared_ptr<AudioBuffer> buffer,
+    bool disableNormalization) {
+  auto convolver =
+      std::make_shared<ConvolverNode>(this, buffer, disableNormalization);
+  nodeManager_->addProcessingNode(convolver);
+  return convolver;
+}
+
 AudioNodeManager *BaseAudioContext::getNodeManager() {
   return nodeManager_.get();
 }
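For orientation, a minimal C++ usage sketch of the factory added above, limited to calls that appear in this diff (createConvolver, setNormalize, setBuffer). The helper name and the context/impulseResponse arguments are illustrative placeholders, not part of the package:

#include <audioapi/core/BaseAudioContext.h>
#include <audioapi/core/effects/ConvolverNode.h>
#include <audioapi/core/sources/AudioBuffer.h>

// Hypothetical helper: `context` and `impulseResponse` stand in for objects
// created elsewhere in the host app; they are not defined anywhere in this diff.
std::shared_ptr<audioapi::ConvolverNode> makeReverb(
    audioapi::BaseAudioContext *context,
    const std::shared_ptr<audioapi::AudioBuffer> &impulseResponse) {
  // Second argument is disableNormalization, mirroring the JSI call above.
  auto convolver = context->createConvolver(nullptr, /*disableNormalization=*/false);
  convolver->setNormalize(true);          // RMS-based gain normalization stays enabled
  convolver->setBuffer(impulseResponse);  // assigning the IR builds one Convolver per channel
  return convolver;
}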
@@ -28,6 +28,7 @@ class AudioBufferSourceNode;
 class AudioBufferQueueSourceNode;
 class AnalyserNode;
 class AudioEventHandlerRegistry;
+class ConvolverNode;
 class IAudioEventHandlerRegistry;
 class RecorderAdapterNode;
 class WorkletSourceNode;
@@ -76,6 +77,7 @@ class BaseAudioContext {
       bool disableNormalization,
       int length);
   std::shared_ptr<AnalyserNode> createAnalyser();
+  std::shared_ptr<ConvolverNode> createConvolver(std::shared_ptr<AudioBuffer> buffer, bool disableNormalization);
 
   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
   [[nodiscard]] float getNyquistFrequency() const;
@@ -0,0 +1,210 @@
+#include <audioapi/core/BaseAudioContext.h>
+#include <audioapi/core/effects/ConvolverNode.h>
+#include <audioapi/core/sources/AudioBuffer.h>
+#include <audioapi/core/utils/Constants.h>
+#include <audioapi/dsp/AudioUtils.h>
+#include <audioapi/dsp/FFT.h>
+#include <audioapi/utils/AudioArray.h>
+#include <iostream>
+#include <thread>
+
+namespace audioapi {
+ConvolverNode::ConvolverNode(
+    BaseAudioContext *context,
+    std::shared_ptr<AudioBuffer> buffer,
+    bool disableNormalization)
+    : AudioNode(context),
+      buffer_(nullptr),
+      internalBuffer_(nullptr),
+      signalledToStop_(false),
+      remainingSegments_(0),
+      internalBufferIndex_(0),
+      scaleFactor_(1.0f),
+      intermediateBus_(nullptr) {
+  channelCount_ = 2;
+  channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
+  normalize_ = !disableNormalization;
+  gainCalibrationSampleRate_ = context->getSampleRate();
+  setBuffer(buffer);
+  audioBus_ = std::make_shared<AudioBus>(
+      RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
+  isInitialized_ = true;
+}
+
+bool ConvolverNode::getNormalize_() const {
+  return normalize_;
+}
+
+const std::shared_ptr<AudioBuffer> &ConvolverNode::getBuffer() const {
+  return buffer_;
+}
+
+void ConvolverNode::setNormalize(bool normalize) {
+  if (normalize_ != normalize) {
+    normalize_ = normalize;
+    if (normalize_ && buffer_)
+      calculateNormalizationScale();
+  }
+  if (!normalize_) {
+    scaleFactor_ = 1.0f;
+  }
+}
+
+void ConvolverNode::setBuffer(const std::shared_ptr<AudioBuffer> &buffer) {
+  if (buffer_ != buffer && buffer != nullptr) {
+    buffer_ = buffer;
+    if (normalize_)
+      calculateNormalizationScale();
+    threadPool_ = std::make_shared<ThreadPool>(4);
+    convolvers_.clear();
+    for (int i = 0; i < buffer->getNumberOfChannels(); ++i) {
+      convolvers_.emplace_back();
+      AudioArray channelData(buffer->getLength());
+      memcpy(
+          channelData.getData(),
+          buffer->getChannelData(i),
+          buffer->getLength() * sizeof(float));
+      convolvers_.back().init(
+          RENDER_QUANTUM_SIZE, channelData, buffer->getLength());
+    }
+    if (buffer->getNumberOfChannels() == 1) {
+      // add one more convolver, because right now input is always stereo
+      convolvers_.emplace_back();
+      AudioArray channelData(buffer->getLength());
+      memcpy(
+          channelData.getData(),
+          buffer->getChannelData(0),
+          buffer->getLength() * sizeof(float));
+      convolvers_.back().init(
+          RENDER_QUANTUM_SIZE, channelData, buffer->getLength());
+    }
+    internalBuffer_ = std::make_shared<AudioBus>(
+        RENDER_QUANTUM_SIZE * 2, channelCount_, buffer->getSampleRate());
+    intermediateBus_ = std::make_shared<AudioBus>(
+        RENDER_QUANTUM_SIZE, convolvers_.size(), buffer->getSampleRate());
+    internalBufferIndex_ = 0;
+  }
+}
+
+void ConvolverNode::onInputDisabled() {
+  numberOfEnabledInputNodes_ -= 1;
+  if (isEnabled() && numberOfEnabledInputNodes_ == 0) {
+    signalledToStop_ = true;
+    remainingSegments_ = convolvers_.at(0).getSegCount();
+  }
+}
+
+std::shared_ptr<AudioBus> ConvolverNode::processInputs(
+    const std::shared_ptr<AudioBus> &outputBus,
+    int framesToProcess,
+    bool checkIsAlreadyProcessed) {
+  if (internalBufferIndex_ < framesToProcess) {
+    return AudioNode::processInputs(outputBus, RENDER_QUANTUM_SIZE, false);
+  }
+  return AudioNode::processInputs(outputBus, 0, false);
+}
+
+// processing pipeline: processingBus -> intermediateBus_ -> audioBus_ (mixing
+// with intermediateBus_)
+std::shared_ptr<AudioBus> ConvolverNode::processNode(
+    const std::shared_ptr<AudioBus> &processingBus,
+    int framesToProcess) {
+  if (signalledToStop_) {
+    if (remainingSegments_ > 0) {
+      remainingSegments_--;
+    } else {
+      disable();
+      signalledToStop_ = false;
+      internalBufferIndex_ = 0;
+      return processingBus;
+    }
+  }
+  if (internalBufferIndex_ < framesToProcess) {
+    performConvolution(processingBus); // result returned to intermediateBus_
+    audioBus_->sum(intermediateBus_.get());
+
+    internalBuffer_->copy(
+        audioBus_.get(), 0, internalBufferIndex_, RENDER_QUANTUM_SIZE);
+    internalBufferIndex_ += RENDER_QUANTUM_SIZE;
+  }
+  audioBus_->zero();
+  audioBus_->copy(internalBuffer_.get(), 0, 0, framesToProcess);
+  int remainingFrames = internalBufferIndex_ - framesToProcess;
+  if (remainingFrames > 0) {
+    for (int i = 0; i < internalBuffer_->getNumberOfChannels(); ++i) {
+      memmove(
+          internalBuffer_->getChannel(i)->getData(),
+          internalBuffer_->getChannel(i)->getData() + framesToProcess,
+          remainingFrames * sizeof(float));
+    }
+  }
+  internalBufferIndex_ -= framesToProcess;
+
+  for (int i = 0; i < audioBus_->getNumberOfChannels(); ++i) {
+    dsp::multiplyByScalar(
+        audioBus_->getChannel(i)->getData(),
+        scaleFactor_,
+        audioBus_->getChannel(i)->getData(),
+        framesToProcess);
+  }
+
+  return audioBus_;
+}
+
+void ConvolverNode::calculateNormalizationScale() {
+  int numberOfChannels = buffer_->getNumberOfChannels();
+  int length = buffer_->getLength();
+
+  float power = 0;
+
+  for (int channel = 0; channel < numberOfChannels; ++channel) {
+    float channelPower = 0;
+    auto channelData = buffer_->getChannelData(channel);
+    for (int i = 0; i < length; ++i) {
+      float sample = channelData[i];
+      channelPower += sample * sample;
+    }
+    power += channelPower;
+  }
+
+  power = std::sqrt(power / (numberOfChannels * length));
+  if (power < MIN_IR_POWER) {
+    power = MIN_IR_POWER;
+  }
+  scaleFactor_ = 1 / power;
+  scaleFactor_ *= std::pow(10, GAIN_CALIBRATION * 0.05f);
+  scaleFactor_ *= gainCalibrationSampleRate_ / buffer_->getSampleRate();
+}
+
+void ConvolverNode::performConvolution(
+    const std::shared_ptr<AudioBus> &processingBus) {
+  if (processingBus->getNumberOfChannels() == 1) {
+    for (int i = 0; i < convolvers_.size(); ++i) {
+      threadPool_->schedule([&, i] {
+        convolvers_[i].process(
+            processingBus->getChannel(0)->getData(),
+            intermediateBus_->getChannel(i)->getData());
+      });
+    }
+  } else if (processingBus->getNumberOfChannels() == 2) {
+    std::vector<int> inputChannelMap;
+    std::vector<int> outputChannelMap;
+    if (convolvers_.size() == 2) {
+      inputChannelMap = {0, 1};
+      outputChannelMap = {0, 1};
+    } else { // 4 channel IR
+      inputChannelMap = {0, 0, 1, 1};
+      outputChannelMap = {0, 3, 2, 1};
+    }
+    for (int i = 0; i < convolvers_.size(); ++i) {
+      threadPool_->schedule(
+          [this, i, inputChannelMap, outputChannelMap, &processingBus] {
+            convolvers_[i].process(
+                processingBus->getChannel(inputChannelMap[i])->getData(),
+                intermediateBus_->getChannel(outputChannelMap[i])->getData());
+          });
+    }
+  }
+  threadPool_->wait();
+}
+} // namespace audioapi
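To make calculateNormalizationScale easier to follow, here is a standalone sketch of the same computation over a flat sample buffer: scale = (1 / rms(IR)) * 10^(GAIN_CALIBRATION / 20) * (contextRate / irRate). The constants mirror MIN_IR_POWER and GAIN_CALIBRATION from ConvolverNode.h below; the function name and parameters are illustrative only and not part of the package:

#include <algorithm>
#include <cmath>
#include <cstddef>

// Illustrative restatement of ConvolverNode::calculateNormalizationScale().
static float normalizationScale(
    const float *samples, size_t frames, size_t channels,
    float contextSampleRate, float irSampleRate) {
  constexpr float kMinIrPower = 0.000125f;    // MIN_IR_POWER in the header below
  constexpr float kGainCalibrationDb = -58.f; // GAIN_CALIBRATION in the header below

  double sumOfSquares = 0.0;
  for (size_t i = 0; i < frames * channels; ++i) {
    sumOfSquares += static_cast<double>(samples[i]) * samples[i];
  }
  // RMS over every sample of every channel, clamped so a near-silent impulse
  // response cannot blow the gain up arbitrarily.
  float rms = static_cast<float>(std::sqrt(sumOfSquares / (frames * channels)));
  rms = std::max(rms, kMinIrPower);

  float scale = 1.0f / rms;
  scale *= std::pow(10.0f, kGainCalibrationDb * 0.05f); // dB -> linear
  scale *= contextSampleRate / irSampleRate;            // compensate for IR sample-rate mismatch
  return scale;
}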
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <audioapi/core/AudioNode.h>
+#include <audioapi/core/AudioParam.h>
+#include <audioapi/dsp/Convolver.h>
+
+#include <memory>
+#include <vector>
+
+#include <audioapi/utils/ThreadPool.hpp>
+
+static constexpr int GAIN_CALIBRATION = -58; // magic number so that processed signal and dry signal have roughly the same volume
+static constexpr double MIN_IR_POWER = 0.000125;
+
+namespace audioapi {
+
+class AudioBus;
+class AudioBuffer;
+
+class ConvolverNode : public AudioNode {
+ public:
+  explicit ConvolverNode(BaseAudioContext *context, std::shared_ptr<AudioBuffer> buffer, bool disableNormalization);
+
+  [[nodiscard]] bool getNormalize_() const;
+  [[nodiscard]] const std::shared_ptr<AudioBuffer> &getBuffer() const;
+  void setNormalize(bool normalize);
+  void setBuffer(const std::shared_ptr<AudioBuffer> &buffer);
+
+ protected:
+  std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus>& processingBus, int framesToProcess) override;
+
+ private:
+  std::shared_ptr<AudioBus> processInputs(const std::shared_ptr<AudioBus>& outputBus, int framesToProcess, bool checkIsAlreadyProcessed) override;
+  void onInputDisabled() override;
+  float gainCalibrationSampleRate_;
+  size_t remainingSegments_;
+  size_t internalBufferIndex_;
+  bool normalize_;
+  bool signalledToStop_;
+  float scaleFactor_;
+  std::shared_ptr<AudioBus> intermediateBus_;
+
+  // impulse response buffer
+  std::shared_ptr<AudioBuffer> buffer_;
+  // buffer to hold internal processed data
+  std::shared_ptr<AudioBus> internalBuffer_;
+  // vector of convolvers, one per channel
+  std::vector<Convolver> convolvers_;
+  std::shared_ptr<ThreadPool> threadPool_;
+
+  void calculateNormalizationScale();
+  void performConvolution(const std::shared_ptr<AudioBus>& processingBus);
+};
+
+} // namespace audioapi
@@ -25,14 +25,12 @@ StreamerNode::StreamerNode(BaseAudioContext *context)
       codecpar_(nullptr),
       pkt_(nullptr),
       frame_(nullptr),
-      pendingFrame_(nullptr),
       bufferedBus_(nullptr),
-      bufferedBusIndex_(0),
-      maxBufferSize_(0),
       audio_stream_index_(-1),
       swrCtx_(nullptr),
       resampledData_(nullptr),
-      maxResampledSamples_(0) {}
+      maxResampledSamples_(0),
+      processedSamples_(0) {}
 
 StreamerNode::~StreamerNode() {
   cleanup();
@@ -66,24 +64,30 @@ bool StreamerNode::initialize(const std::string &input_url) {
     return false;
   }
 
-  maxBufferSize_ = BUFFER_LENGTH_SECONDS * codecCtx_->sample_rate;
-  // If decoding is faster than playing, we buffer few seconds of audio
-  bufferedBus_ = std::make_shared<AudioBus>(
-      maxBufferSize_, codecpar_->ch_layout.nb_channels, codecCtx_->sample_rate);
-
   channelCount_ = codecpar_->ch_layout.nb_channels;
   audioBus_ = std::make_shared<AudioBus>(
       RENDER_QUANTUM_SIZE, channelCount_, context_->getSampleRate());
 
+  auto [sender, receiver] = channels::spsc::channel<
+      StreamingData,
+      channels::spsc::OverflowStrategy::WAIT_ON_FULL,
+      channels::spsc::WaitStrategy::ATOMIC_WAIT>(CHANNEL_CAPACITY);
+  sender_ = std::move(sender);
+  receiver_ = std::move(receiver);
+
   streamingThread_ = std::thread(&StreamerNode::streamAudio, this);
-  streamFlag.store(true);
+  streamFlag.store(true, std::memory_order_release);
   isInitialized_ = true;
   return true;
 }
 
 void StreamerNode::stop(double when) {
   AudioScheduledSourceNode::stop(when);
-  streamFlag.store(false);
+  streamFlag.store(false, std::memory_order_release);
+  StreamingData dummy;
+  while (receiver_.try_receive(dummy) ==
+         channels::spsc::ResponseStatus::SUCCESS)
+    ; // clear the receiver
 }
 
 bool StreamerNode::setupResampler() {
@@ -122,29 +126,22 @@ bool StreamerNode::setupResampler() {
 }
 
 void StreamerNode::streamAudio() {
-  while (streamFlag.load()) {
-    if (pendingFrame_ != nullptr) {
-      if (!processFrameWithResampler(pendingFrame_)) {
+  while (streamFlag.load(std::memory_order_acquire)) {
+    if (av_read_frame(fmtCtx_, pkt_) < 0) {
+      return;
+    }
+    if (pkt_->stream_index == audio_stream_index_) {
+      if (avcodec_send_packet(codecCtx_, pkt_) != 0) {
         return;
       }
-    } else {
-      if (av_read_frame(fmtCtx_, pkt_) < 0) {
+      if (avcodec_receive_frame(codecCtx_, frame_) != 0) {
        return;
       }
-      if (pkt_->stream_index == audio_stream_index_) {
-        if (avcodec_send_packet(codecCtx_, pkt_) != 0) {
-          return;
-        }
-        if (avcodec_receive_frame(codecCtx_, frame_) != 0) {
-          return;
-        }
-        if (!processFrameWithResampler(frame_)) {
-          return;
-        }
+      if (!processFrameWithResampler(frame_)) {
+        return;
      }
-      av_packet_unref(pkt_);
    }
-    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+    av_packet_unref(pkt_);
  }
 }
 
@@ -160,28 +157,33 @@ std::shared_ptr<AudioBus> StreamerNode::processNode(
     return processingBus;
   }
 
-  // If we have enough buffered data, copy to output bus
-  if (bufferedBusIndex_ >= framesToProcess) {
-    Locker locker(mutex_);
+  int bufferRemaining = bufferedBusSize_ - processedSamples_;
+  int alreadyProcessed = 0;
+  if (bufferRemaining < framesToProcess) {
+    if (bufferedBus_ != nullptr) {
+      for (int ch = 0; ch < processingBus->getNumberOfChannels(); ch++) {
+        memcpy(
+            processingBus->getChannel(ch)->getData(),
+            bufferedBus_->getChannel(ch)->getData() + processedSamples_,
+            bufferRemaining * sizeof(float));
+      }
+      framesToProcess -= bufferRemaining;
+      alreadyProcessed += bufferRemaining;
+    }
+    StreamingData data;
+    receiver_.try_receive(data);
+    bufferedBus_ = std::make_shared<AudioBus>(std::move(data.bus));
+    bufferedBusSize_ = data.size;
+    processedSamples_ = 0;
+  }
+  if (bufferedBus_ != nullptr) {
     for (int ch = 0; ch < processingBus->getNumberOfChannels(); ch++) {
       memcpy(
-          processingBus->getChannel(ch)->getData(),
-          bufferedBus_->getChannel(ch)->getData(),
-          offsetLength * sizeof(float));
-
-      memmove(
-          bufferedBus_->getChannel(ch)->getData(),
-          bufferedBus_->getChannel(ch)->getData() + offsetLength,
-          (maxBufferSize_ - offsetLength) * sizeof(float));
+          processingBus->getChannel(ch)->getData() + alreadyProcessed,
+          bufferedBus_->getChannel(ch)->getData() + processedSamples_,
+          framesToProcess * sizeof(float));
     }
-    bufferedBusIndex_ -= offsetLength;
-  } else {
-    if (VERBOSE)
-      printf(
-          "Buffer underrun: have %zu, need %zu\n",
-          bufferedBusIndex_,
-          (size_t)framesToProcess);
-    processingBus->zero();
+    processedSamples_ += framesToProcess;
   }
 
   return processingBus;
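The rewritten consumer path above keeps a read cursor (processedSamples_) into the most recently received bus and, when that bus runs dry mid-quantum, splits the copy: with framesToProcess = 128 and 40 samples left, the first 40 come from the old bus, a new StreamingData is received, and the remaining 88 come from the start of the new one. A simplified single-channel sketch of that arithmetic (every name here is illustrative, not part of the package):

#include <cstring>

// `current`/`currentSize`/`cursor` play the roles of bufferedBus_,
// bufferedBusSize_ and processedSamples_; receiveNext() stands in for
// receiver_.try_receive().
struct Cursor {
  const float *current = nullptr;
  int currentSize = 0;
  int cursor = 0;
};

template <typename ReceiveNext>
void pullFrames(float *out, int frames, Cursor &c, ReceiveNext receiveNext) {
  int remaining = c.currentSize - c.cursor;
  int copied = 0;
  if (remaining < frames) {
    if (c.current != nullptr) {
      std::memcpy(out, c.current + c.cursor, remaining * sizeof(float)); // drain the tail
      frames -= remaining;
      copied = remaining;
    }
    c.current = receiveNext(c.currentSize); // fetch the next decoded block
    c.cursor = 0;
  }
  if (c.current != nullptr) {
    std::memcpy(out + copied, c.current + c.cursor, frames * sizeof(float));
    c.cursor += frames;
  }
}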
@@ -220,22 +222,21 @@ bool StreamerNode::processFrameWithResampler(AVFrame *frame) {
     return false;
   }
 
-  // Check if converted data fits in buffer
-  if (bufferedBusIndex_ + converted_samples > maxBufferSize_) {
-    pendingFrame_ = frame;
+  // if we would like to finish dont copy anything
+  if (!streamFlag.load(std::memory_order_acquire)) {
     return true;
-  } else {
-    pendingFrame_ = nullptr;
   }
-
-  // Copy converted data to our buffer
-  Locker locker(mutex_);
+  auto bus = AudioBus(
+      static_cast<size_t>(converted_samples),
+      codecCtx_->ch_layout.nb_channels,
+      context_->getSampleRate());
   for (int ch = 0; ch < codecCtx_->ch_layout.nb_channels; ch++) {
     auto *src = reinterpret_cast<float *>(resampledData_[ch]);
-    float *dst = bufferedBus_->getChannel(ch)->getData() + bufferedBusIndex_;
+    float *dst = bus.getChannel(ch)->getData();
     memcpy(dst, src, converted_samples * sizeof(float));
   }
-  bufferedBusIndex_ += converted_samples;
+  StreamingData data{std::move(bus), static_cast<size_t>(converted_samples)};
+  sender_.send(std::move(data));
   return true;
 }
 
@@ -280,7 +281,7 @@ bool StreamerNode::setupDecoder() {
 }
 
 void StreamerNode::cleanup() {
-  streamFlag.store(false);
+  streamFlag.store(false, std::memory_order_release);
   // cleanup cannot be called from the streaming thread so there is no need to
   // check if we are in the same thread
   streamingThread_.join();
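Taken together, the StreamerNode changes replace the old shared ring buffer (bufferedBus_ indexed by bufferedBusIndex_ under a mutex, plus pendingFrame_) with a single-producer/single-consumer channel: the decode thread sends one StreamingData per resampled frame, and the render callback drains it with try_receive. A conceptual stand-in for that hand-off is sketched below; it is not the channels::spsc implementation the package depends on (which is lock-free and uses the WAIT_ON_FULL/ATOMIC_WAIT policies shown above), just the same producer-blocks / consumer-never-blocks shape expressed with a mutex and condition variable:

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <utility>
#include <vector>

// Illustrative only: one decode thread pushes resampled blocks, the audio
// callback pops them without ever blocking.
struct Block {
  std::vector<float> samples;  // stands in for StreamingData's AudioBus
  size_t size = 0;
};

class BlockQueue {
 public:
  explicit BlockQueue(size_t capacity) : capacity_(capacity) {}

  void send(Block block) {  // producer side: blocks when full (cf. WAIT_ON_FULL)
    std::unique_lock<std::mutex> lock(mutex_);
    notFull_.wait(lock, [&] { return queue_.size() < capacity_; });
    queue_.push(std::move(block));
  }

  bool tryReceive(Block &out) {  // consumer side: safe to call from the audio thread
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty()) return false;
    out = std::move(queue_.front());
    queue_.pop();
    notFull_.notify_one();
    return true;
  }

 private:
  size_t capacity_;
  std::queue<Block> queue_;
  std::mutex mutex_;
  std::condition_variable notFull_;
};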