react-native-audio-api 0.11.0-nightly-568a154-20251222 → 0.11.0-nightly-94b7f30-20251224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (207)
  1. package/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp +1 -1
  2. package/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp +11 -3
  3. package/android/src/main/cpp/audioapi/android/core/utils/ffmpegBackend/FFmpegFileWriter.cpp +47 -79
  4. package/android/src/main/cpp/audioapi/android/core/utils/ffmpegBackend/FFmpegFileWriter.h +3 -2
  5. package/common/cpp/audioapi/AudioAPIModuleInstaller.h +2 -0
  6. package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +9 -1
  7. package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +1 -0
  8. package/common/cpp/audioapi/HostObjects/effects/DelayNodeHostObject.cpp +6 -2
  9. package/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp +72 -0
  10. package/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.h +23 -0
  11. package/common/cpp/audioapi/core/AudioContext.cpp +15 -13
  12. package/common/cpp/audioapi/core/AudioContext.h +2 -1
  13. package/common/cpp/audioapi/core/AudioNode.cpp +39 -24
  14. package/common/cpp/audioapi/core/AudioNode.h +3 -3
  15. package/common/cpp/audioapi/core/AudioParam.cpp +9 -6
  16. package/common/cpp/audioapi/core/AudioParam.h +2 -2
  17. package/common/cpp/audioapi/core/BaseAudioContext.cpp +32 -21
  18. package/common/cpp/audioapi/core/BaseAudioContext.h +5 -1
  19. package/common/cpp/audioapi/core/analysis/AnalyserNode.cpp +8 -11
  20. package/common/cpp/audioapi/core/analysis/AnalyserNode.h +1 -1
  21. package/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp +9 -3
  22. package/common/cpp/audioapi/core/destinations/AudioDestinationNode.h +1 -1
  23. package/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +18 -9
  24. package/common/cpp/audioapi/core/effects/BiquadFilterNode.h +1 -1
  25. package/common/cpp/audioapi/core/effects/ConvolverNode.cpp +3 -3
  26. package/common/cpp/audioapi/core/effects/ConvolverNode.h +1 -1
  27. package/common/cpp/audioapi/core/effects/DelayNode.cpp +20 -11
  28. package/common/cpp/audioapi/core/effects/DelayNode.h +1 -1
  29. package/common/cpp/audioapi/core/effects/GainNode.cpp +12 -4
  30. package/common/cpp/audioapi/core/effects/GainNode.h +1 -1
  31. package/common/cpp/audioapi/core/effects/IIRFilterNode.cpp +6 -3
  32. package/common/cpp/audioapi/core/effects/IIRFilterNode.h +1 -1
  33. package/common/cpp/audioapi/core/effects/StereoPannerNode.cpp +7 -4
  34. package/common/cpp/audioapi/core/effects/StereoPannerNode.h +1 -1
  35. package/common/cpp/audioapi/core/effects/WaveShaperNode.cpp +79 -0
  36. package/common/cpp/audioapi/core/effects/WaveShaperNode.h +66 -0
  37. package/common/cpp/audioapi/core/effects/WorkletNode.cpp +2 -2
  38. package/common/cpp/audioapi/core/effects/WorkletNode.h +2 -2
  39. package/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp +7 -4
  40. package/common/cpp/audioapi/core/effects/WorkletProcessingNode.h +6 -2
  41. package/common/cpp/audioapi/core/sources/AudioBuffer.cpp +2 -3
  42. package/common/cpp/audioapi/core/sources/AudioBuffer.h +1 -1
  43. package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +59 -25
  44. package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +4 -2
  45. package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +18 -11
  46. package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +3 -1
  47. package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +37 -21
  48. package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +3 -3
  49. package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +11 -11
  50. package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +4 -2
  51. package/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp +16 -8
  52. package/common/cpp/audioapi/core/sources/ConstantSourceNode.h +1 -1
  53. package/common/cpp/audioapi/core/sources/OscillatorNode.cpp +30 -18
  54. package/common/cpp/audioapi/core/sources/OscillatorNode.h +1 -1
  55. package/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp +4 -4
  56. package/common/cpp/audioapi/core/sources/RecorderAdapterNode.h +1 -1
  57. package/common/cpp/audioapi/core/sources/StreamerNode.cpp +24 -10
  58. package/common/cpp/audioapi/core/sources/StreamerNode.h +4 -3
  59. package/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp +11 -4
  60. package/common/cpp/audioapi/core/sources/WorkletSourceNode.h +6 -2
  61. package/common/cpp/audioapi/core/types/OverSampleType.h +7 -0
  62. package/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp +1 -0
  63. package/common/cpp/audioapi/dsp/Resampler.cpp +200 -0
  64. package/common/cpp/audioapi/dsp/Resampler.h +65 -0
  65. package/common/cpp/audioapi/dsp/WaveShaper.cpp +105 -0
  66. package/common/cpp/audioapi/dsp/WaveShaper.h +46 -0
  67. package/common/cpp/audioapi/utils/AudioArray.cpp +5 -0
  68. package/common/cpp/audioapi/utils/AudioArray.h +6 -0
  69. package/common/cpp/test/RunTests.sh +1 -1
  70. package/common/cpp/test/src/AudioParamTest.cpp +10 -10
  71. package/common/cpp/test/src/AudioScheduledSourceTest.cpp +31 -15
  72. package/common/cpp/test/src/ConstantSourceTest.cpp +16 -14
  73. package/common/cpp/test/src/DelayTest.cpp +14 -13
  74. package/common/cpp/test/src/GainTest.cpp +10 -9
  75. package/common/cpp/test/src/IIRFilterTest.cpp +4 -4
  76. package/common/cpp/test/src/OscillatorTest.cpp +2 -2
  77. package/common/cpp/test/src/StereoPannerTest.cpp +14 -12
  78. package/common/cpp/test/src/biquad/BiquadFilterTest.cpp +25 -25
  79. package/common/cpp/test/src/biquad/BiquadFilterTest.h +3 -5
  80. package/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp +76 -0
  81. package/common/cpp/test/src/dsp/ResamplerTest.cpp +117 -0
  82. package/ios/audioapi/ios/AudioAPIModule.mm +4 -4
  83. package/ios/audioapi/ios/core/IOSAudioRecorder.mm +1 -1
  84. package/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm +9 -3
  85. package/lib/commonjs/AudioAPIModule/AudioAPIModule.js +0 -3
  86. package/lib/commonjs/AudioAPIModule/AudioAPIModule.js.map +1 -1
  87. package/lib/commonjs/AudioAPIModule/AudioAPIModule.web.js +20 -0
  88. package/lib/commonjs/AudioAPIModule/AudioAPIModule.web.js.map +1 -0
  89. package/lib/commonjs/AudioAPIModule/ModuleInterfaces.js +6 -0
  90. package/lib/commonjs/AudioAPIModule/ModuleInterfaces.js.map +1 -0
  91. package/lib/commonjs/api.js +16 -0
  92. package/lib/commonjs/api.js.map +1 -1
  93. package/lib/commonjs/api.web.js +23 -0
  94. package/lib/commonjs/api.web.js.map +1 -1
  95. package/lib/commonjs/core/BaseAudioContext.js +4 -0
  96. package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
  97. package/lib/commonjs/core/WaveShaperNode.js +38 -0
  98. package/lib/commonjs/core/WaveShaperNode.js.map +1 -0
  99. package/lib/commonjs/specs/NativeAudioAPIModule.js.map +1 -1
  100. package/lib/commonjs/specs/NativeAudioAPIModule.web.js +47 -0
  101. package/lib/commonjs/specs/NativeAudioAPIModule.web.js.map +1 -0
  102. package/lib/commonjs/system/AudioManager.js.map +1 -1
  103. package/lib/commonjs/system/types.js +4 -0
  104. package/lib/commonjs/web-core/AudioContext.js +4 -0
  105. package/lib/commonjs/web-core/AudioContext.js.map +1 -1
  106. package/lib/commonjs/web-core/OfflineAudioContext.js +4 -0
  107. package/lib/commonjs/web-core/OfflineAudioContext.js.map +1 -1
  108. package/lib/commonjs/web-core/WaveShaperNode.js +38 -0
  109. package/lib/commonjs/web-core/WaveShaperNode.js.map +1 -0
  110. package/lib/commonjs/web-system/AudioManager.js +30 -0
  111. package/lib/commonjs/web-system/AudioManager.js.map +1 -0
  112. package/lib/commonjs/web-system/index.js +12 -0
  113. package/lib/commonjs/web-system/index.js.map +1 -1
  114. package/lib/module/AudioAPIModule/AudioAPIModule.js +0 -4
  115. package/lib/module/AudioAPIModule/AudioAPIModule.js.map +1 -1
  116. package/lib/module/AudioAPIModule/AudioAPIModule.web.js +16 -0
  117. package/lib/module/AudioAPIModule/AudioAPIModule.web.js.map +1 -0
  118. package/lib/module/AudioAPIModule/ModuleInterfaces.js +4 -0
  119. package/lib/module/AudioAPIModule/ModuleInterfaces.js.map +1 -0
  120. package/lib/module/AudioAPIModule/index.js +1 -1
  121. package/lib/module/AudioAPIModule/index.js.map +1 -1
  122. package/lib/module/api.js +2 -0
  123. package/lib/module/api.js.map +1 -1
  124. package/lib/module/api.web.js +3 -1
  125. package/lib/module/api.web.js.map +1 -1
  126. package/lib/module/core/BaseAudioContext.js +4 -0
  127. package/lib/module/core/BaseAudioContext.js.map +1 -1
  128. package/lib/module/core/WaveShaperNode.js +32 -0
  129. package/lib/module/core/WaveShaperNode.js.map +1 -0
  130. package/lib/module/specs/NativeAudioAPIModule.js.map +1 -1
  131. package/lib/module/specs/NativeAudioAPIModule.web.js +44 -0
  132. package/lib/module/specs/NativeAudioAPIModule.web.js.map +1 -0
  133. package/lib/module/specs/index.js +1 -1
  134. package/lib/module/specs/index.js.map +1 -1
  135. package/lib/module/system/AudioManager.js.map +1 -1
  136. package/lib/module/system/types.js +2 -0
  137. package/lib/module/web-core/AudioContext.js +4 -0
  138. package/lib/module/web-core/AudioContext.js.map +1 -1
  139. package/lib/module/web-core/OfflineAudioContext.js +4 -0
  140. package/lib/module/web-core/OfflineAudioContext.js.map +1 -1
  141. package/lib/module/web-core/WaveShaperNode.js +32 -0
  142. package/lib/module/web-core/WaveShaperNode.js.map +1 -0
  143. package/lib/module/web-system/AudioManager.js +26 -0
  144. package/lib/module/web-system/AudioManager.js.map +1 -0
  145. package/lib/module/web-system/index.js +1 -0
  146. package/lib/module/web-system/index.js.map +1 -1
  147. package/lib/typescript/AudioAPIModule/AudioAPIModule.d.ts +2 -10
  148. package/lib/typescript/AudioAPIModule/AudioAPIModule.d.ts.map +1 -1
  149. package/lib/typescript/AudioAPIModule/AudioAPIModule.web.d.ts +13 -0
  150. package/lib/typescript/AudioAPIModule/AudioAPIModule.web.d.ts.map +1 -0
  151. package/lib/typescript/AudioAPIModule/ModuleInterfaces.d.ts +18 -0
  152. package/lib/typescript/AudioAPIModule/ModuleInterfaces.d.ts.map +1 -0
  153. package/lib/typescript/api.d.ts +2 -0
  154. package/lib/typescript/api.d.ts.map +1 -1
  155. package/lib/typescript/api.web.d.ts +3 -1
  156. package/lib/typescript/api.web.d.ts.map +1 -1
  157. package/lib/typescript/core/BaseAudioContext.d.ts +2 -0
  158. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  159. package/lib/typescript/core/WaveShaperNode.d.ts +9 -0
  160. package/lib/typescript/core/WaveShaperNode.d.ts.map +1 -0
  161. package/lib/typescript/interfaces.d.ts +8 -2
  162. package/lib/typescript/interfaces.d.ts.map +1 -1
  163. package/lib/typescript/specs/NativeAudioAPIModule.d.ts +1 -1
  164. package/lib/typescript/specs/NativeAudioAPIModule.d.ts.map +1 -1
  165. package/lib/typescript/specs/NativeAudioAPIModule.web.d.ts +34 -0
  166. package/lib/typescript/specs/NativeAudioAPIModule.web.d.ts.map +1 -0
  167. package/lib/typescript/system/AudioManager.d.ts +2 -2
  168. package/lib/typescript/system/AudioManager.d.ts.map +1 -1
  169. package/lib/typescript/system/notification/types.d.ts +1 -1
  170. package/lib/typescript/system/notification/types.d.ts.map +1 -1
  171. package/lib/typescript/system/types.d.ts +17 -0
  172. package/lib/typescript/system/types.d.ts.map +1 -1
  173. package/lib/typescript/types.d.ts +1 -0
  174. package/lib/typescript/types.d.ts.map +1 -1
  175. package/lib/typescript/web-core/AudioContext.d.ts +2 -0
  176. package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
  177. package/lib/typescript/web-core/BaseAudioContext.d.ts +3 -1
  178. package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -1
  179. package/lib/typescript/web-core/OfflineAudioContext.d.ts +2 -0
  180. package/lib/typescript/web-core/OfflineAudioContext.d.ts.map +1 -1
  181. package/lib/typescript/web-core/WaveShaperNode.d.ts +9 -0
  182. package/lib/typescript/web-core/WaveShaperNode.d.ts.map +1 -0
  183. package/lib/typescript/web-system/AudioManager.d.ts +24 -0
  184. package/lib/typescript/web-system/AudioManager.d.ts.map +1 -0
  185. package/lib/typescript/web-system/index.d.ts +1 -0
  186. package/lib/typescript/web-system/index.d.ts.map +1 -1
  187. package/package.json +1 -1
  188. package/src/AudioAPIModule/AudioAPIModule.ts +6 -17
  189. package/src/AudioAPIModule/AudioAPIModule.web.ts +18 -0
  190. package/src/AudioAPIModule/ModuleInterfaces.ts +25 -0
  191. package/src/api.ts +2 -0
  192. package/src/api.web.ts +3 -0
  193. package/src/core/BaseAudioContext.ts +5 -0
  194. package/src/core/WaveShaperNode.ts +43 -0
  195. package/src/interfaces.ts +9 -1
  196. package/src/specs/NativeAudioAPIModule.ts +5 -3
  197. package/src/specs/NativeAudioAPIModule.web.ts +93 -0
  198. package/src/system/AudioManager.ts +19 -14
  199. package/src/system/notification/types.ts +1 -1
  200. package/src/system/types.ts +22 -0
  201. package/src/types.ts +2 -0
  202. package/src/web-core/AudioContext.tsx +5 -0
  203. package/src/web-core/BaseAudioContext.tsx +3 -1
  204. package/src/web-core/OfflineAudioContext.tsx +5 -0
  205. package/src/web-core/WaveShaperNode.tsx +42 -0
  206. package/src/web-system/AudioManager.ts +33 -0
  207. package/src/web-system/index.ts +1 -0
@@ -109,7 +109,7 @@ Result<NoneType, std::string> AndroidAudioRecorder::openAudioStream() {
  Result<std::string, std::string> AndroidAudioRecorder::start() {
  std::scoped_lock startLock(callbackMutex_, fileWriterMutex_, adapterNodeMutex_);

- if (isRecording()) {
+ if (!isIdle()) {
  return Result<std::string, std::string>::Err("Recorder is already recording");
  }

@@ -103,7 +103,9 @@ Result<NoneType, std::string> AndroidRecorderCallback::prepare(
  }

  void AndroidRecorderCallback::cleanup() {
- emitAudioData(true);
+ if (circularBus_[0]->getNumberOfAvailableFrames() > 0) {
+ emitAudioData(true);
+ }

  if (converter_ != nullptr) {
  ma_data_converter_uninit(converter_.get(), NULL);
@@ -136,7 +138,10 @@ void AndroidRecorderCallback::receiveAudioData(void *data, int numFrames) {
  if (static_cast<float>(streamSampleRate_) == sampleRate_ &&
  streamChannelCount_ == channelCount_) {
  deinterleaveAndPushAudioData(data, numFrames);
- emitAudioData();
+
+ if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) {
+ emitAudioData();
+ }
  return;
  }

@@ -147,7 +152,10 @@ void AndroidRecorderCallback::receiveAudioData(void *data, int numFrames) {
  converter_.get(), data, &inputFrameCount, processingBuffer_, &outputFrameCount);

  deinterleaveAndPushAudioData(processingBuffer_, static_cast<int>(outputFrameCount));
- emitAudioData();
+
+ if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) {
+ emitAudioData();
+ }
  }

  /// @brief Deinterleaves the audio data and pushes it into the circular buffer.
@@ -17,12 +17,13 @@ extern "C" {
  #include <audioapi/utils/UnitConversion.h>

  #include <algorithm>
+ #include <cassert>
  #include <memory>
  #include <string>
  #include <utility>

- constexpr int defaultFrameRatio = 4;
  constexpr int fallbackFIFOSize = 8192;
+ constexpr int fallbackFrameSize = 512;
  constexpr int defaultFlushInterval = 100;

  namespace audioapi::android::ffmpeg {
@@ -33,7 +34,7 @@ FFmpegAudioFileWriter::FFmpegAudioFileWriter(
  : AndroidFileWriterBackend(audioEventHandlerRegistry, fileProperties) {
  // Set flush interval from properties, limit minimum to 100ms
  // to avoid people hurting themselves too much
- flushIntervalMs_ = std::min(fileProperties_->androidFlushIntervalMs, defaultFlushInterval);
+ flushIntervalMs_ = std::max(fileProperties_->androidFlushIntervalMs, defaultFlushInterval);
  }

  FFmpegAudioFileWriter::~FFmpegAudioFileWriter() {
@@ -278,23 +279,40 @@ Result<NoneType, std::string> FFmpegAudioFileWriter::initializeResampler(
  /// that might be needed for storing intermediate audio data or buffering before encoding.
  /// @param maxBufferSize The maximum buffer size to allocate.
  void FFmpegAudioFileWriter::initializeBuffers(int32_t maxBufferSize) {
- frame_ = av_unique_ptr<AVFrame>(av_frame_alloc());
+ resamplerFrame_ = av_unique_ptr<AVFrame>(av_frame_alloc());
+ writingFrame_ = av_unique_ptr<AVFrame>(av_frame_alloc());
  packet_ = av_unique_ptr<AVPacket>(av_packet_alloc());

- int frameRatio = defaultFrameRatio;
- if (encoderCtx_->frame_size > 0) {
- frameRatio = static_cast<int>(std::ceil(
- static_cast<double>(maxBufferSize) / static_cast<double>(encoderCtx_->frame_size)));
- }
-
- int calculatedSize =
- (encoderCtx_->frame_size > 0 ? encoderCtx_->frame_size * frameRatio
- : maxBufferSize * frameRatio);
-
- int fifoSize = std::max(calculatedSize, fallbackFIFOSize);
+ // Calculate resampler size of output buffer from the resampler
+ int resamplerFrameSize = av_rescale_rnd(
+ maxBufferSize,
+ static_cast<int>(encoderCtx_->sample_rate),
+ static_cast<int>(streamSampleRate_),
+ AV_ROUND_UP);
+
+ // Configure frame parameters for desired file output
+ resamplerFrame_->nb_samples = resamplerFrameSize;
+ resamplerFrame_->format = encoderCtx_->sample_fmt;
+ av_channel_layout_copy(&resamplerFrame_->ch_layout, &encoderCtx_->ch_layout);
+ // Allocate buffer for the resampler frame
+ av_frame_get_buffer(resamplerFrame_.get(), 0);
+
+ // calculate FIFO size based on max buffer size and encoder frame size
+ // max(2 * resamplerFrameSize, 2 * encoderCtx_->frame_size, fallbackFIFOSize)
+ int writingFrameSize = 2 * std::max(encoderCtx_->frame_size, fallbackFrameSize);
+ int fifoSize = std::max(std::max(2 * resamplerFrameSize, writingFrameSize), fallbackFIFOSize);

  audioFifo_ = av_unique_ptr<AVAudioFifo>(
  av_audio_fifo_alloc(encoderCtx_->sample_fmt, encoderCtx_->ch_layout.nb_channels, fifoSize));
+
+ // Configure writing frame parameters
+ // size 2 x encoder frame size + same format as encoder
+ writingFrame_->nb_samples = writingFrameSize;
+ av_channel_layout_copy(&writingFrame_->ch_layout, &encoderCtx_->ch_layout);
+ writingFrame_->format = encoderCtx_->sample_fmt;
+ writingFrame_->sample_rate = encoderCtx_->sample_rate;
+ // Allocate buffer for the writing frame
+ av_frame_get_buffer(writingFrame_.get(), 0);
  }

  /// @brief Resamples input audio data and pushes it to the audio FIFO.
@@ -302,34 +320,30 @@ void FFmpegAudioFileWriter::initializeBuffers(int32_t maxBufferSize) {
  /// @param inputFrameCount Number of input frames.
  /// @returns True if successful, false otherwise.
  bool FFmpegAudioFileWriter::resampleAndPushToFifo(void *inputData, int inputFrameCount) {
- int result = 0;
  int64_t outputLength = av_rescale_rnd(
  inputFrameCount, encoderCtx_->sample_rate, static_cast<int>(streamSampleRate_), AV_ROUND_UP);

- result = prepareFrameForEncoding(outputLength);
-
- if (result < 0) {
- invokeOnErrorCallback("Failed to prepare frame for resampling: " + parseErrorCode(result));
- return false;
- }
-
  const uint8_t *inputs[1] = {reinterpret_cast<const uint8_t *>(inputData)};

+ assert(outputLength <= resamplerFrame_->nb_samples);
+
  int convertedSamples = swr_convert(
- resampleCtx_.get(), frame_->data, static_cast<int>(outputLength), inputs, inputFrameCount);
+ resampleCtx_.get(),
+ resamplerFrame_->data,
+ static_cast<int>(outputLength),
+ inputs,
+ inputFrameCount);

  if (convertedSamples < 0) {
  invokeOnErrorCallback("Failed to convert audio samples: " + parseErrorCode(convertedSamples));
- av_frame_unref(frame_.get());
  return false;
  }

  int written = av_audio_fifo_write(
- audioFifo_.get(), reinterpret_cast<void **>(frame_->data), convertedSamples);
+ audioFifo_.get(), reinterpret_cast<void **>(resamplerFrame_->data), convertedSamples);

  if (written < convertedSamples) {
  invokeOnErrorCallback("Failed to write all samples to FIFO");
- av_frame_unref(frame_.get());
  return false;
  }

@@ -344,26 +358,25 @@ bool FFmpegAudioFileWriter::resampleAndPushToFifo(void *inputData, int inputFram
  /// @returns 0 on success, -1 or AV_ERROR code on failure
  int FFmpegAudioFileWriter::processFifo(bool flush) {
  int result = 0;
- int frameSize = encoderCtx_->frame_size > 0 ? encoderCtx_->frame_size : 512;
+ int frameSize = std::max(encoderCtx_->frame_size, fallbackFrameSize);

  while (av_audio_fifo_size(audioFifo_.get()) >= (flush ? 1 : frameSize)) {
  const int chunkSize = std::min(av_audio_fifo_size(audioFifo_.get()), frameSize);

- if (prepareFrameForEncoding(chunkSize) < 0) {
- invokeOnErrorCallback("Failed to prepare frame for encoding");
- return -1;
- }
+ assert(chunkSize <= writingFrame_->nb_samples);

- if (av_audio_fifo_read(audioFifo_.get(), reinterpret_cast<void **>(frame_->data), chunkSize) !=
+ if (av_audio_fifo_read(
+ audioFifo_.get(), reinterpret_cast<void **>(writingFrame_->data), chunkSize) !=
  chunkSize) {
  invokeOnErrorCallback("Failed to read data from FIFO");
  return -1;
  }

- frame_->pts = nextPts_;
+ writingFrame_->nb_samples = chunkSize;
+ writingFrame_->pts = nextPts_;
  nextPts_ += chunkSize;

- result = avcodec_send_frame(encoderCtx_.get(), frame_.get());
+ result = avcodec_send_frame(encoderCtx_.get(), writingFrame_.get());

  if (result < 0) {
  invokeOnErrorCallback("Failed to send frame to encoder: " + parseErrorCode(result));
@@ -418,51 +431,6 @@ int FFmpegAudioFileWriter::writeEncodedPackets() {
  }
  }

- /// @brief Prepares the frame for next encoding phase,
- /// if frame is same size as previously used one (99.9% cases) try to reuse it.
- /// Otherwise resize the frame and in the worst case allocate new frame to use.
- /// @param samplesToRead Number of samples to prepare the frame for.
- /// @returns 0 on success, AV_ERROR code on failure
- int FFmpegAudioFileWriter::prepareFrameForEncoding(int64_t samplesToRead) {
- int result = 0;
-
- if (frame_->data[0] && frame_->nb_samples == samplesToRead &&
- av_frame_is_writable(frame_.get())) {
- return 0;
- }
-
- frame_->nb_samples = static_cast<int>(samplesToRead);
- frame_->format = encoderCtx_->sample_fmt;
- frame_->sample_rate = encoderCtx_->sample_rate;
-
- if (av_channel_layout_compare(&frame_->ch_layout, &encoderCtx_->ch_layout) != 0) {
- av_channel_layout_uninit(&frame_->ch_layout);
-
- result = av_channel_layout_copy(&frame_->ch_layout, &encoderCtx_->ch_layout);
-
- if (result < 0) {
- invokeOnErrorCallback("Failed to copy channel layout: " + parseErrorCode(result));
- return result;
- }
- }
-
- result = av_frame_make_writable(frame_.get());
-
- if (result < 0) {
- av_frame_unref(frame_.get());
-
- frame_->nb_samples = static_cast<int>(samplesToRead);
- ;
- frame_->format = encoderCtx_->sample_fmt;
- frame_->sample_rate = encoderCtx_->sample_rate;
- av_channel_layout_copy(&frame_->ch_layout, &encoderCtx_->ch_layout);
-
- result = av_frame_get_buffer(frame_.get(), 0);
- }
-
- return result;
- }
-
  /// @brief Closes the currently opened audio file, flushing any remaining data and finalizing the file.
  /// Method checks the file size and duration for convenience.
  /// @returns CloseFileResult indicating success or error details
@@ -40,8 +40,10 @@ class FFmpegAudioFileWriter : public AndroidFileWriterBackend {
  av_unique_ptr<SwrContext> resampleCtx_{nullptr};
  av_unique_ptr<AVAudioFifo> audioFifo_{nullptr};
  av_unique_ptr<AVPacket> packet_{nullptr};
- av_unique_ptr<AVFrame> frame_{nullptr};
+ av_unique_ptr<AVFrame> resamplerFrame_{nullptr};
+ av_unique_ptr<AVFrame> writingFrame_{nullptr};
  AVStream* stream_{nullptr};
+
  unsigned int nextPts_{0};

  std::chrono::steady_clock::time_point lastFlushTime_ = std::chrono::steady_clock::now();
@@ -59,7 +61,6 @@ class FFmpegAudioFileWriter : public AndroidFileWriterBackend {
  bool resampleAndPushToFifo(void *data, int numFrames);
  int processFifo(bool flush);
  int writeEncodedPackets();
- int prepareFrameForEncoding(int64_t samplesToRead);

  // Finalization helper methods
  CloseFileResult finalizeOutput();
@@ -82,6 +82,7 @@ class AudioAPIModuleInstaller {

  audioContext = std::make_shared<AudioContext>(
  sampleRate, audioEventHandlerRegistry, runtimeRegistry);
+ audioContext->initialize();

  auto audioContextHostObject =
  std::make_shared<AudioContextHostObject>(audioContext, &runtime, jsCallInvoker);
@@ -118,6 +119,7 @@ class AudioAPIModuleInstaller {

  auto offlineAudioContext = std::make_shared<OfflineAudioContext>(
  numberOfChannels, length, sampleRate, audioEventHandlerRegistry, runtimeRegistry);
+ offlineAudioContext->initialize();

  auto audioContextHostObject = std::make_shared<OfflineAudioContextHostObject>(
  offlineAudioContext, &runtime, jsCallInvoker);
@@ -11,6 +11,7 @@
  #include <audioapi/HostObjects/effects/IIRFilterNodeHostObject.h>
  #include <audioapi/HostObjects/effects/PeriodicWaveHostObject.h>
  #include <audioapi/HostObjects/effects/StereoPannerNodeHostObject.h>
+ #include <audioapi/HostObjects/effects/WaveShaperNodeHostObject.h>
  #include <audioapi/HostObjects/sources/AudioBufferHostObject.h>
  #include <audioapi/HostObjects/sources/AudioBufferQueueSourceNodeHostObject.h>
  #include <audioapi/HostObjects/sources/AudioBufferSourceNodeHostObject.h>
@@ -57,7 +58,8 @@ BaseAudioContextHostObject::BaseAudioContextHostObject(
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBuffer),
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
  JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createConvolver),
- JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser));
+ JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser),
+ JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createWaveShaper));
  }

  JSI_PROPERTY_GETTER_IMPL(BaseAudioContextHostObject, destination) {
@@ -308,4 +310,10 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConvolver) {
  }
  return jsiObject;
  }
+
+ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createWaveShaper) {
+ auto waveShaper = context_->createWaveShaper();
+ auto waveShaperHostObject = std::make_shared<WaveShaperNodeHostObject>(waveShaper);
+ return jsi::Object::createFromHostObject(runtime, waveShaperHostObject);
+ }
  } // namespace audioapi
@@ -43,6 +43,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
  JSI_HOST_FUNCTION_DECL(createPeriodicWave);
  JSI_HOST_FUNCTION_DECL(createAnalyser);
  JSI_HOST_FUNCTION_DECL(createConvolver);
+ JSI_HOST_FUNCTION_DECL(createWaveShaper);
  JSI_HOST_FUNCTION_DECL(createDelay);

  std::shared_ptr<BaseAudioContext> context_;
@@ -14,8 +14,12 @@ DelayNodeHostObject::DelayNodeHostObject(const std::shared_ptr<DelayNode> &node)

  size_t DelayNodeHostObject::getSizeInBytes() const {
  auto delayNode = std::static_pointer_cast<DelayNode>(node_);
- return sizeof(float) * delayNode->context_->getSampleRate() *
- delayNode->getDelayTimeParam()->getMaxValue();
+ auto base = sizeof(float) * delayNode->getDelayTimeParam()->getMaxValue();
+ if (std::shared_ptr<BaseAudioContext> context = delayNode->context_.lock()) {
+ return base * context->getSampleRate();
+ } else {
+ return base * 44100; // Fallback to common sample rate
+ }
  }

  JSI_PROPERTY_GETTER_IMPL(DelayNodeHostObject, delayTime) {
@@ -0,0 +1,72 @@
+ #include <audioapi/HostObjects/effects/WaveShaperNodeHostObject.h>
+ #include <audioapi/core/effects/WaveShaperNode.h>
+ #include <audioapi/jsi/AudioArrayBuffer.h>
+
+ #include <memory>
+ #include <string>
+
+ namespace audioapi {
+
+ WaveShaperNodeHostObject::WaveShaperNodeHostObject(const std::shared_ptr<WaveShaperNode> &node)
+ : AudioNodeHostObject(node) {
+ addGetters(
+ JSI_EXPORT_PROPERTY_GETTER(WaveShaperNodeHostObject, oversample),
+ JSI_EXPORT_PROPERTY_GETTER(WaveShaperNodeHostObject, curve));
+
+ addSetters(JSI_EXPORT_PROPERTY_SETTER(WaveShaperNodeHostObject, oversample));
+ addFunctions(JSI_EXPORT_FUNCTION(WaveShaperNodeHostObject, setCurve));
+ }
+
+ JSI_PROPERTY_GETTER_IMPL(WaveShaperNodeHostObject, oversample) {
+ auto waveShaperNode = std::static_pointer_cast<WaveShaperNode>(node_);
+ return jsi::String::createFromUtf8(runtime, waveShaperNode->getOversample());
+ }
+
+ JSI_PROPERTY_GETTER_IMPL(WaveShaperNodeHostObject, curve) {
+ auto waveShaperNode = std::static_pointer_cast<WaveShaperNode>(node_);
+ auto curve = waveShaperNode->getCurve();
+
+ if (curve == nullptr) {
+ return jsi::Value::null();
+ }
+
+ // copy AudioArray holding curve data to avoid subsequent modifications
+ auto audioArray = std::make_shared<AudioArray>(*curve);
+ auto audioArrayBuffer = std::make_shared<AudioArrayBuffer>(audioArray);
+ auto arrayBuffer = jsi::ArrayBuffer(runtime, audioArrayBuffer);
+
+ auto float32ArrayCtor = runtime.global().getPropertyAsFunction(runtime, "Float32Array");
+ auto float32Array = float32ArrayCtor.callAsConstructor(runtime, arrayBuffer).getObject(runtime);
+ float32Array.setExternalMemoryPressure(runtime, audioArrayBuffer->size());
+
+ return float32Array;
+ }
+
+ JSI_PROPERTY_SETTER_IMPL(WaveShaperNodeHostObject, oversample) {
+ auto waveShaperNode = std::static_pointer_cast<WaveShaperNode>(node_);
+ std::string type = value.asString(runtime).utf8(runtime);
+ waveShaperNode->setOversample(type);
+ }
+
+ JSI_HOST_FUNCTION_IMPL(WaveShaperNodeHostObject, setCurve) {
+ auto waveShaperNode = std::static_pointer_cast<WaveShaperNode>(node_);
+
+ if (args[0].isNull()) {
+ waveShaperNode->setCurve(std::shared_ptr<AudioArray>(nullptr));
+ return jsi::Value::undefined();
+ }
+
+ auto arrayBuffer =
+ args[0].getObject(runtime).getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime);
+
+ auto curve = std::make_shared<AudioArray>(
+ reinterpret_cast<float *>(arrayBuffer.data(runtime)),
+ static_cast<size_t>(arrayBuffer.size(runtime) / sizeof(float)));
+
+ waveShaperNode->setCurve(curve);
+ thisValue.asObject(runtime).setExternalMemoryPressure(runtime, arrayBuffer.size(runtime));
+
+ return jsi::Value::undefined();
+ }
+
+ } // namespace audioapi
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <audioapi/HostObjects/AudioNodeHostObject.h>
+
+ #include <memory>
+ #include <vector>
+
+ namespace audioapi {
+ using namespace facebook;
+
+ class WaveShaperNode;
+
+ class WaveShaperNodeHostObject : public AudioNodeHostObject {
+ public:
+ explicit WaveShaperNodeHostObject(const std::shared_ptr<WaveShaperNode> &node);
+
+ JSI_PROPERTY_GETTER_DECL(oversample);
+ JSI_PROPERTY_GETTER_DECL(curve);
+
+ JSI_PROPERTY_SETTER_DECL(oversample);
+ JSI_HOST_FUNCTION_DECL(setCurve);
+ };
+ } // namespace audioapi
@@ -15,17 +15,8 @@ AudioContext::AudioContext(
  float sampleRate,
  const std::shared_ptr<IAudioEventHandlerRegistry> &audioEventHandlerRegistry,
  const RuntimeRegistry &runtimeRegistry)
- : BaseAudioContext(audioEventHandlerRegistry, runtimeRegistry) {
- #ifdef ANDROID
- audioPlayer_ = std::make_shared<AudioPlayer>(
- this->renderAudio(), sampleRate, destination_->getChannelCount());
- #else
- audioPlayer_ = std::make_shared<IOSAudioPlayer>(
- this->renderAudio(), sampleRate, destination_->getChannelCount());
- #endif
-
+ : BaseAudioContext(audioEventHandlerRegistry, runtimeRegistry), isInitialized_(false) {
  sampleRate_ = sampleRate;
- playerHasBeenStarted_ = false;
  state_ = ContextState::SUSPENDED;
  }

@@ -35,6 +26,17 @@ AudioContext::~AudioContext() {
  }
  }

+ void AudioContext::initialize() {
+ BaseAudioContext::initialize();
+ #ifdef ANDROID
+ audioPlayer_ = std::make_shared<AudioPlayer>(
+ this->renderAudio(), sampleRate_, destination_->getChannelCount());
+ #else
+ audioPlayer_ = std::make_shared<IOSAudioPlayer>(
+ this->renderAudio(), sampleRate_, destination_->getChannelCount());
+ #endif
+ }
+
  void AudioContext::close() {
  state_ = ContextState::CLOSED;

@@ -52,7 +54,7 @@ bool AudioContext::resume() {
  return true;
  }

- if (playerHasBeenStarted_ && audioPlayer_->resume()) {
+ if (isInitialized_ && audioPlayer_->resume()) {
  state_ = ContextState::RUNNING;
  return true;
  }
@@ -80,8 +82,8 @@ bool AudioContext::start() {
  return false;
  }

- if (!playerHasBeenStarted_ && audioPlayer_->start()) {
- playerHasBeenStarted_ = true;
+ if (!isInitialized_ && audioPlayer_->start()) {
+ isInitialized_ = true;
  state_ = ContextState::RUNNING;

  return true;
@@ -25,6 +25,7 @@ class AudioContext : public BaseAudioContext {
  bool resume();
  bool suspend();
  bool start();
+ void initialize() override;

  private:
  #ifdef ANDROID
@@ -32,7 +33,7 @@ class AudioContext : public BaseAudioContext {
  #else
  std::shared_ptr<IOSAudioPlayer> audioPlayer_;
  #endif
- bool playerHasBeenStarted_;
+ bool isInitialized_;

  bool isDriverRunning() const override;

@@ -10,10 +10,13 @@

  namespace audioapi {

- AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
- audioBus_ =
- std::make_shared<AudioBus>(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
- }
+ AudioNode::AudioNode(std::shared_ptr<BaseAudioContext> context)
+ : context_(context),
+ audioBus_(
+ std::make_shared<AudioBus>(
+ RENDER_QUANTUM_SIZE,
+ channelCount_,
+ context->getSampleRate())) {}

  AudioNode::~AudioNode() {
  if (isInitialized_) {
@@ -42,28 +45,38 @@ std::string AudioNode::getChannelInterpretation() const {
  }

  void AudioNode::connect(const std::shared_ptr<AudioNode> &node) {
- context_->getNodeManager()->addPendingNodeConnection(
- shared_from_this(), node, AudioNodeManager::ConnectionType::CONNECT);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ context->getNodeManager()->addPendingNodeConnection(
+ shared_from_this(), node, AudioNodeManager::ConnectionType::CONNECT);
+ }
  }

  void AudioNode::connect(const std::shared_ptr<AudioParam> &param) {
- context_->getNodeManager()->addPendingParamConnection(
- shared_from_this(), param, AudioNodeManager::ConnectionType::CONNECT);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ context->getNodeManager()->addPendingParamConnection(
+ shared_from_this(), param, AudioNodeManager::ConnectionType::CONNECT);
+ }
  }

  void AudioNode::disconnect() {
- context_->getNodeManager()->addPendingNodeConnection(
- shared_from_this(), nullptr, AudioNodeManager::ConnectionType::DISCONNECT_ALL);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ context->getNodeManager()->addPendingNodeConnection(
+ shared_from_this(), nullptr, AudioNodeManager::ConnectionType::DISCONNECT_ALL);
+ }
  }

  void AudioNode::disconnect(const std::shared_ptr<AudioNode> &node) {
- context_->getNodeManager()->addPendingNodeConnection(
- shared_from_this(), node, AudioNodeManager::ConnectionType::DISCONNECT);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ context->getNodeManager()->addPendingNodeConnection(
+ shared_from_this(), node, AudioNodeManager::ConnectionType::DISCONNECT);
+ }
  }

  void AudioNode::disconnect(const std::shared_ptr<AudioParam> &param) {
- context_->getNodeManager()->addPendingParamConnection(
- shared_from_this(), param, AudioNodeManager::ConnectionType::DISCONNECT);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ context->getNodeManager()->addPendingParamConnection(
+ shared_from_this(), param, AudioNodeManager::ConnectionType::DISCONNECT);
+ }
  }

  bool AudioNode::isEnabled() const {
@@ -147,23 +160,25 @@ std::shared_ptr<AudioBus> AudioNode::processAudio(

  // Finally, process the node itself.
  return processNode(processingBus, framesToProcess);
- ;
  }

  bool AudioNode::isAlreadyProcessed() {
- assert(context_ != nullptr);
+ if (std::shared_ptr<BaseAudioContext> context = context_.lock()) {
+ std::size_t currentSampleFrame = context->getCurrentSampleFrame();
+
+ // check if the node has already been processed for this rendering quantum
+ if (currentSampleFrame == lastRenderedFrame_) {
+ return true;
+ }

- std::size_t currentSampleFrame = context_->getCurrentSampleFrame();
+ // Update the last rendered frame before processing node and its inputs.
+ lastRenderedFrame_ = currentSampleFrame;

- // check if the node has already been processed for this rendering quantum
- if (currentSampleFrame == lastRenderedFrame_) {
- return true;
+ return false;
  }

- // Update the last rendered frame before processing node and its inputs.
- lastRenderedFrame_ = currentSampleFrame;
-
- return false;
+ // If context is invalid, consider it as already processed to avoid processing
+ return true;
  }

  std::shared_ptr<AudioBus> AudioNode::processInputs(
@@ -19,7 +19,7 @@ class AudioParam;

  class AudioNode : public std::enable_shared_from_this<AudioNode> {
  public:
- explicit AudioNode(BaseAudioContext *context);
+ explicit AudioNode(std::shared_ptr<BaseAudioContext> context);
  virtual ~AudioNode();

  int getNumberOfInputs() const;
@@ -47,13 +47,13 @@ class AudioNode : public std::enable_shared_from_this<AudioNode> {
  friend class AudioDestinationNode;
  friend class ConvolverNode;
  friend class DelayNodeHostObject;
+ int channelCount_ = 2;

- BaseAudioContext *context_;
+ std::weak_ptr<BaseAudioContext> context_;
  std::shared_ptr<AudioBus> audioBus_;

  int numberOfInputs_ = 1;
  int numberOfOutputs_ = 1;
- int channelCount_ = 2;
  ChannelCountMode channelCountMode_ = ChannelCountMode::MAX;
  ChannelInterpretation channelInterpretation_ =