react-native-audio-api 0.4.8-rc2 → 0.4.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. package/android/CMakeLists.txt +2 -5
  2. package/android/build.gradle +25 -2
  3. package/android/src/main/cpp/core/AudioDecoder.cpp +10 -1
  4. package/android/src/main/cpp/core/AudioPlayer.cpp +6 -3
  5. package/android/src/main/cpp/libs/pffft.c +1906 -0
  6. package/android/src/main/cpp/libs/pffft.h +198 -0
  7. package/common/cpp/core/AnalyserNode.cpp +11 -10
  8. package/common/cpp/core/AnalyserNode.h +2 -0
  9. package/common/cpp/core/AudioBuffer.cpp +1 -1
  10. package/common/cpp/core/AudioBufferSourceNode.cpp +26 -16
  11. package/common/cpp/core/AudioBus.cpp +105 -13
  12. package/common/cpp/core/AudioBus.h +6 -4
  13. package/common/cpp/core/AudioContext.cpp +4 -3
  14. package/common/cpp/core/AudioContext.h +4 -4
  15. package/common/cpp/core/AudioDestinationNode.cpp +2 -3
  16. package/common/cpp/core/AudioNode.cpp +78 -58
  17. package/common/cpp/core/AudioNode.h +10 -1
  18. package/common/cpp/core/AudioNodeManager.cpp +13 -1
  19. package/common/cpp/core/AudioNodeManager.h +2 -0
  20. package/common/cpp/core/AudioScheduledSourceNode.cpp +5 -1
  21. package/common/cpp/core/BaseAudioContext.cpp +4 -1
  22. package/common/cpp/core/BaseAudioContext.h +4 -2
  23. package/common/cpp/core/PeriodicWave.cpp +9 -3
  24. package/common/cpp/core/StereoPannerNode.cpp +9 -12
  25. package/common/cpp/utils/FFTFrame.cpp +44 -37
  26. package/common/cpp/utils/FFTFrame.h +5 -14
  27. package/ios/core/AudioDecoder.mm +10 -1
  28. package/ios/core/AudioPlayer.m +23 -23
  29. package/ios/core/IOSAudioPlayer.mm +3 -3
  30. package/lib/module/core/AudioBufferSourceNode.js +2 -2
  31. package/lib/module/core/AudioBufferSourceNode.js.map +1 -1
  32. package/lib/module/index.js +19 -335
  33. package/lib/module/index.js.map +1 -1
  34. package/lib/module/index.web.js +18 -0
  35. package/lib/module/index.web.js.map +1 -0
  36. package/lib/module/types.js.map +1 -0
  37. package/lib/module/web-core/AnalyserNode.js +48 -0
  38. package/lib/module/web-core/AnalyserNode.js.map +1 -0
  39. package/lib/module/web-core/AudioBuffer.js +43 -0
  40. package/lib/module/web-core/AudioBuffer.js.map +1 -0
  41. package/lib/module/web-core/AudioBufferSourceNode.js +62 -0
  42. package/lib/module/web-core/AudioBufferSourceNode.js.map +1 -0
  43. package/lib/module/web-core/AudioContext.js +69 -0
  44. package/lib/module/web-core/AudioContext.js.map +1 -0
  45. package/lib/module/web-core/AudioDestinationNode.js +5 -0
  46. package/lib/module/web-core/AudioDestinationNode.js.map +1 -0
  47. package/lib/module/web-core/AudioNode.js +27 -0
  48. package/lib/module/web-core/AudioNode.js.map +1 -0
  49. package/lib/module/web-core/AudioParam.js +60 -0
  50. package/lib/module/web-core/AudioParam.js.map +1 -0
  51. package/lib/module/web-core/AudioScheduledSourceNode.js +27 -0
  52. package/lib/module/web-core/AudioScheduledSourceNode.js.map +1 -0
  53. package/lib/module/web-core/BaseAudioContext.js +2 -0
  54. package/lib/module/{core/types.js.map → web-core/BaseAudioContext.js.map} +1 -1
  55. package/lib/module/web-core/BiquadFilterNode.js +35 -0
  56. package/lib/module/web-core/BiquadFilterNode.js.map +1 -0
  57. package/lib/module/web-core/GainNode.js +11 -0
  58. package/lib/module/web-core/GainNode.js.map +1 -0
  59. package/lib/module/web-core/OscillatorNode.js +25 -0
  60. package/lib/module/web-core/OscillatorNode.js.map +1 -0
  61. package/lib/module/web-core/PeriodicWave.js +10 -0
  62. package/lib/module/web-core/PeriodicWave.js.map +1 -0
  63. package/lib/module/web-core/StereoPannerNode.js +11 -0
  64. package/lib/module/web-core/StereoPannerNode.js.map +1 -0
  65. package/lib/typescript/core/AnalyserNode.d.ts +1 -1
  66. package/lib/typescript/core/AnalyserNode.d.ts.map +1 -1
  67. package/lib/typescript/core/AudioNode.d.ts +1 -1
  68. package/lib/typescript/core/AudioNode.d.ts.map +1 -1
  69. package/lib/typescript/core/BaseAudioContext.d.ts +1 -1
  70. package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
  71. package/lib/typescript/core/BiquadFilterNode.d.ts +1 -1
  72. package/lib/typescript/core/BiquadFilterNode.d.ts.map +1 -1
  73. package/lib/typescript/core/OscillatorNode.d.ts +1 -1
  74. package/lib/typescript/core/OscillatorNode.d.ts.map +1 -1
  75. package/lib/typescript/index.d.ts +15 -126
  76. package/lib/typescript/index.d.ts.map +1 -1
  77. package/lib/typescript/index.web.d.ts +16 -0
  78. package/lib/typescript/index.web.d.ts.map +1 -0
  79. package/lib/typescript/interfaces.d.ts +1 -1
  80. package/lib/typescript/interfaces.d.ts.map +1 -1
  81. package/lib/typescript/types.d.ts.map +1 -0
  82. package/lib/typescript/web-core/AnalyserNode.d.ts +18 -0
  83. package/lib/typescript/web-core/AnalyserNode.d.ts.map +1 -0
  84. package/lib/typescript/web-core/AudioBuffer.d.ts +13 -0
  85. package/lib/typescript/web-core/AudioBuffer.d.ts.map +1 -0
  86. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts +19 -0
  87. package/lib/typescript/web-core/AudioBufferSourceNode.d.ts.map +1 -0
  88. package/lib/typescript/web-core/AudioContext.d.ts +30 -0
  89. package/lib/typescript/web-core/AudioContext.d.ts.map +1 -0
  90. package/lib/typescript/web-core/AudioDestinationNode.d.ts +4 -0
  91. package/lib/typescript/web-core/AudioDestinationNode.d.ts.map +1 -0
  92. package/lib/typescript/web-core/AudioNode.d.ts +15 -0
  93. package/lib/typescript/web-core/AudioNode.d.ts.map +1 -0
  94. package/lib/typescript/web-core/AudioParam.d.ts +17 -0
  95. package/lib/typescript/web-core/AudioParam.d.ts.map +1 -0
  96. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts +7 -0
  97. package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts.map +1 -0
  98. package/lib/typescript/web-core/BaseAudioContext.d.ts +27 -0
  99. package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -0
  100. package/lib/typescript/web-core/BiquadFilterNode.d.ts +15 -0
  101. package/lib/typescript/web-core/BiquadFilterNode.d.ts.map +1 -0
  102. package/lib/typescript/web-core/GainNode.d.ts +8 -0
  103. package/lib/typescript/web-core/GainNode.d.ts.map +1 -0
  104. package/lib/typescript/web-core/OscillatorNode.d.ts +14 -0
  105. package/lib/typescript/web-core/OscillatorNode.d.ts.map +1 -0
  106. package/lib/typescript/web-core/PeriodicWave.d.ts +6 -0
  107. package/lib/typescript/web-core/PeriodicWave.d.ts.map +1 -0
  108. package/lib/typescript/web-core/StereoPannerNode.d.ts +8 -0
  109. package/lib/typescript/web-core/StereoPannerNode.d.ts.map +1 -0
  110. package/package.json +1 -1
  111. package/src/core/AnalyserNode.ts +1 -1
  112. package/src/core/AudioBufferSourceNode.ts +2 -2
  113. package/src/core/AudioNode.ts +1 -1
  114. package/src/core/BaseAudioContext.ts +1 -1
  115. package/src/core/BiquadFilterNode.ts +1 -1
  116. package/src/core/OscillatorNode.ts +1 -1
  117. package/src/index.ts +30 -568
  118. package/src/index.web.ts +30 -0
  119. package/src/interfaces.ts +1 -1
  120. package/src/web-core/AnalyserNode.tsx +69 -0
  121. package/src/web-core/AudioBuffer.tsx +79 -0
  122. package/src/web-core/AudioBufferSourceNode.tsx +94 -0
  123. package/src/web-core/AudioContext.tsx +114 -0
  124. package/src/web-core/AudioDestinationNode.tsx +3 -0
  125. package/src/web-core/AudioNode.tsx +40 -0
  126. package/src/web-core/AudioParam.tsx +106 -0
  127. package/src/web-core/AudioScheduledSourceNode.tsx +37 -0
  128. package/src/web-core/BaseAudioContext.tsx +37 -0
  129. package/src/web-core/BiquadFilterNode.tsx +62 -0
  130. package/src/web-core/GainNode.tsx +12 -0
  131. package/src/web-core/OscillatorNode.tsx +36 -0
  132. package/src/web-core/PeriodicWave.tsx +8 -0
  133. package/src/web-core/StereoPannerNode.tsx +12 -0
  134. package/android/libs/arm64-v8a/libfftw3.a +0 -0
  135. package/android/libs/armeabi-v7a/libfftw3.a +0 -0
  136. package/android/libs/include/fftw3.h +0 -413
  137. package/android/libs/x86/libfftw3.a +0 -0
  138. package/android/libs/x86_64/libfftw3.a +0 -0
  139. package/lib/module/index.native.js +0 -21
  140. package/lib/module/index.native.js.map +0 -1
  141. package/lib/typescript/core/types.d.ts.map +0 -1
  142. package/lib/typescript/index.native.d.ts +0 -15
  143. package/lib/typescript/index.native.d.ts.map +0 -1
  144. package/src/index.native.ts +0 -27
  145. /package/lib/module/{core/types.js → types.js} +0 -0
  146. /package/lib/typescript/{core/types.d.ts → types.d.ts} +0 -0
  147. /package/src/{core/types.ts → types.ts} +0 -0
package/android/src/main/cpp/libs/pffft.h
@@ -0,0 +1,198 @@
+ /* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com )
+
+ Based on original fortran 77 code from FFTPACKv4 from NETLIB,
+ authored by Dr Paul Swarztrauber of NCAR, in 1985.
+
+ As confirmed by the NCAR fftpack software curators, the following
+ FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+ released under the same terms.
+
+ FFTPACK license:
+
+ http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+ Copyright (c) 2004 the University Corporation for Atmospheric
+ Research ("UCAR"). All rights reserved. Developed by NCAR's
+ Computational and Information Systems Laboratory, UCAR,
+ www.cisl.ucar.edu.
+
+ Redistribution and use of the Software in source and binary forms,
+ with or without modification, is permitted provided that the
+ following conditions are met:
+
+ - Neither the names of NCAR's Computational and Information Systems
+ Laboratory, the University Corporation for Atmospheric Research,
+ nor the names of its sponsors or contributors may be used to
+ endorse or promote products derived from this Software without
+ specific prior written permission.
+
+ - Redistributions of source code must retain the above copyright
+ notices, this list of conditions, and the disclaimer below.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions, and the disclaimer below in the
+ documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ SOFTWARE.
+ */
+
+ /*
+ PFFFT : a Pretty Fast FFT.
+
+ This is basically an adaptation of the single precision fftpack
+ (v4) as found on netlib taking advantage of SIMD instruction found
+ on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON).
+
+ For architectures where no SIMD instruction is available, the code
+ falls back to a scalar version.
+
+ Restrictions:
+
+ - 1D transforms only, with 32-bit single precision.
+
+ - supports only transforms for inputs of length N of the form
+ N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128,
+ 144, 160, etc are all acceptable lengths). Performance is best for
+ 128<=N<=8192.
+
+ - all (float*) pointers in the functions below are expected to
+ have an "simd-compatible" alignment, that is 16 bytes on x86 and
+ powerpc CPUs.
+
+ You can allocate such buffers with the functions
+ pffft_aligned_malloc / pffft_aligned_free (or with stuff like
+ posix_memalign..)
+
+ */
+
+ #pragma once
+
+ #include <stddef.h> // for size_t
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ /**
+ Opaque struct holding internal stuff (precomputed twiddle factors)
+ this struct can be shared by many threads as it contains only
+ read-only data.
+ */
+ typedef struct PFFFT_Setup PFFFT_Setup;
+
+ /** Direction of the transform */
+ typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t;
+
+ /** Type of transform */
+ typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t;
+
+ /**
+ Prepare for performing transforms of size N -- the returned
+ PFFFT_Setup structure is read-only so it can safely be shared by
+ multiple concurrent threads.
+
+ Will return NULL if N is not suitable (too large / no decomposable with simple
+ integer factors..)
+ */
+ PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform);
+ void pffft_destroy_setup(PFFFT_Setup *);
+ /**
+ Perform a Fourier transform , The z-domain data is stored in the
+ most efficient order for transforming it back, or using it for
+ convolution. If you need to have its content sorted in the
+ "usual" way, that is as an array of interleaved complex numbers,
+ either use pffft_transform_ordered , or call pffft_zreorder after
+ the forward fft, and before the backward fft.
+
+ Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x.
+ Typically you will want to scale the backward transform by 1/N.
+
+ The 'work' pointer should point to an area of N (2*N for complex
+ fft) floats, properly aligned. If 'work' is NULL, then stack will
+ be used instead (this is probably the best strategy for small
+ FFTs, say for N < 16384).
+
+ input and output may alias.
+ */
+ void pffft_transform(
+ PFFFT_Setup *setup,
+ const float *input,
+ float *output,
+ float *work,
+ pffft_direction_t direction);
+
+ /**
+ Similar to pffft_transform, but makes sure that the output is
+ ordered as expected (interleaved complex numbers). This is
+ similar to calling pffft_transform and then pffft_zreorder.
+
+ input and output may alias.
+ */
+ void pffft_transform_ordered(
+ PFFFT_Setup *setup,
+ const float *input,
+ float *output,
+ float *work,
+ pffft_direction_t direction);
+
+ /**
+ call pffft_zreorder(.., PFFFT_FORWARD) after pffft_transform(...,
+ PFFFT_FORWARD) if you want to have the frequency components in
+ the correct "canonical" order, as interleaved complex numbers.
+
+ (for real transforms, both 0-frequency and half frequency
+ components, which are real, are assembled in the first entry as
+ F(0)+i*F(n/2+1). Note that the original fftpack did place
+ F(n/2+1) at the end of the arrays).
+
+ input and output should not alias.
+ */
+ void pffft_zreorder(
+ PFFFT_Setup *setup,
+ const float *input,
+ float *output,
+ pffft_direction_t direction);
+
+ /**
+ Perform a multiplication of the frequency components of dft_a and
+ dft_b and accumulate them into dft_ab. The arrays should have
+ been obtained with pffft_transform(.., PFFFT_FORWARD) and should
+ *not* have been reordered with pffft_zreorder (otherwise just
+ perform the operation yourself as the dft coefs are stored as
+ interleaved complex numbers).
+
+ the operation performed is: dft_ab += (dft_a * fdt_b)*scaling
+
+ The dft_a, dft_b and dft_ab pointers may alias.
+ */
+ void pffft_zconvolve_accumulate(
+ PFFFT_Setup *setup,
+ const float *dft_a,
+ const float *dft_b,
+ float *dft_ab,
+ float scaling);
+
+ /**
+ the float buffers must have the correct alignment (16-byte boundary
+ on intel and powerpc). This function may be used to obtain such
+ correctly aligned buffers.
+ */
+ void *pffft_aligned_malloc(size_t nb_bytes);
+ void pffft_aligned_free(void *);
+
+ /** return 4 or 1 wether support SSE/Altivec instructions was enable when
+ * building pffft.c */
+ int pffft_simd_size(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
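For orientation, here is a minimal usage sketch of the pffft API declared in the header above. It is not part of the package diff; the include path, the size N = 512, and the zero-filled signal are illustrative. It follows the header's own guidance: SIMD-aligned buffers, an ordered real forward transform, and a backward transform scaled by 1/N to recover the input.

// Illustrative sketch only, not code from react-native-audio-api.
#include <cstddef>
#include "pffft.h" // assumed include path for the header shown above

void pffftRoundTripExample() {
  const int N = 512; // must factor as 2^a * 3^b * 5^c with a >= 5

  PFFFT_Setup *setup = pffft_new_setup(N, PFFFT_REAL);
  if (setup == nullptr) {
    return; // N was not a supported size
  }

  // Buffers must be SIMD-aligned; pffft_aligned_malloc guarantees that.
  auto *input = static_cast<float *>(pffft_aligned_malloc(N * sizeof(float)));
  auto *freq = static_cast<float *>(pffft_aligned_malloc(N * sizeof(float)));
  auto *work = static_cast<float *>(pffft_aligned_malloc(N * sizeof(float)));

  for (int i = 0; i < N; ++i) {
    input[i] = 0.0f; // fill with the real-valued signal to analyse
  }

  // Forward transform; the ordered variant yields interleaved complex bins.
  pffft_transform_ordered(setup, input, freq, work, PFFFT_FORWARD);

  // Backward transform; pffft is unscaled, so divide by N to get the signal back.
  pffft_transform_ordered(setup, freq, input, work, PFFFT_BACKWARD);
  for (int i = 0; i < N; ++i) {
    input[i] /= static_cast<float>(N);
  }

  pffft_aligned_free(work);
  pffft_aligned_free(freq);
  pffft_aligned_free(input);
  pffft_destroy_setup(setup);
}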
package/common/cpp/core/AnalyserNode.cpp
@@ -21,9 +21,11 @@ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
  inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
  magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
  downMixBus_ = std::make_unique<AudioBus>(
- context_->getSampleRate(), RENDER_QUANTUM_SIZE, 1);
+ RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());

  fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+ realData_ = std::make_shared<AudioArray>(fftSize_);
+ imaginaryData_ = std::make_shared<AudioArray>(fftSize_);

  isInitialized_ = true;
  }
@@ -59,6 +61,8 @@ void AnalyserNode::setFftSize(int fftSize) {

  fftSize_ = fftSize;
  fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+ realData_ = std::make_shared<AudioArray>(fftSize_);
+ imaginaryData_ = std::make_shared<AudioArray>(fftSize_);
  magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
  }

@@ -145,14 +149,10 @@ void AnalyserNode::getByteTimeDomainData(uint8_t *data, int length) {
  void AnalyserNode::processNode(
  audioapi::AudioBus *processingBus,
  int framesToProcess) {
- if (!isInitialized_) {
- processingBus->zero();
- return;
- }
-
  // Analyser should behave like a sniffer node, it should not modify the
  // processingBus but instead copy the data to its own input buffer.

+ // Down mix the input bus to mono
  downMixBus_->copy(processingBus);

  if (vWriteIndex_ + framesToProcess > inputBuffer_->getSize()) {
@@ -215,11 +215,12 @@ void AnalyserNode::doFFTAnalysis() {
  break;
  }

- // do fft analysis - get frequency domain data
- fftFrame_->doFFT(tempBuffer.getData());
+ auto *realFFTFrameData = realData_->getData();
+ auto *imaginaryFFTFrameData = imaginaryData_->getData();

- auto *realFFTFrameData = fftFrame_->getRealData();
- auto *imaginaryFFTFrameData = fftFrame_->getImaginaryData();
+ // do fft analysis - get frequency domain data
+ fftFrame_->doFFT(
+ tempBuffer.getData(), realFFTFrameData, imaginaryFFTFrameData);

  // Zero out nquist component
  imaginaryFFTFrameData[0] = 0.0f;
package/common/cpp/core/AnalyserNode.h
@@ -50,6 +50,8 @@ class AnalyserNode : public AudioNode {
  int vWriteIndex_;

  std::unique_ptr<FFTFrame> fftFrame_;
+ std::shared_ptr<AudioArray> realData_;
+ std::shared_ptr<AudioArray> imaginaryData_;
  std::unique_ptr<AudioArray> magnitudeBuffer_;
  bool shouldDoFFTAnalysis_ { true };

package/common/cpp/core/AudioBuffer.cpp
@@ -8,7 +8,7 @@ AudioBuffer::AudioBuffer(
  int numberOfChannels,
  size_t length,
  float sampleRate) {
- bus_ = std::make_shared<AudioBus>(sampleRate, length, numberOfChannels);
+ bus_ = std::make_shared<AudioBus>(length, numberOfChannels, sampleRate);
  }

  AudioBuffer::AudioBuffer(AudioBus *bus) {
package/common/cpp/core/AudioBufferSourceNode.cpp
@@ -1,4 +1,5 @@
  #include <algorithm>
+ #include <cassert>

  #include "AudioArray.h"
  #include "AudioBufferSourceNode.h"
@@ -17,7 +18,8 @@ AudioBufferSourceNode::AudioBufferSourceNode(BaseAudioContext *context)
  loopEnd_(0),
  vReadIndex_(0.0) {
  buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
- alignedBus_ = std::shared_ptr<AudioBus>(nullptr);
+ alignedBus_ = std::make_shared<AudioBus>(
+ RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());

  detuneParam_ = std::make_shared<AudioParam>(0.0, MIN_DETUNE, MAX_DETUNE);
  playbackRateParam_ = std::make_shared<AudioParam>(
@@ -67,26 +69,33 @@ void AudioBufferSourceNode::setBuffer(
  const std::shared_ptr<AudioBuffer> &buffer) {
  if (!buffer) {
  buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
- alignedBus_ = std::shared_ptr<AudioBus>(nullptr);
+ alignedBus_ = std::make_shared<AudioBus>(
+ RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());
  loopEnd_ = 0;
  return;
  }

  buffer_ = buffer;
- alignedBus_ = std::make_shared<AudioBus>(
- context_->getSampleRate(),
- buffer_->getLength(),
- buffer_->getNumberOfChannels());
+ channelCount_ = buffer_->getNumberOfChannels();

+ alignedBus_ = std::make_shared<AudioBus>(
+ buffer_->getLength(), channelCount_, context_->getSampleRate());
  alignedBus_->zero();
  alignedBus_->sum(buffer_->bus_.get());

+ audioBus_ = std::make_shared<AudioBus>(
+ RENDER_QUANTUM_SIZE, channelCount_, context_->getSampleRate());
+
  loopEnd_ = buffer_->getDuration();
  }

  void AudioBufferSourceNode::start(double when, double offset, double duration) {
  AudioScheduledSourceNode::start(when);

+ if (duration > 0) {
+ AudioScheduledSourceNode::stop(when + duration);
+ }
+
  if (!buffer_) {
  return;
  }
@@ -98,29 +107,30 @@ void AudioBufferSourceNode::start(double when, double offset, double duration) {
  }

  vReadIndex_ = static_cast<double>(buffer_->getSampleRate() * offset);
-
- if (duration > 0) {
- AudioScheduledSourceNode::stop(when + duration);
- }
  }

  void AudioBufferSourceNode::processNode(
  AudioBus *processingBus,
  int framesToProcess) {
+ // No audio data to fill, zero the output and return.
+ if (!buffer_) {
+ processingBus->zero();
+ return;
+ }
+
  size_t startOffset = 0;
  size_t offsetLength = 0;

  updatePlaybackInfo(processingBus, framesToProcess, startOffset, offsetLength);
  float playbackRate = getPlaybackRateValue(startOffset);

- // No audio data to fill, zero the output and return.
- if (!isPlaying() || !alignedBus_ || alignedBus_->getSize() == 0 ||
- playbackRate == 0.0f) {
+ assert(alignedBus_ != nullptr);
+ assert(alignedBus_->getSize() > 0);
+
+ if (playbackRate == 0.0f || !isPlaying()) {
  processingBus->zero();
  return;
- }
-
- if (std::fabs(playbackRate) == 1.0) {
+ } else if (std::fabs(playbackRate) == 1.0) {
  processWithoutInterpolation(
  processingBus, startOffset, offsetLength, playbackRate);
  } else {
package/common/cpp/core/AudioBus.cpp
@@ -18,7 +18,7 @@ namespace audioapi {
  * Public interfaces - memory management
  */

- AudioBus::AudioBus(float sampleRate, size_t size, int numberOfChannels)
+ AudioBus::AudioBus(size_t size, int numberOfChannels, float sampleRate)
  : numberOfChannels_(numberOfChannels),
  sampleRate_(sampleRate),
  size_(size) {
@@ -161,19 +161,26 @@ float AudioBus::maxAbsValue() const {
  return maxAbsValue;
  }

- void AudioBus::sum(const AudioBus *source) {
- sum(source, 0, 0, getSize());
+ void AudioBus::sum(
+ const AudioBus *source,
+ ChannelInterpretation interpretation) {
+ sum(source, 0, 0, getSize(), interpretation);
  }

- void AudioBus::sum(const AudioBus *source, size_t start, size_t length) {
- sum(source, start, start, length);
+ void AudioBus::sum(
+ const AudioBus *source,
+ size_t start,
+ size_t length,
+ ChannelInterpretation interpretation) {
+ sum(source, start, start, length, interpretation);
  }

  void AudioBus::sum(
  const AudioBus *source,
  size_t sourceStart,
  size_t destinationStart,
- size_t length) {
+ size_t length,
+ ChannelInterpretation interpretation) {
  if (source == this) {
  return;
  }
@@ -181,9 +188,12 @@ void AudioBus::sum(
  int numberOfSourceChannels = source->getNumberOfChannels();
  int numberOfChannels = getNumberOfChannels();

- // TODO: consider adding ability to enforce discrete summing (if/when it will
- // be useful). Source channel count is smaller than current bus, we need to
- // up-mix.
+ if (interpretation == ChannelInterpretation::DISCRETE) {
+ discreteSum(source, sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Source channel count is smaller than current bus, we need to up-mix.
  if (numberOfSourceChannels < numberOfChannels) {
  sumByUpMixing(source, sourceStart, destinationStart, length);
  return;
@@ -373,8 +383,9 @@ void AudioBus::sumByDownMixing(
  return;
  }

- // Stereo 4 to mono: output += 0.25 * (input.left + input.right +
- // input.surroundLeft + input.surroundRight)
+ // Stereo 4 to mono (4 -> 1):
+ // output += 0.25 * (input.left + input.right + input.surroundLeft +
+ // input.surroundRight)
  if (numberOfSourceChannels == 4 && numberOfChannels == 1) {
  float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
  float *sourceRight = source->getChannelByType(ChannelRight)->getData();
@@ -408,7 +419,88 @@ void AudioBus::sumByDownMixing(
  return;
  }

- // 5.1 to stereo:
+ // 5.1 to mono (6 -> 1):
+ // output += sqrt(1/2) * (input.left + input.right) + input.center + 0.5 *
+ // (input.surroundLeft + input.surroundRight)
+ if (numberOfSourceChannels == 6 && numberOfChannels == 1) {
+ float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float *sourceRight = source->getChannelByType(ChannelRight)->getData();
+ float *sourceCenter = source->getChannelByType(ChannelCenter)->getData();
+ float *sourceSurroundLeft =
+ source->getChannelByType(ChannelSurroundLeft)->getData();
+ float *sourceSurroundRight =
+ source->getChannelByType(ChannelSurroundRight)->getData();
+
+ float *destinationData = getChannelByType(ChannelMono)->getData();
+
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceLeft + sourceStart,
+ SQRT_HALF,
+ destinationData + destinationStart,
+ length);
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceRight + sourceStart,
+ SQRT_HALF,
+ destinationData + destinationStart,
+ length);
+ VectorMath::add(
+ sourceCenter + sourceStart,
+ destinationData + destinationStart,
+ destinationData + destinationStart,
+ length);
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceSurroundLeft + sourceStart,
+ 0.5f,
+ destinationData + destinationStart,
+ length);
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceSurroundRight + sourceStart,
+ 0.5f,
+ destinationData + destinationStart,
+ length);
+
+ return;
+ }
+
+ // Stereo 4 to stereo 2 (4 -> 2):
+ // output.left += 0.5 * (input.left + input.surroundLeft)
+ // output.right += 0.5 * (input.right + input.surroundRight)
+ if (numberOfSourceChannels == 4 && numberOfChannels == 2) {
+ float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float *sourceRight = source->getChannelByType(ChannelRight)->getData();
+ float *sourceSurroundLeft =
+ source->getChannelByType(ChannelSurroundLeft)->getData();
+ float *sourceSurroundRight =
+ source->getChannelByType(ChannelSurroundRight)->getData();
+
+ float *destinationLeft = getChannelByType(ChannelLeft)->getData();
+ float *destinationRight = getChannelByType(ChannelRight)->getData();
+
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceLeft + sourceStart,
+ 0.5f,
+ destinationLeft + destinationStart,
+ length);
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceSurroundLeft + sourceStart,
+ 0.5f,
+ destinationLeft + destinationStart,
+ length);
+
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceRight + sourceStart,
+ 0.5f,
+ destinationRight + destinationStart,
+ length);
+ VectorMath::multiplyByScalarThenAddToOutput(
+ sourceSurroundRight + sourceStart,
+ 0.5f,
+ destinationRight + destinationStart,
+ length);
+ return;
+ }
+
+ // 5.1 to stereo (6 -> 2):
  // output.left += input.left + sqrt(1/2) * (input.center + input.surroundLeft)
  // output.right += input.right + sqrt(1/2) * (input.center +
  // input.surroundRight)
@@ -458,7 +550,7 @@ void AudioBus::sumByDownMixing(
  return;
  }

- // 5.1 to stereo 4:
+ // 5.1 to stereo 4 (6 -> 4):
  // output.left += input.left + sqrt(1/2) * input.center
  // output.right += input.right + sqrt(1/2) * input.center
  // output.surroundLeft += input.surroundLeft
package/common/cpp/core/AudioBus.h
@@ -5,6 +5,8 @@
  #include <vector>
  #include <cstddef>

+ #include "ChannelInterpretation.h"
+
  namespace audioapi {

  class BaseAudioContext;
@@ -22,7 +24,7 @@ class AudioBus {
  ChannelSurroundRight = 5,
  };

- explicit AudioBus(float sampleRate, size_t size, int numberOfChannels);
+ explicit AudioBus(size_t size, int numberOfChannels, float sampleRate);

  ~AudioBus();

@@ -39,13 +41,13 @@ class AudioBus {
  void zero();
  void zero(size_t start, size_t length);

- void sum(const AudioBus *source);
- void sum(const AudioBus *source, size_t start, size_t length);
+ void sum(const AudioBus *source, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);
+ void sum(const AudioBus *source, size_t start, size_t length, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);
  void sum(
  const AudioBus *source,
  size_t sourceStart,
  size_t destinationStart,
- size_t length, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);
+ size_t length, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);

  void copy(const AudioBus *source);
  void copy(const AudioBus *source, size_t start, size_t length);
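For context, a hypothetical caller-side sketch of the reworked sum() overloads (not code from the package; the include path and the audioapi namespace placement of ChannelInterpretation are assumptions). The new interpretation argument defaults to SPEAKERS, so existing call sites keep the up-/down-mixing behaviour shown in AudioBus.cpp above, while DISCRETE routes to discreteSum() for channel-for-channel summing without a speaker mixing matrix.

// Illustrative sketch only, not code from react-native-audio-api.
#include "AudioBus.h" // assumed include path

using audioapi::AudioBus;
using audioapi::ChannelInterpretation; // assumed to live in the audioapi namespace

void sumExample(AudioBus *destination, const AudioBus *source) {
  // Default: speakers interpretation, same mixing behaviour as before the change.
  destination->sum(source);

  // Discrete interpretation: channels are summed index to index, no mixing matrix.
  destination->sum(source, ChannelInterpretation::DISCRETE);
}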
package/common/cpp/core/AudioContext.cpp
@@ -7,6 +7,7 @@
  #include "AudioContext.h"
  #include "AudioDecoder.h"
  #include "AudioDestinationNode.h"
+ #include "AudioNodeManager.h"

  namespace audioapi {
  AudioContext::AudioContext() : BaseAudioContext() {
@@ -35,11 +36,11 @@ AudioContext::AudioContext(float sampleRate) : BaseAudioContext() {
  }

  AudioContext::~AudioContext() {
- if (isClosed()) {
- return;
+ if (!isClosed()) {
+ close();
  }

- close();
+ nodeManager_->cleanup();
  }

  void AudioContext::close() {
package/common/cpp/core/AudioContext.h
@@ -20,14 +20,14 @@ class AudioContext : public BaseAudioContext {

  void close();

- std::function<void(AudioBus *, int)> renderAudio();
-
  private:
  #ifdef ANDROID
- std::shared_ptr<AudioPlayer> audioPlayer_;
+ std::shared_ptr<AudioPlayer> audioPlayer_;
  #else
- std::shared_ptr<IOSAudioPlayer> audioPlayer_;
+ std::shared_ptr<IOSAudioPlayer> audioPlayer_;
  #endif
+
+ std::function<void(AudioBus *, int)> renderAudio();
  };

  } // namespace audioapi
package/common/cpp/core/AudioDestinationNode.cpp
@@ -3,14 +3,13 @@
  #include "AudioNode.h"
  #include "AudioNodeManager.h"
  #include "BaseAudioContext.h"
- #include "VectorMath.h"

  namespace audioapi {

  AudioDestinationNode::AudioDestinationNode(BaseAudioContext *context)
  : AudioNode(context), currentSampleFrame_(0) {
  numberOfOutputs_ = 0;
- numberOfInputs_ = INT_MAX;
+ numberOfInputs_ = 1;
  channelCountMode_ = ChannelCountMode::EXPLICIT;
  isInitialized_ = true;
  }
@@ -26,7 +25,7 @@ double AudioDestinationNode::getCurrentTime() const {
  void AudioDestinationNode::renderAudio(
  AudioBus *destinationBus,
  int numFrames) {
- if (!numFrames || !destinationBus || !isInitialized_) {
+ if (numFrames < 0 || !destinationBus || !isInitialized_) {
  return;
  }