react-native-audio-api 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/README.md +58 -1
  2. package/RNAudioAPI.podspec +41 -0
  3. package/android/CMakeLists.txt +63 -0
  4. package/android/build.gradle +194 -0
  5. package/android/gradle.properties +5 -0
  6. package/android/src/main/AndroidManifest.xml +3 -0
  7. package/android/src/main/AndroidManifestNew.xml +2 -0
  8. package/android/src/main/cpp/AudioAPIInstaller/AudioAPIInstaller.cpp +22 -0
  9. package/android/src/main/cpp/AudioAPIInstaller/AudioAPIInstaller.h +48 -0
  10. package/android/src/main/cpp/AudioPlayer/AudioPlayer.cpp +45 -0
  11. package/android/src/main/cpp/AudioPlayer/AudioPlayer.h +30 -0
  12. package/android/src/main/cpp/OnLoad.cpp +9 -0
  13. package/android/src/main/java/com/swmansion/audioapi/AudioAPIPackage.kt +14 -0
  14. package/android/src/main/java/com/swmansion/audioapi/module/AudioAPIInstaller.kt +21 -0
  15. package/android/src/main/java/com/swmansion/audioapi/nativemodules/AudioAPIModule.kt +25 -0
  16. package/common/cpp/AudioAPIInstaller/AudioAPIInstallerHostObject.cpp +52 -0
  17. package/common/cpp/AudioAPIInstaller/AudioAPIInstallerHostObject.h +45 -0
  18. package/common/cpp/AudioAPIInstaller/AudioAPIInstallerWrapper.h +38 -0
  19. package/common/cpp/AudioAPIInstaller/android/AudioAPIInstallerWrapper.cpp +16 -0
  20. package/common/cpp/AudioAPIInstaller/ios/AudioAPIInstallerWrapper.cpp +12 -0
  21. package/common/cpp/HostObjects/AudioBufferHostObject.cpp +143 -0
  22. package/common/cpp/HostObjects/AudioBufferHostObject.h +33 -0
  23. package/common/cpp/HostObjects/AudioBufferSourceNodeHostObject.cpp +67 -0
  24. package/common/cpp/HostObjects/AudioBufferSourceNodeHostObject.h +37 -0
  25. package/common/cpp/HostObjects/AudioContextHostObject.cpp +191 -0
  26. package/common/cpp/HostObjects/AudioContextHostObject.h +43 -0
  27. package/common/cpp/HostObjects/AudioDestinationNodeHostObject.cpp +33 -0
  28. package/common/cpp/HostObjects/AudioDestinationNodeHostObject.h +31 -0
  29. package/common/cpp/HostObjects/AudioNodeHostObject.cpp +108 -0
  30. package/common/cpp/HostObjects/AudioNodeHostObject.h +29 -0
  31. package/common/cpp/HostObjects/AudioParamHostObject.cpp +115 -0
  32. package/common/cpp/HostObjects/AudioParamHostObject.h +34 -0
  33. package/common/cpp/HostObjects/AudioScheduledSourceNodeHostObject.cpp +73 -0
  34. package/common/cpp/HostObjects/AudioScheduledSourceNodeHostObject.h +31 -0
  35. package/common/cpp/HostObjects/BiquadFilterNodeHostObject.cpp +81 -0
  36. package/common/cpp/HostObjects/BiquadFilterNodeHostObject.h +42 -0
  37. package/common/cpp/HostObjects/Constants.h +22 -0
  38. package/common/cpp/HostObjects/GainNodeHostObject.cpp +41 -0
  39. package/common/cpp/HostObjects/GainNodeHostObject.h +32 -0
  40. package/common/cpp/HostObjects/OscillatorNodeHostObject.cpp +67 -0
  41. package/common/cpp/HostObjects/OscillatorNodeHostObject.h +40 -0
  42. package/common/cpp/HostObjects/StereoPannerNodeHostObject.cpp +41 -0
  43. package/common/cpp/HostObjects/StereoPannerNodeHostObject.h +36 -0
  44. package/common/cpp/core/AudioBuffer.cpp +115 -0
  45. package/common/cpp/core/AudioBuffer.h +42 -0
  46. package/common/cpp/core/AudioBufferSourceNode.cpp +58 -0
  47. package/common/cpp/core/AudioBufferSourceNode.h +26 -0
  48. package/common/cpp/core/AudioContext.cpp +90 -0
  49. package/common/cpp/core/AudioContext.h +73 -0
  50. package/common/cpp/core/AudioDestinationNode.cpp +35 -0
  51. package/common/cpp/core/AudioDestinationNode.h +24 -0
  52. package/common/cpp/core/AudioNode.cpp +68 -0
  53. package/common/cpp/core/AudioNode.h +74 -0
  54. package/common/cpp/core/AudioParam.cpp +136 -0
  55. package/common/cpp/core/AudioParam.h +50 -0
  56. package/common/cpp/core/AudioScheduledSourceNode.cpp +39 -0
  57. package/common/cpp/core/AudioScheduledSourceNode.h +30 -0
  58. package/common/cpp/core/BiquadFilterNode.cpp +364 -0
  59. package/common/cpp/core/BiquadFilterNode.h +128 -0
  60. package/common/cpp/core/GainNode.cpp +30 -0
  61. package/common/cpp/core/GainNode.h +23 -0
  62. package/common/cpp/core/OscillatorNode.cpp +66 -0
  63. package/common/cpp/core/OscillatorNode.h +112 -0
  64. package/common/cpp/core/ParamChange.cpp +46 -0
  65. package/common/cpp/core/ParamChange.h +34 -0
  66. package/common/cpp/core/StereoPannerNode.cpp +58 -0
  67. package/common/cpp/core/StereoPannerNode.h +26 -0
  68. package/common/cpp/utils/VectorMath.cpp +609 -0
  69. package/common/cpp/utils/VectorMath.h +65 -0
  70. package/common/cpp/wrappers/AudioBufferSourceNodeWrapper.cpp +35 -0
  71. package/common/cpp/wrappers/AudioBufferSourceNodeWrapper.h +25 -0
  72. package/common/cpp/wrappers/AudioBufferWrapper.cpp +46 -0
  73. package/common/cpp/wrappers/AudioBufferWrapper.h +30 -0
  74. package/common/cpp/wrappers/AudioContextWrapper.cpp +70 -0
  75. package/common/cpp/wrappers/AudioContextWrapper.h +40 -0
  76. package/common/cpp/wrappers/AudioDestinationNodeWrapper.h +16 -0
  77. package/common/cpp/wrappers/AudioNodeWrapper.cpp +37 -0
  78. package/common/cpp/wrappers/AudioNodeWrapper.h +25 -0
  79. package/common/cpp/wrappers/AudioParamWrapper.cpp +42 -0
  80. package/common/cpp/wrappers/AudioParamWrapper.h +25 -0
  81. package/common/cpp/wrappers/AudioScheduledSourceNodeWrapper.cpp +23 -0
  82. package/common/cpp/wrappers/AudioScheduledSourceNodeWrapper.h +23 -0
  83. package/common/cpp/wrappers/BiquadFilterNodeWrapper.cpp +51 -0
  84. package/common/cpp/wrappers/BiquadFilterNodeWrapper.h +32 -0
  85. package/common/cpp/wrappers/GainNodeWrapper.cpp +14 -0
  86. package/common/cpp/wrappers/GainNodeWrapper.h +20 -0
  87. package/common/cpp/wrappers/OscillatorNodeWrapper.cpp +38 -0
  88. package/common/cpp/wrappers/OscillatorNodeWrapper.h +28 -0
  89. package/common/cpp/wrappers/StereoPannerNodeWrapper.cpp +16 -0
  90. package/common/cpp/wrappers/StereoPannerNodeWrapper.h +21 -0
  91. package/ios/AudioAPIModule.h +5 -0
  92. package/ios/AudioAPIModule.mm +44 -0
  93. package/ios/AudioPlayer/AudioPlayer.h +27 -0
  94. package/ios/AudioPlayer/AudioPlayer.m +121 -0
  95. package/ios/AudioPlayer/IOSAudioPlayer.h +29 -0
  96. package/ios/AudioPlayer/IOSAudioPlayer.mm +34 -0
  97. package/lib/module/index.js +39 -0
  98. package/lib/module/index.js.map +1 -0
  99. package/lib/module/modules/global.d.js +2 -0
  100. package/lib/module/modules/global.d.js.map +1 -0
  101. package/lib/module/types.js +2 -0
  102. package/lib/module/types.js.map +1 -0
  103. package/lib/module/utils/install.js +22 -0
  104. package/lib/module/utils/install.js.map +1 -0
  105. package/lib/typescript/index.d.ts +18 -0
  106. package/lib/typescript/index.d.ts.map +1 -0
  107. package/lib/typescript/types.d.ts +76 -0
  108. package/lib/typescript/types.d.ts.map +1 -0
  109. package/lib/typescript/utils/install.d.ts +7 -0
  110. package/lib/typescript/utils/install.d.ts.map +1 -0
  111. package/package.json +104 -5
  112. package/src/index.ts +79 -0
  113. package/src/modules/global.d.ts +10 -0
  114. package/src/types.ts +108 -0
  115. package/src/utils/install.ts +39 -0
  116. package/index.ts +0 -1
@@ -0,0 +1,609 @@
1
+ /*
2
+ * Copyright (C) 2010, Google Inc. All rights reserved.
3
+ * Copyright (C) 2020, Apple Inc. All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions
7
+ * are met:
8
+ * 1. Redistributions of source code must retain the above copyright
9
+ * notice, this list of conditions and the following disclaimer.
10
+ * 2. Redistributions in binary form must reproduce the above copyright
11
+ * notice, this list of conditions and the following disclaimer in the
12
+ * documentation and/or other materials provided with the distribution.
13
+ *
14
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
15
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
18
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
+ */
25
+
26
+ #include "VectorMath.h"
27
+
28
+ #if defined(HAVE_ACCELERATE)
29
+ #include <Accelerate/Accelerate.h>
30
+ #endif
31
+
32
+ #if defined(HAVE_X86_SSE2)
33
+ #include <emmintrin.h>
34
+ #endif
35
+
36
+ #if defined(HAVE_ARM_NEON_INTRINSICS)
37
+ #include <arm_neon.h>
38
+ #endif
39
+
40
+ #include <algorithm>
41
+ #include <cmath>
42
+
43
+ namespace audioapi::VectorMath {
44
+
45
+ #if defined(HAVE_ACCELERATE)
46
+
47
// outputVector[i] = inputVector[i] * scalar, via Accelerate's vDSP.
// All vectors are contiguous float arrays (stride 1).
void multiplyByScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  vDSP_vsmul(
      inputVector, 1, &scalar, outputVector, 1, numberOfElementsToProcess);
}
55
+
56
// outputVector[i] = inputVector[i] + scalar, via Accelerate's vDSP.
// All vectors are contiguous float arrays (stride 1).
void addScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  vDSP_vsadd(
      inputVector, 1, &scalar, outputVector, 1, numberOfElementsToProcess);
}
64
+
65
// outputVector[i] = inputVector1[i] + inputVector2[i], via Accelerate's
// vDSP_vadd. All vectors are contiguous float arrays (stride 1).
void add(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  vDSP_vadd(
      inputVector1,
      1,
      inputVector2,
      1,
      outputVector,
      1,
      numberOfElementsToProcess);
}
79
+
80
+ void substract(
81
+ const float *inputVector1,
82
+ const float *inputVector2,
83
+ float *outputVector,
84
+ size_t numberOfElementsToProcess) {
85
+ vDSP_vsub(
86
+ inputVector1,
87
+ 1,
88
+ inputVector2,
89
+ 1,
90
+ outputVector,
91
+ 1,
92
+ numberOfElementsToProcess);
93
+ }
94
+
95
// outputVector[i] = inputVector1[i] * inputVector2[i], via Accelerate's
// vDSP_vmul. All vectors are contiguous float arrays (stride 1).
void multiply(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  vDSP_vmul(
      inputVector1,
      1,
      inputVector2,
      1,
      outputVector,
      1,
      numberOfElementsToProcess);
}
109
+
110
// Returns the largest |inputVector[i]| via vDSP_maxmgv.
// Returns 0 for an empty vector (maximumValue is pre-initialized to 0).
float maximumMagnitude(
    const float *inputVector,
    size_t numberOfElementsToProcess) {
  float maximumValue = 0;
  vDSP_maxmgv(inputVector, 1, &maximumValue, numberOfElementsToProcess);
  return maximumValue;
}
117
+
118
+ #else
119
+
120
+ #if defined(HAVE_X86_SSE2)
121
// True when `vector` sits on a 16-byte boundary, which is required for the
// aligned SSE load/store intrinsics used below.
static inline bool is16ByteAligned(const float *vector) {
  return reinterpret_cast<uintptr_t>(vector) % 16 == 0;
}
124
+ #endif
125
+
126
// outputVector[i] = inputVector[i] * scalar.
// SIMD paths process 4 floats per iteration; the scalar loop at the end
// handles non-SIMD builds and the < 4-element SIMD tail.
void multiplyByScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;

#if defined(HAVE_X86_SSE2)

  // If the inputVector address is not 16-byte aligned, the first several frames
  // (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector) && n) {
    *outputVector = scalar * *inputVector;
    inputVector++;
    outputVector++;
    n--;
  }

  // Now the inputVector address is aligned and start to apply SSE.
  size_t group = n / 4;
  __m128 mScale = _mm_set_ps1(scalar);
  __m128 *pSource;
  __m128 *pDest;
  __m128 dest;

  if (!is16ByteAligned(outputVector)) {
    // Output is unaligned: compute in a register, then unaligned store.
    while (group--) {
      pSource = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector));
      dest = _mm_mul_ps(*pSource, mScale);
      _mm_storeu_ps(outputVector, dest);

      inputVector += 4;
      outputVector += 4;
    }
  } else {
    // Both pointers aligned: access directly through __m128 pointers.
    while (group--) {
      pSource = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector));
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_mul_ps(*pSource, mScale);

      inputVector += 4;
      outputVector += 4;
    }
  }

  // Non-SSE handling for remaining frames which is less than 4.
  n %= 4;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;

  while (outputVector < endP) {
    float32x4_t source = vld1q_f32(inputVector);
    vst1q_f32(outputVector, vmulq_n_f32(source, scalar));

    inputVector += 4;
    outputVector += 4;
  }
  n = tailFrames;
#endif
  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    *outputVector = scalar * *inputVector;
    ++inputVector;
    ++outputVector;
  }
}
192
+
193
// outputVector[i] = inputVector[i] + scalar.
// SIMD paths process 4 floats per iteration; the scalar loop at the end
// handles non-SIMD builds and the < 4-element SIMD tail.
void addScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;

#if defined(HAVE_X86_SSE2)
  // If the inputVector address is not 16-byte aligned, the first several frames
  // (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector) && n) {
    *outputVector = *inputVector + scalar;
    inputVector++;
    outputVector++;
    n--;
  }

  // Now the inputVector address is aligned and start to apply SSE.
  size_t group = n / 4;
  __m128 mScalar = _mm_set_ps1(scalar);
  __m128 *pSource;
  __m128 *pDest;
  __m128 dest;

  bool destAligned = is16ByteAligned(outputVector);
  if (destAligned) { // all aligned
    while (group--) {
      pSource = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector));
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_add_ps(*pSource, mScalar);

      inputVector += 4;
      outputVector += 4;
    }
  } else {
    // Output is unaligned: compute in a register, then unaligned store.
    while (group--) {
      pSource = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector));
      dest = _mm_add_ps(*pSource, mScalar);
      _mm_storeu_ps(outputVector, dest);

      inputVector += 4;
      outputVector += 4;
    }
  }

  // Non-SSE handling for remaining frames which is less than 4.
  n %= 4;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;
  float32x4_t scalarVector = vdupq_n_f32(scalar);

  while (outputVector < endP) {
    float32x4_t source = vld1q_f32(inputVector);
    vst1q_f32(outputVector, vaddq_f32(source, scalarVector));

    inputVector += 4;
    outputVector += 4;
  }
  n = tailFrames;
#endif
  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    *outputVector = *inputVector + scalar;
    ++inputVector;
    ++outputVector;
  }
}
260
+
261
// outputVector[i] = inputVector1[i] + inputVector2[i].
// The SSE path first scalar-processes up to three leading frames until
// inputVector1 is 16-byte aligned, then selects one of four loops depending
// on whether inputVector2 and outputVector are also aligned.
void add(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;

#if defined(HAVE_X86_SSE2)
  // If the inputVector address is not 16-byte aligned, the first several frames
  // (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector1) && n) {
    *outputVector = *inputVector1 + *inputVector2;
    inputVector1++;
    inputVector2++;
    outputVector++;
    n--;
  }

  // Now the inputVector1 address is aligned and start to apply SSE.
  size_t group = n / 4;
  __m128 *pSource1;
  __m128 *pSource2;
  __m128 *pDest;
  __m128 source2;
  __m128 dest;

  bool source2Aligned = is16ByteAligned(inputVector2);
  bool destAligned = is16ByteAligned(outputVector);

  if (source2Aligned && destAligned) { // all aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      pSource2 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector2));
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_add_ps(*pSource1, *pSource2);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }

  } else if (source2Aligned && !destAligned) { // source2 aligned but dest not
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      pSource2 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector2));
      dest = _mm_add_ps(*pSource1, *pSource2);
      _mm_storeu_ps(outputVector, dest);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }

  } else if (!source2Aligned && destAligned) { // source2 not aligned but dest
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      source2 = _mm_loadu_ps(inputVector2);
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_add_ps(*pSource1, source2);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  } else if (!source2Aligned && !destAligned) { // both source2 and dest not
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      source2 = _mm_loadu_ps(inputVector2);
      dest = _mm_add_ps(*pSource1, source2);
      _mm_storeu_ps(outputVector, dest);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  }

  // Non-SSE handling for remaining frames which is less than 4.
  n %= 4;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;

  while (outputVector < endP) {
    float32x4_t source1 = vld1q_f32(inputVector1);
    float32x4_t source2 = vld1q_f32(inputVector2);
    vst1q_f32(outputVector, vaddq_f32(source1, source2));

    inputVector1 += 4;
    inputVector2 += 4;
    outputVector += 4;
  }
  n = tailFrames;
#endif
  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    *outputVector = *inputVector1 + *inputVector2;
    ++inputVector1;
    ++inputVector2;
    ++outputVector;
  }
}
365
+
366
// outputVector[i] = inputVector1[i] - inputVector2[i].
// (Name is a historical misspelling of "subtract", kept for API stability.)
// Same structure as add(): scalar-align inputVector1, then one of four SSE
// loops depending on the alignment of inputVector2 and outputVector.
void substract(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;

#if defined(HAVE_X86_SSE2)
  // If the inputVector address is not 16-byte aligned, the first several frames
  // (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector1) && n) {
    *outputVector = *inputVector1 - *inputVector2;
    inputVector1++;
    inputVector2++;
    outputVector++;
    n--;
  }

  // Now the inputVector1 address is aligned and start to apply SSE.
  size_t group = n / 4;
  __m128 *pSource1;
  __m128 *pSource2;
  __m128 *pDest;
  __m128 source2;
  __m128 dest;

  bool source2Aligned = is16ByteAligned(inputVector2);
  bool destAligned = is16ByteAligned(outputVector);

  if (source2Aligned && destAligned) { // all aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      pSource2 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector2));
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_sub_ps(*pSource1, *pSource2);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  } else if (source2Aligned && !destAligned) { // source2 aligned but dest not
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      pSource2 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector2));
      dest = _mm_sub_ps(*pSource1, *pSource2);
      _mm_storeu_ps(outputVector, dest);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  } else if (!source2Aligned && destAligned) { // source2 not aligned but dest
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      source2 = _mm_loadu_ps(inputVector2);
      pDest = reinterpret_cast<__m128 *>(outputVector);
      *pDest = _mm_sub_ps(*pSource1, source2);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  } else if (!source2Aligned && !destAligned) { // both source2 and dest not
    // aligned
    while (group--) {
      pSource1 = reinterpret_cast<__m128 *>(const_cast<float *>(inputVector1));
      source2 = _mm_loadu_ps(inputVector2);
      dest = _mm_sub_ps(*pSource1, source2);
      _mm_storeu_ps(outputVector, dest);

      inputVector1 += 4;
      inputVector2 += 4;
      outputVector += 4;
    }
  }

  // Non-SSE handling for remaining frames which is less than 4.
  n %= 4;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;

  while (outputVector < endP) {
    float32x4_t source1 = vld1q_f32(inputVector1);
    float32x4_t source2 = vld1q_f32(inputVector2);
    vst1q_f32(outputVector, vsubq_f32(source1, source2));

    inputVector1 += 4;
    inputVector2 += 4;
    outputVector += 4;
  }
  n = tailFrames;
#endif
  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    *outputVector = *inputVector1 - *inputVector2;
    ++inputVector1;
    ++inputVector2;
    ++outputVector;
  }
}
468
+
469
// outputVector[i] = inputVector1[i] * inputVector2[i].
// The SSE path scalar-processes frames until inputVector1 is 16-byte aligned,
// then uses a local helper macro to instantiate the four aligned/unaligned
// load/store combinations for inputVector2 and outputVector.
void multiply(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;

#if defined(HAVE_X86_SSE2)
  // If the inputVector1 address is not 16-byte aligned, the first several
  // frames (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector1) && n) {
    *outputVector = *inputVector1 * *inputVector2;
    inputVector1++;
    inputVector2++;
    outputVector++;
    n--;
  }

  // Now the inputVector1 address aligned and start to apply SSE.
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;
  __m128 pSource1;
  __m128 pSource2;
  __m128 dest;

  bool source2Aligned = is16ByteAligned(inputVector2);
  bool destAligned = is16ByteAligned(outputVector);

// inputVector1 is aligned by now, so it always uses _mm_load_ps;
// loadInstr/storeInstr select aligned (load/store) or unaligned
// (loadu/storeu) access for inputVector2 and outputVector respectively.
#define SSE2_MULT(loadInstr, storeInstr)           \
  while (outputVector < endP) {                    \
    pSource1 = _mm_load_ps(inputVector1);          \
    pSource2 = _mm_##loadInstr##_ps(inputVector2); \
    dest = _mm_mul_ps(pSource1, pSource2);         \
    _mm_##storeInstr##_ps(outputVector, dest);     \
    inputVector1 += 4;                             \
    inputVector2 += 4;                             \
    outputVector += 4;                             \
  }

  if (source2Aligned && destAligned) // Both aligned.
    SSE2_MULT(load, store)
  else if (source2Aligned && !destAligned) // Source2 is aligned but dest not.
    SSE2_MULT(load, storeu)
  else if (!source2Aligned && destAligned) // Dest is aligned but source2 not.
    SSE2_MULT(loadu, store)
  else // Neither aligned.
    SSE2_MULT(loadu, storeu)

// Fix: undefine the helper macro right after its last use so it cannot leak
// into the rest of the translation unit (or collide with a later definition).
#undef SSE2_MULT

  n = tailFrames;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = outputVector + n - tailFrames;

  while (outputVector < endP) {
    float32x4_t source1 = vld1q_f32(inputVector1);
    float32x4_t source2 = vld1q_f32(inputVector2);
    vst1q_f32(outputVector, vmulq_f32(source1, source2));

    inputVector1 += 4;
    inputVector2 += 4;
    outputVector += 4;
  }
  n = tailFrames;
#endif
  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    *outputVector = *inputVector1 * *inputVector2;
    ++inputVector1;
    ++inputVector2;
    ++outputVector;
  }
}
540
+
541
// Returns the largest |inputVector[i]| in the vector; 0 for an empty vector.
float maximumMagnitude(
    const float *inputVector,
    size_t numberOfElementsToProcess) {
  size_t n = numberOfElementsToProcess;
  float max = 0;

#if defined(HAVE_X86_SSE2)
  // If the inputVector address is not 16-byte aligned, the first several frames
  // (at most three) should be processed separately.
  while (!is16ByteAligned(inputVector) && n) {
    max = std::max(max, std::abs(*inputVector));
    inputVector++;
    n--;
  }

  // Now the inputVector is aligned, use SSE.
  size_t tailFrames = n % 4;
  const float *endP = inputVector + n - tailFrames;
  __m128 source;
  __m128 mMax = _mm_setzero_ps();
  // |x| is obtained by clearing the sign bit. Fix: build the 0x7FFFFFFF mask
  // with integer intrinsics and a register-level cast instead of
  // reinterpret_cast-ing an int* to float*, which violated strict aliasing.
  const __m128 mMask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));

  while (inputVector < endP) {
    source = _mm_load_ps(inputVector);
    // Calculate the absolute value by anding source with mask, the sign bit is
    // set to 0.
    source = _mm_and_ps(source, mMask);
    mMax = _mm_max_ps(mMax, source);
    inputVector += 4;
  }

  // Get max from the SSE results. Fix: spill the lanes through an aligned
  // store into a float array rather than punning &mMax to float*.
  alignas(16) float groupMax[4];
  _mm_store_ps(groupMax, mMax);
  max = std::max(max, groupMax[0]);
  max = std::max(max, groupMax[1]);
  max = std::max(max, groupMax[2]);
  max = std::max(max, groupMax[3]);

  n = tailFrames;
#elif defined(HAVE_ARM_NEON_INTRINSICS)
  size_t tailFrames = n % 4;
  const float *endP = inputVector + n - tailFrames;

  float32x4_t fourMax = vdupq_n_f32(0);
  while (inputVector < endP) {
    float32x4_t source = vld1q_f32(inputVector);
    fourMax = vmaxq_f32(fourMax, vabsq_f32(source));
    inputVector += 4;
  }
  // Pairwise-reduce the four NEON lanes to a single maximum.
  float32x2_t twoMax = vmax_f32(vget_low_f32(fourMax), vget_high_f32(fourMax));

  float groupMax[2];
  vst1_f32(groupMax, twoMax);
  max = std::max(groupMax[0], groupMax[1]);

  n = tailFrames;
#endif

  // Scalar fallback; also finishes the SIMD tail on SIMD builds.
  while (n--) {
    max = std::max(max, std::abs(*inputVector));
    ++inputVector;
  }

  return max;
}
607
+
608
+ #endif
609
+ } // namespace audioapi::VectorMath
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright (C) 2010, Google Inc. All rights reserved.
3
+ * Copyright (C) 2020, Apple Inc. All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions
7
+ * are met:
8
+ * 1. Redistributions of source code must retain the above copyright
9
+ * notice, this list of conditions and the following disclaimer.
10
+ * 2. Redistributions in binary form must reproduce the above copyright
11
+ * notice, this list of conditions and the following disclaimer in the
12
+ * documentation and/or other materials provided with the distribution.
13
+ *
14
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
15
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
18
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
+ */
25
+
26
+ #pragma once
27
+
28
+ // Defines the interface for several vector math functions whose implementation
29
+ // will ideally be optimized.
30
+
31
+ #include <cstddef>
32
+
33
namespace audioapi::VectorMath {

// outputVector[i] = inputVector[i] * scalar
void multiplyByScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess);
// outputVector[i] = inputVector[i] + scalar
void addScalar(
    const float *inputVector,
    float scalar,
    float *outputVector,
    size_t numberOfElementsToProcess);
// outputVector[i] = inputVector1[i] + inputVector2[i]
void add(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess);
// outputVector[i] = inputVector1[i] - inputVector2[i]
// (name is a historical misspelling of "subtract", kept for API stability)
void substract(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess);
// outputVector[i] = inputVector1[i] * inputVector2[i]
void multiply(
    const float *inputVector1,
    const float *inputVector2,
    float *outputVector,
    size_t numberOfElementsToProcess);

// Finds the maximum magnitude (largest absolute value) of a float vector.
// Returns 0 when numberOfElementsToProcess is 0.
float maximumMagnitude(
    const float *inputVector,
    size_t numberOfElementsToProcess);
} // namespace audioapi::VectorMath
@@ -0,0 +1,35 @@
1
+ #include "AudioBufferSourceNodeWrapper.h"
2
+
3
+ namespace audioapi {
4
+
5
// Wraps an AudioBufferSourceNode for the JSI layer; the shared node is stored
// by the AudioScheduledSourceNodeWrapper base class (as `node_`).
AudioBufferSourceNodeWrapper::AudioBufferSourceNodeWrapper(
    const std::shared_ptr<AudioBufferSourceNode> &audioBufferSourceNode)
    : AudioScheduledSourceNodeWrapper(audioBufferSourceNode) {}
8
+
9
// Downcasts the base-class `node_` back to the concrete AudioBufferSourceNode.
// static_pointer_cast is unchecked; this relies on the constructor only ever
// storing an AudioBufferSourceNode in `node_`.
std::shared_ptr<AudioBufferSourceNode>
AudioBufferSourceNodeWrapper::getAudioBufferSourceNodeFromAudioNode() {
  return std::static_pointer_cast<AudioBufferSourceNode>(node_);
}
13
+
14
+ bool AudioBufferSourceNodeWrapper::getLoop() {
15
+ auto audioBufferSourceNode = getAudioBufferSourceNodeFromAudioNode();
16
+ return audioBufferSourceNode->getLoop();
17
+ }
18
+
19
+ void AudioBufferSourceNodeWrapper::setLoop(bool loop) {
20
+ auto audioBufferSourceNode = getAudioBufferSourceNodeFromAudioNode();
21
+ audioBufferSourceNode->setLoop(loop);
22
+ }
23
+
24
+ std::shared_ptr<AudioBufferWrapper> AudioBufferSourceNodeWrapper::getBuffer() {
25
+ auto audioBufferSourceNode = getAudioBufferSourceNodeFromAudioNode();
26
+ auto buffer = audioBufferSourceNode->getBuffer();
27
+ return std::make_shared<AudioBufferWrapper>(buffer);
28
+ }
29
+
30
+ void AudioBufferSourceNodeWrapper::setBuffer(
31
+ const std::shared_ptr<AudioBufferWrapper> &buffer) {
32
+ auto audioBufferSourceNode = getAudioBufferSourceNodeFromAudioNode();
33
+ audioBufferSourceNode->setBuffer(buffer->audioBuffer_);
34
+ }
35
+ } // namespace audioapi