react-native-audio-api 0.4.8 → 0.4.10-rc.1
This diff shows the changes between two publicly available versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- package/android/CMakeLists.txt +2 -5
- package/android/src/main/cpp/libs/pffft.c +1906 -0
- package/android/src/main/cpp/libs/pffft.h +198 -0
- package/common/cpp/HostObjects/AudioContextHostObject.h +13 -3
- package/common/cpp/core/AnalyserNode.cpp +9 -4
- package/common/cpp/core/AnalyserNode.h +2 -0
- package/common/cpp/core/AudioBufferSourceNode.cpp +12 -0
- package/common/cpp/core/AudioBufferSourceNode.h +2 -0
- package/common/cpp/core/PeriodicWave.cpp +9 -3
- package/common/cpp/utils/FFTFrame.cpp +44 -37
- package/common/cpp/utils/FFTFrame.h +5 -14
- package/lib/module/core/AudioContext.js +8 -4
- package/lib/module/core/AudioContext.js.map +1 -1
- package/lib/module/core/AudioNode.js +1 -0
- package/lib/module/core/AudioNode.js.map +1 -1
- package/lib/module/core/AudioParam.js +24 -8
- package/lib/module/core/AudioParam.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +6 -13
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/module/errors/NotSupportedError.js +10 -0
- package/lib/module/errors/NotSupportedError.js.map +1 -0
- package/lib/module/errors/index.js +1 -0
- package/lib/module/errors/index.js.map +1 -1
- package/lib/module/index.js +1 -1
- package/lib/module/index.js.map +1 -1
- package/lib/module/web-core/AudioContext.js +10 -7
- package/lib/module/web-core/AudioContext.js.map +1 -1
- package/lib/module/web-core/AudioNode.js +1 -0
- package/lib/module/web-core/AudioNode.js.map +1 -1
- package/lib/module/web-core/AudioParam.js +24 -8
- package/lib/module/web-core/AudioParam.js.map +1 -1
- package/lib/typescript/core/AudioContext.d.ts +3 -2
- package/lib/typescript/core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/core/AudioNode.d.ts +1 -1
- package/lib/typescript/core/AudioNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioParam.d.ts +7 -7
- package/lib/typescript/core/AudioParam.d.ts.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/errors/NotSupportedError.d.ts +5 -0
- package/lib/typescript/errors/NotSupportedError.d.ts.map +1 -0
- package/lib/typescript/errors/index.d.ts +1 -0
- package/lib/typescript/errors/index.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +1 -1
- package/lib/typescript/index.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +1 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +4 -1
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts +3 -3
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioNode.d.ts +1 -1
- package/lib/typescript/web-core/AudioNode.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioParam.d.ts +7 -7
- package/lib/typescript/web-core/AudioParam.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/core/AudioContext.ts +12 -4
- package/src/core/AudioNode.ts +3 -1
- package/src/core/AudioParam.ts +48 -15
- package/src/core/BaseAudioContext.ts +9 -16
- package/src/errors/NotSupportedError.tsx +8 -0
- package/src/errors/index.ts +1 -0
- package/src/index.ts +1 -0
- package/src/interfaces.ts +1 -1
- package/src/types.ts +5 -1
- package/src/web-core/AudioContext.tsx +20 -10
- package/src/web-core/AudioNode.tsx +3 -1
- package/src/web-core/AudioParam.tsx +48 -15
- package/android/libs/arm64-v8a/libfftw3.a +0 -0
- package/android/libs/armeabi-v7a/libfftw3.a +0 -0
- package/android/libs/include/fftw3.h +0 -413
- package/android/libs/x86/libfftw3.a +0 -0
- package/android/libs/x86_64/libfftw3.a +0 -0

package/android/src/main/cpp/libs/pffft.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com )
+
+   Based on original fortran 77 code from FFTPACKv4 from NETLIB,
+   authored by Dr Paul Swarztrauber of NCAR, in 1985.
+
+   As confirmed by the NCAR fftpack software curators, the following
+   FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
+   released under the same terms.
+
+   FFTPACK license:
+
+   http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
+
+   Copyright (c) 2004 the University Corporation for Atmospheric
+   Research ("UCAR"). All rights reserved. Developed by NCAR's
+   Computational and Information Systems Laboratory, UCAR,
+   www.cisl.ucar.edu.
+
+   Redistribution and use of the Software in source and binary forms,
+   with or without modification, is permitted provided that the
+   following conditions are met:
+
+   - Neither the names of NCAR's Computational and Information Systems
+   Laboratory, the University Corporation for Atmospheric Research,
+   nor the names of its sponsors or contributors may be used to
+   endorse or promote products derived from this Software without
+   specific prior written permission.
+
+   - Redistributions of source code must retain the above copyright
+   notices, this list of conditions, and the disclaimer below.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions, and the disclaimer below in the
+   documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+   SOFTWARE.
+*/
+
+/*
+   PFFFT : a Pretty Fast FFT.
+
+   This is basically an adaptation of the single precision fftpack
+   (v4) as found on netlib taking advantage of SIMD instruction found
+   on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON).
+
+   For architectures where no SIMD instruction is available, the code
+   falls back to a scalar version.
+
+   Restrictions:
+
+   - 1D transforms only, with 32-bit single precision.
+
+   - supports only transforms for inputs of length N of the form
+   N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128,
+   144, 160, etc are all acceptable lengths). Performance is best for
+   128<=N<=8192.
+
+   - all (float*) pointers in the functions below are expected to
+   have an "simd-compatible" alignment, that is 16 bytes on x86 and
+   powerpc CPUs.
+
+   You can allocate such buffers with the functions
+   pffft_aligned_malloc / pffft_aligned_free (or with stuff like
+   posix_memalign..)
+
+*/
+
+#pragma once
+
+#include <stddef.h> // for size_t
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+   Opaque struct holding internal stuff (precomputed twiddle factors)
+   this struct can be shared by many threads as it contains only
+   read-only data.
+*/
+typedef struct PFFFT_Setup PFFFT_Setup;
+
+/** Direction of the transform */
+typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t;
+
+/** Type of transform */
+typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t;
+
+/**
+   Prepare for performing transforms of size N -- the returned
+   PFFFT_Setup structure is read-only so it can safely be shared by
+   multiple concurrent threads.
+
+   Will return NULL if N is not suitable (too large / no decomposable with simple
+   integer factors..)
+*/
+PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform);
+void pffft_destroy_setup(PFFFT_Setup *);
+/**
+   Perform a Fourier transform , The z-domain data is stored in the
+   most efficient order for transforming it back, or using it for
+   convolution. If you need to have its content sorted in the
+   "usual" way, that is as an array of interleaved complex numbers,
+   either use pffft_transform_ordered , or call pffft_zreorder after
+   the forward fft, and before the backward fft.
+
+   Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x.
+   Typically you will want to scale the backward transform by 1/N.
+
+   The 'work' pointer should point to an area of N (2*N for complex
+   fft) floats, properly aligned. If 'work' is NULL, then stack will
+   be used instead (this is probably the best strategy for small
+   FFTs, say for N < 16384).
+
+   input and output may alias.
+*/
+void pffft_transform(
+    PFFFT_Setup *setup,
+    const float *input,
+    float *output,
+    float *work,
+    pffft_direction_t direction);
+
+/**
+   Similar to pffft_transform, but makes sure that the output is
+   ordered as expected (interleaved complex numbers). This is
+   similar to calling pffft_transform and then pffft_zreorder.
+
+   input and output may alias.
+*/
+void pffft_transform_ordered(
+    PFFFT_Setup *setup,
+    const float *input,
+    float *output,
+    float *work,
+    pffft_direction_t direction);
+
+/**
+   call pffft_zreorder(.., PFFFT_FORWARD) after pffft_transform(...,
+   PFFFT_FORWARD) if you want to have the frequency components in
+   the correct "canonical" order, as interleaved complex numbers.
+
+   (for real transforms, both 0-frequency and half frequency
+   components, which are real, are assembled in the first entry as
+   F(0)+i*F(n/2+1). Note that the original fftpack did place
+   F(n/2+1) at the end of the arrays).
+
+   input and output should not alias.
+*/
+void pffft_zreorder(
+    PFFFT_Setup *setup,
+    const float *input,
+    float *output,
+    pffft_direction_t direction);
+
+/**
+   Perform a multiplication of the frequency components of dft_a and
+   dft_b and accumulate them into dft_ab. The arrays should have
+   been obtained with pffft_transform(.., PFFFT_FORWARD) and should
+   *not* have been reordered with pffft_zreorder (otherwise just
+   perform the operation yourself as the dft coefs are stored as
+   interleaved complex numbers).
+
+   the operation performed is: dft_ab += (dft_a * fdt_b)*scaling
+
+   The dft_a, dft_b and dft_ab pointers may alias.
+*/
+void pffft_zconvolve_accumulate(
+    PFFFT_Setup *setup,
+    const float *dft_a,
+    const float *dft_b,
+    float *dft_ab,
+    float scaling);
+
+/**
+   the float buffers must have the correct alignment (16-byte boundary
+   on intel and powerpc). This function may be used to obtain such
+   correctly aligned buffers.
+*/
+void *pffft_aligned_malloc(size_t nb_bytes);
+void pffft_aligned_free(void *);
+
+/** return 4 or 1 wether support SSE/Altivec instructions was enable when
+ * building pffft.c */
+int pffft_simd_size(void);
+
+#ifdef __cplusplus
+}
+#endif
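
For orientation on the new Android FFT dependency, here is a minimal, hypothetical round trip through the API declared above. The size, test signal, and include path are illustrative and not taken from the package; the functions, enums, and the 1/N scaling rule are the ones documented in the header.

```cpp
// Hypothetical usage sketch of the pffft API bundled above.
#include "pffft.h"  // assumed include path; adjust to your build setup

int main() {
  const int N = 512;  // must satisfy N = (2^a)*(3^b)*(5^c) with a >= 5 for PFFFT_REAL
  PFFFT_Setup *setup = pffft_new_setup(N, PFFFT_REAL);  // returns NULL for unsupported N

  // pffft requires SIMD-compatible (16-byte) alignment for all float buffers.
  float *in = (float *)pffft_aligned_malloc(N * sizeof(float));
  float *freq = (float *)pffft_aligned_malloc(N * sizeof(float));
  float *out = (float *)pffft_aligned_malloc(N * sizeof(float));
  float *work = (float *)pffft_aligned_malloc(N * sizeof(float));  // N floats for a real FFT

  for (int i = 0; i < N; ++i) in[i] = 0.0f;
  in[1] = 1.0f;  // an impulse as a trivial test signal

  // Forward transform with the spectrum in "canonical" interleaved order.
  pffft_transform_ordered(setup, in, freq, work, PFFFT_FORWARD);

  // Inverse transform; pffft does not scale, so divide by N to recover the input.
  pffft_transform_ordered(setup, freq, out, work, PFFFT_BACKWARD);
  for (int i = 0; i < N; ++i) out[i] /= (float)N;

  pffft_aligned_free(work);
  pffft_aligned_free(out);
  pffft_aligned_free(freq);
  pffft_aligned_free(in);
  pffft_destroy_setup(setup);
  return 0;
}
```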

package/common/cpp/HostObjects/AudioContextHostObject.h
@@ -2,6 +2,7 @@
 
 #include <jsi/jsi.h>
 #include <memory>
+#include <utility>
 #include <vector>
 
 #include "AudioContext.h"
@@ -20,9 +21,18 @@ class AudioContextHostObject : public BaseAudioContextHostObject {
   }
 
   JSI_HOST_FUNCTION(close) {
-    auto
-
-
+    auto promise = promiseVendor_->createPromise([this](std::shared_ptr<Promise> promise) {
+      std::thread([this, promise = std::move(promise)]() {
+        auto audioContext = std::static_pointer_cast<AudioContext>(context_);
+        audioContext->close();
+
+        promise->resolve([](jsi::Runtime &runtime) {
+          return jsi::Value::undefined();
+        });
+      }).detach();
+    });
+
+    return promise;
   }
 };
 } // namespace audioapi

package/common/cpp/core/AnalyserNode.cpp
@@ -24,6 +24,8 @@ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
       RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());
 
   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+  realData_ = std::make_shared<AudioArray>(fftSize_);
+  imaginaryData_ = std::make_shared<AudioArray>(fftSize_);
 
   isInitialized_ = true;
 }
@@ -59,6 +61,8 @@ void AnalyserNode::setFftSize(int fftSize) {
 
   fftSize_ = fftSize;
   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);
+  realData_ = std::make_shared<AudioArray>(fftSize_);
+  imaginaryData_ = std::make_shared<AudioArray>(fftSize_);
   magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
 }
 
@@ -211,11 +215,12 @@ void AnalyserNode::doFFTAnalysis() {
       break;
   }
 
-
-
+  auto *realFFTFrameData = realData_->getData();
+  auto *imaginaryFFTFrameData = imaginaryData_->getData();
 
-
-
+  // do fft analysis - get frequency domain data
+  fftFrame_->doFFT(
+      tempBuffer.getData(), realFFTFrameData, imaginaryFFTFrameData);
 
   // Zero out nquist component
   imaginaryFFTFrameData[0] = 0.0f;

package/common/cpp/core/AnalyserNode.h
@@ -50,6 +50,8 @@ class AnalyserNode : public AudioNode {
  int vWriteIndex_;
 
  std::unique_ptr<FFTFrame> fftFrame_;
+ std::shared_ptr<AudioArray> realData_;
+ std::shared_ptr<AudioArray> imaginaryData_;
  std::unique_ptr<AudioArray> magnitudeBuffer_;
  bool shouldDoFFTAnalysis_ { true };
 

package/common/cpp/core/AudioBufferSourceNode.cpp
@@ -8,6 +8,7 @@
 #include "AudioUtils.h"
 #include "BaseAudioContext.h"
 #include "Constants.h"
+#include "Locker.h"
 
 namespace audioapi {
 
@@ -67,6 +68,8 @@ void AudioBufferSourceNode::setLoopEnd(double loopEnd) {
 
 void AudioBufferSourceNode::setBuffer(
     const std::shared_ptr<AudioBuffer> &buffer) {
+  Locker locker(getBufferLock());
+
   if (!buffer) {
     buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
     alignedBus_ = std::make_shared<AudioBus>(
@@ -109,6 +112,10 @@ void AudioBufferSourceNode::start(double when, double offset, double duration) {
     vReadIndex_ = static_cast<double>(buffer_->getSampleRate() * offset);
 }
 
+std::mutex &AudioBufferSourceNode::getBufferLock() {
+  return bufferLock_;
+}
+
 void AudioBufferSourceNode::processNode(
     AudioBus *processingBus,
     int framesToProcess) {
@@ -118,6 +125,11 @@ void AudioBufferSourceNode::processNode(
     return;
   }
 
+  if (!Locker::tryLock(getBufferLock())) {
+    processingBus->zero();
+    return;
+  }
+
   size_t startOffset = 0;
   size_t offsetLength = 0;
 

package/common/cpp/core/AudioBufferSourceNode.h
@@ -30,6 +30,7 @@ class AudioBufferSourceNode : public AudioScheduledSourceNode {
  void start(double when, double offset, double duration = -1);
 
 protected:
+ std::mutex &getBufferLock();
  void processNode(AudioBus *processingBus, int framesToProcess) override;
 
 private:
@@ -37,6 +38,7 @@ class AudioBufferSourceNode : public AudioScheduledSourceNode {
  bool loop_;
  double loopStart_;
  double loopEnd_;
+ std::mutex bufferLock_;
 
  // playback rate aka pitch change params
  std::shared_ptr<AudioParam> detuneParam_;
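
The setBuffer/processNode changes above follow a try-lock pattern so the audio render thread never blocks while a buffer is being swapped: the setter takes the lock, the render callback only tries to take it and outputs silence if it cannot. The package's Locker helper is not shown in this diff, so the sketch below illustrates the pattern with plain std::mutex under assumed names; it is not the package's implementation.

```cpp
#include <mutex>

// Illustrative sketch of the buffer-lock pattern, not the package's Locker class.
struct BufferSourceSketch {
  std::mutex bufferLock_;

  void setBuffer(/* new buffer */) {
    // Blocking lock on the JS/main thread while the buffer is swapped.
    std::lock_guard<std::mutex> guard(bufferLock_);
    // ... swap buffer_ / alignedBus_ ...
  }

  void processNode(float *out, int frames) {
    // Non-blocking attempt on the audio thread.
    std::unique_lock<std::mutex> guard(bufferLock_, std::try_to_lock);
    if (!guard.owns_lock()) {
      for (int i = 0; i < frames; ++i) out[i] = 0.0f;  // zero the bus and bail out
      return;
    }
    // ... render from the buffer ...
  }
};
```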

package/common/cpp/core/PeriodicWave.cpp
@@ -199,8 +199,8 @@ void PeriodicWave::createBandLimitedTables(
   for (int rangeIndex = 0; rangeIndex < numberOfRanges_; rangeIndex++) {
     FFTFrame fftFrame(fftSize);
 
-    auto *realFFTFrameData =
-    auto *imaginaryFFTFrameData =
+    auto *realFFTFrameData = new float[fftSize];
+    auto *imaginaryFFTFrameData = new float[fftSize];
 
     // copy real and imaginary data to the FFT frame and scale it
     VectorMath::multiplyByScalar(
@@ -235,7 +235,10 @@ void PeriodicWave::createBandLimitedTables(
 
     // Perform the inverse FFT to get the time domain representation of the
     // band-limited waveform.
-    fftFrame.doInverseFFT(
+    fftFrame.doInverseFFT(
+        bandLimitedTables_[rangeIndex],
+        realFFTFrameData,
+        imaginaryFFTFrameData);
 
     if (!disableNormalization_ && rangeIndex == 0) {
       float maxValue =
@@ -245,6 +248,9 @@ void PeriodicWave::createBandLimitedTables(
       }
     }
 
+    delete[] realFFTFrameData;
+    delete[] imaginaryFFTFrameData;
+
     VectorMath::multiplyByScalar(
         bandLimitedTables_[rangeIndex],
         normalizationFactor,

package/common/cpp/utils/FFTFrame.cpp
@@ -5,7 +5,8 @@
 #endif
 
 #if defined(ANDROID)
-#include <
+#include <pffft.h>
+#include <complex>
 #endif
 
 namespace audioapi {
@@ -13,19 +14,11 @@ namespace audioapi {
 static std::unordered_map<size_t, FFTSetup> fftSetups_;
 
 FFTFrame::FFTFrame(int size)
-    : size_(size),
-      log2Size_(static_cast<int>(log2(size))),
-      realData_(new float[size]),
-      imaginaryData_(new float[size]) {
+    : size_(size), log2Size_(static_cast<int>(log2(size))) {
   fftSetup_ = getFFTSetupForSize(log2Size_);
-  frame_.realp = realData_;
-  frame_.imagp = imaginaryData_;
 }
 
-FFTFrame::~FFTFrame() {
-  delete[] realData_;
-  delete[] imaginaryData_;
-}
+FFTFrame::~FFTFrame() {}
 
 FFTSetup FFTFrame::getFFTSetupForSize(size_t log2FFTSize) {
   if (!fftSetups_.contains(log2FFTSize)) {
@@ -36,15 +29,22 @@ FFTSetup FFTFrame::getFFTSetupForSize(size_t log2FFTSize) {
   return fftSetups_.at(log2FFTSize);
 }
 
-void FFTFrame::doFFT(float *data) {
+void FFTFrame::doFFT(float *data, float *realData, float *imaginaryData) {
+  frame_.realp = realData;
+  frame_.imagp = imaginaryData;
   vDSP_ctoz(reinterpret_cast<DSPComplex *>(data), 2, &frame_, 1, size_ / 2);
   vDSP_fft_zrip(fftSetup_, &frame_, 1, log2Size_, FFT_FORWARD);
 
-  VectorMath::multiplyByScalar(
-  VectorMath::multiplyByScalar(
+  VectorMath::multiplyByScalar(realData, 0.5f, realData, size_ / 2);
+  VectorMath::multiplyByScalar(imaginaryData, 0.5f, imaginaryData, size_ / 2);
 }
 
-void FFTFrame::doInverseFFT(
+void FFTFrame::doInverseFFT(
+    float *data,
+    float *realData,
+    float *imaginaryData) {
+  frame_.realp = realData;
+  frame_.imagp = imaginaryData;
   vDSP_fft_zrip(fftSetup_, &frame_, 1, log2Size_, FFT_INVERSE);
   vDSP_ztoc(&frame_, 1, reinterpret_cast<DSPComplex *>(data), 2, size_ / 2);
 
@@ -57,42 +57,49 @@ void FFTFrame::doInverseFFT(float *data) {
 #elif defined(ANDROID)
 
 FFTFrame::FFTFrame(int size)
-    : size_(size),
-
-
-      imaginaryData_(new float[size]) {
-  frame_ = fftwf_alloc_complex(size / 2);
+    : size_(size), log2Size_(static_cast<int>(log2(size))) {
+  pffftSetup_ = pffft_new_setup(size_, PFFFT_REAL);
+  work_ = (float *)pffft_aligned_malloc(size_ * sizeof(float));
 }
 
 FFTFrame::~FFTFrame() {
-
-
-  fftwf_free(frame_);
+  pffft_destroy_setup(pffftSetup_);
+  pffft_aligned_free(work_);
 }
 
-void FFTFrame::doFFT(float *data) {
-
-
-
+void FFTFrame::doFFT(float *data, float *realData, float *imaginaryData) {
+  std::vector<std::complex<float>> out(size_);
+  pffft_transform_ordered(
+      pffftSetup_,
+      data,
+      reinterpret_cast<float *>(&out[0]),
+      work_,
+      PFFFT_FORWARD);
 
   for (int i = 0; i < size_ / 2; ++i) {
-
-
+    realData[i] = out[i].real();
+    imaginaryData[i] = out[i].imag();
   }
 
-  VectorMath::multiplyByScalar(
-  VectorMath::multiplyByScalar(
+  VectorMath::multiplyByScalar(realData, 0.5f, realData, size_ / 2);
+  VectorMath::multiplyByScalar(imaginaryData, 0.5f, imaginaryData, size_ / 2);
 }
 
-void FFTFrame::doInverseFFT(
+void FFTFrame::doInverseFFT(
+    float *data,
+    float *realData,
+    float *imaginaryData) {
+  std::vector<std::complex<float>> out(size_ / 2);
   for (int i = 0; i < size_ / 2; i++) {
-
-    frame_[i][1] = imaginaryData_[i];
+    out[i] = {realData[i], imaginaryData[i]};
   }
 
-
-
-
+  pffft_transform_ordered(
+      pffftSetup_,
+      reinterpret_cast<float *>(&out[0]),
+      data,
+      work_,
+      PFFFT_BACKWARD);
 
   VectorMath::multiplyByScalar(
       data, 1.0f / static_cast<float>(size_), data, size_);

package/common/cpp/utils/FFTFrame.h
@@ -40,7 +40,7 @@
 #endif
 
 #if defined(ANDROID)
-#include <
+#include <pffft.h>
 #endif
 
 namespace audioapi {
@@ -50,22 +50,12 @@ class FFTFrame {
  explicit FFTFrame(int size);
  ~FFTFrame();
 
-
-
-  }
-  [[nodiscard]] float *getImaginaryData() const {
-    return imaginaryData_;
-  }
-
-  void doFFT(float *data);
-
-  void doInverseFFT(float *data);
+  void doFFT(float *data, float *realData, float *imaginaryData);
+  void doInverseFFT(float *data, float *realData, float *imaginaryData);
 
 private:
  int size_;
  int log2Size_;
-  float *realData_;
-  float *imaginaryData_;
 
 #if defined(HAVE_ACCELERATE)
  FFTSetup fftSetup_;
@@ -75,7 +65,8 @@ class FFTFrame {
 #endif
 
 #if defined(ANDROID)
-
+  PFFFT_Setup *pffftSetup_;
+  float *work_;
 #endif
 };
 
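
Taken together, the FFTFrame changes move ownership of the real/imaginary spectrum buffers from FFTFrame into its callers: AnalyserNode keeps them as AudioArray members, and PeriodicWave allocates temporaries around the transform. The sketch below shows a hypothetical caller of the new signatures; the function name, include paths, and buffer sizes are assumptions, while the class names and methods come from the hunks above.

```cpp
#include "AudioArray.h"  // assumed header locations for the package's internals
#include "FFTFrame.h"

// Hypothetical caller: analyse one block of time-domain samples of length fftSize.
void analyseBlock(float *timeDomainData, int fftSize) {
  audioapi::FFTFrame fftFrame(fftSize);
  audioapi::AudioArray real(fftSize);
  audioapi::AudioArray imaginary(fftSize);

  // Forward FFT writes the spectrum into the caller-provided buffers.
  fftFrame.doFFT(timeDomainData, real.getData(), imaginary.getData());

  // ... inspect or modify real/imaginary here ...

  // Inverse FFT reads the same buffers and writes the time-domain signal back.
  fftFrame.doInverseFFT(timeDomainData, real.getData(), imaginary.getData());
}
```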

package/lib/module/core/AudioContext.js
@@ -1,12 +1,16 @@
 "use strict";
 
 import BaseAudioContext from "./BaseAudioContext.js";
+import { NotSupportedError } from "../errors/index.js";
 export default class AudioContext extends BaseAudioContext {
-  constructor(
-
+  constructor(options) {
+    if (options && (options.sampleRate < 8000 || options.sampleRate > 96000)) {
+      throw new NotSupportedError(`The provided sampleRate is not supported: ${options.sampleRate}`);
+    }
+    super(global.__AudioAPIInstaller.createAudioContext(options?.sampleRate));
   }
-  close() {
-    this.context.close();
+  async close() {
+    await this.context.close();
   }
 }
 //# sourceMappingURL=AudioContext.js.map

package/lib/module/core/AudioContext.js.map
@@ -1 +1 @@
-{"version":3,"names":["BaseAudioContext","AudioContext","constructor","sampleRate","global","__AudioAPIInstaller","createAudioContext","close","context"],"sourceRoot":"../../../src","sources":["core/AudioContext.ts"],"mappings":";;AACA,OAAOA,gBAAgB,MAAM,uBAAoB;AAEjD,eAAe,MAAMC,YAAY,
+{"version":3,"names":["BaseAudioContext","NotSupportedError","AudioContext","constructor","options","sampleRate","global","__AudioAPIInstaller","createAudioContext","close","context"],"sourceRoot":"../../../src","sources":["core/AudioContext.ts"],"mappings":";;AACA,OAAOA,gBAAgB,MAAM,uBAAoB;AAEjD,SAASC,iBAAiB,QAAQ,oBAAW;AAE7C,eAAe,MAAMC,YAAY,SAASF,gBAAgB,CAAC;EACzDG,WAAWA,CAACC,OAA6B,EAAE;IACzC,IAAIA,OAAO,KAAKA,OAAO,CAACC,UAAU,GAAG,IAAI,IAAID,OAAO,CAACC,UAAU,GAAG,KAAK,CAAC,EAAE;MACxE,MAAM,IAAIJ,iBAAiB,CACzB,6CAA6CG,OAAO,CAACC,UAAU,EACjE,CAAC;IACH;IAEA,KAAK,CAACC,MAAM,CAACC,mBAAmB,CAACC,kBAAkB,CAACJ,OAAO,EAAEC,UAAU,CAAC,CAAC;EAC3E;EAEA,MAAMI,KAAKA,CAAA,EAAuB;IAChC,MAAO,IAAI,CAACC,OAAO,CAAmBD,KAAK,CAAC,CAAC;EAC/C;AACF","ignoreList":[]}

package/lib/module/core/AudioNode.js
@@ -16,6 +16,7 @@ export default class AudioNode {
       throw new InvalidAccessError('The AudioNodes are from different BaseAudioContexts');
     }
     this.node.connect(destination.node);
+    return destination;
   }
   disconnect(destination) {
     this.node.disconnect(destination?.node);

package/lib/module/core/AudioNode.js.map
@@ -1 +1 @@
-{"version":3,"names":["InvalidAccessError","AudioNode","constructor","context","node","numberOfInputs","numberOfOutputs","channelCount","channelCountMode","channelInterpretation","connect","destination","disconnect"],"sourceRoot":"../../../src","sources":["core/AudioNode.ts"],"mappings":";;AAGA,SAASA,kBAAkB,QAAQ,oBAAW;AAE9C,eAAe,MAAMC,SAAS,CAAC;EAS7BC,WAAWA,CAACC,OAAyB,EAAEC,IAAgB,EAAE;IACvD,IAAI,CAACD,OAAO,GAAGA,OAAO;IACtB,IAAI,CAACC,IAAI,GAAGA,IAAI;IAChB,IAAI,CAACC,cAAc,GAAG,IAAI,CAACD,IAAI,CAACC,cAAc;IAC9C,IAAI,CAACC,eAAe,GAAG,IAAI,CAACF,IAAI,CAACE,eAAe;IAChD,IAAI,CAACC,YAAY,GAAG,IAAI,CAACH,IAAI,CAACG,YAAY;IAC1C,IAAI,CAACC,gBAAgB,GAAG,IAAI,CAACJ,IAAI,CAACI,gBAAgB;IAClD,IAAI,CAACC,qBAAqB,GAAG,IAAI,CAACL,IAAI,CAACK,qBAAqB;EAC9D;EAEOC,OAAOA,CAACC,WAAsB,
+{"version":3,"names":["InvalidAccessError","AudioNode","constructor","context","node","numberOfInputs","numberOfOutputs","channelCount","channelCountMode","channelInterpretation","connect","destination","disconnect"],"sourceRoot":"../../../src","sources":["core/AudioNode.ts"],"mappings":";;AAGA,SAASA,kBAAkB,QAAQ,oBAAW;AAE9C,eAAe,MAAMC,SAAS,CAAC;EAS7BC,WAAWA,CAACC,OAAyB,EAAEC,IAAgB,EAAE;IACvD,IAAI,CAACD,OAAO,GAAGA,OAAO;IACtB,IAAI,CAACC,IAAI,GAAGA,IAAI;IAChB,IAAI,CAACC,cAAc,GAAG,IAAI,CAACD,IAAI,CAACC,cAAc;IAC9C,IAAI,CAACC,eAAe,GAAG,IAAI,CAACF,IAAI,CAACE,eAAe;IAChD,IAAI,CAACC,YAAY,GAAG,IAAI,CAACH,IAAI,CAACG,YAAY;IAC1C,IAAI,CAACC,gBAAgB,GAAG,IAAI,CAACJ,IAAI,CAACI,gBAAgB;IAClD,IAAI,CAACC,qBAAqB,GAAG,IAAI,CAACL,IAAI,CAACK,qBAAqB;EAC9D;EAEOC,OAAOA,CAACC,WAAsB,EAAa;IAChD,IAAI,IAAI,CAACR,OAAO,KAAKQ,WAAW,CAACR,OAAO,EAAE;MACxC,MAAM,IAAIH,kBAAkB,CAC1B,qDACF,CAAC;IACH;IAEA,IAAI,CAACI,IAAI,CAACM,OAAO,CAACC,WAAW,CAACP,IAAI,CAAC;IAEnC,OAAOO,WAAW;EACpB;EAEOC,UAAUA,CAACD,WAAuB,EAAQ;IAC/C,IAAI,CAACP,IAAI,CAACQ,UAAU,CAACD,WAAW,EAAEP,IAAI,CAAC;EACzC;AACF","ignoreList":[]}

package/lib/module/core/AudioParam.js
@@ -1,6 +1,6 @@
 "use strict";
 
-import { RangeError } from "../errors/index.js";
+import { RangeError, InvalidStateError } from "../errors/index.js";
 export default class AudioParam {
   constructor(audioParam) {
     this.audioParam = audioParam;
@@ -17,45 +17,61 @@ export default class AudioParam {
   }
   setValueAtTime(value, startTime) {
     if (startTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`startTime must be a finite non-negative number: ${startTime}`);
     }
     this.audioParam.setValueAtTime(value, startTime);
+    return this;
   }
   linearRampToValueAtTime(value, endTime) {
     if (endTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`endTime must be a finite non-negative number: ${endTime}`);
     }
     this.audioParam.linearRampToValueAtTime(value, endTime);
+    return this;
   }
   exponentialRampToValueAtTime(value, endTime) {
     if (endTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`endTime must be a finite non-negative number: ${endTime}`);
     }
     this.audioParam.exponentialRampToValueAtTime(value, endTime);
+    return this;
   }
   setTargetAtTime(target, startTime, timeConstant) {
     if (startTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`startTime must be a finite non-negative number: ${startTime}`);
+    }
+    if (timeConstant < 0) {
+      throw new RangeError(`timeConstant must be a finite non-negative number: ${startTime}`);
     }
     this.audioParam.setTargetAtTime(target, startTime, timeConstant);
+    return this;
   }
   setValueCurveAtTime(values, startTime, duration) {
     if (startTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`startTime must be a finite non-negative number: ${startTime}`);
+    }
+    if (duration < 0) {
+      throw new RangeError(`duration must be a finite non-negative number: ${startTime}`);
+    }
+    if (values.length < 2) {
+      throw new InvalidStateError(`values must contain at least two values`);
     }
     this.audioParam.setValueCurveAtTime(values, startTime, duration);
+    return this;
   }
   cancelScheduledValues(cancelTime) {
     if (cancelTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`cancelTime must be a finite non-negative number: ${cancelTime}`);
     }
     this.audioParam.cancelScheduledValues(cancelTime);
+    return this;
   }
   cancelAndHoldAtTime(cancelTime) {
     if (cancelTime < 0) {
-      throw new RangeError(`
+      throw new RangeError(`cancelTime must be a finite non-negative number: ${cancelTime}`);
     }
     this.audioParam.cancelAndHoldAtTime(cancelTime);
+    return this;
   }
 }
 //# sourceMappingURL=AudioParam.js.map