react-native-audio-api 0.4.8-rc2 → 0.4.8
This diff reflects the changes between publicly available package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/android/build.gradle +25 -2
- package/android/src/main/cpp/core/AudioDecoder.cpp +10 -1
- package/android/src/main/cpp/core/AudioPlayer.cpp +6 -3
- package/common/cpp/core/AnalyserNode.cpp +2 -6
- package/common/cpp/core/AudioBuffer.cpp +1 -1
- package/common/cpp/core/AudioBufferSourceNode.cpp +26 -16
- package/common/cpp/core/AudioBus.cpp +105 -13
- package/common/cpp/core/AudioBus.h +6 -4
- package/common/cpp/core/AudioContext.cpp +4 -3
- package/common/cpp/core/AudioContext.h +4 -4
- package/common/cpp/core/AudioDestinationNode.cpp +2 -3
- package/common/cpp/core/AudioNode.cpp +78 -58
- package/common/cpp/core/AudioNode.h +10 -1
- package/common/cpp/core/AudioNodeManager.cpp +13 -1
- package/common/cpp/core/AudioNodeManager.h +2 -0
- package/common/cpp/core/AudioScheduledSourceNode.cpp +5 -1
- package/common/cpp/core/BaseAudioContext.cpp +4 -1
- package/common/cpp/core/BaseAudioContext.h +4 -2
- package/common/cpp/core/StereoPannerNode.cpp +9 -12
- package/ios/core/AudioDecoder.mm +10 -1
- package/ios/core/AudioPlayer.m +23 -23
- package/ios/core/IOSAudioPlayer.mm +3 -3
- package/lib/module/core/AudioBufferSourceNode.js +2 -2
- package/lib/module/core/AudioBufferSourceNode.js.map +1 -1
- package/lib/module/index.js +19 -335
- package/lib/module/index.js.map +1 -1
- package/lib/module/index.web.js +18 -0
- package/lib/module/index.web.js.map +1 -0
- package/lib/module/types.js.map +1 -0
- package/lib/module/web-core/AnalyserNode.js +48 -0
- package/lib/module/web-core/AnalyserNode.js.map +1 -0
- package/lib/module/web-core/AudioBuffer.js +43 -0
- package/lib/module/web-core/AudioBuffer.js.map +1 -0
- package/lib/module/web-core/AudioBufferSourceNode.js +62 -0
- package/lib/module/web-core/AudioBufferSourceNode.js.map +1 -0
- package/lib/module/web-core/AudioContext.js +69 -0
- package/lib/module/web-core/AudioContext.js.map +1 -0
- package/lib/module/web-core/AudioDestinationNode.js +5 -0
- package/lib/module/web-core/AudioDestinationNode.js.map +1 -0
- package/lib/module/web-core/AudioNode.js +27 -0
- package/lib/module/web-core/AudioNode.js.map +1 -0
- package/lib/module/web-core/AudioParam.js +60 -0
- package/lib/module/web-core/AudioParam.js.map +1 -0
- package/lib/module/web-core/AudioScheduledSourceNode.js +27 -0
- package/lib/module/web-core/AudioScheduledSourceNode.js.map +1 -0
- package/lib/module/web-core/BaseAudioContext.js +2 -0
- package/lib/module/{core/types.js.map → web-core/BaseAudioContext.js.map} +1 -1
- package/lib/module/web-core/BiquadFilterNode.js +35 -0
- package/lib/module/web-core/BiquadFilterNode.js.map +1 -0
- package/lib/module/web-core/GainNode.js +11 -0
- package/lib/module/web-core/GainNode.js.map +1 -0
- package/lib/module/web-core/OscillatorNode.js +25 -0
- package/lib/module/web-core/OscillatorNode.js.map +1 -0
- package/lib/module/web-core/PeriodicWave.js +10 -0
- package/lib/module/web-core/PeriodicWave.js.map +1 -0
- package/lib/module/web-core/StereoPannerNode.js +11 -0
- package/lib/module/web-core/StereoPannerNode.js.map +1 -0
- package/lib/typescript/core/AnalyserNode.d.ts +1 -1
- package/lib/typescript/core/AnalyserNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioNode.d.ts +1 -1
- package/lib/typescript/core/AudioNode.d.ts.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/core/BiquadFilterNode.d.ts +1 -1
- package/lib/typescript/core/BiquadFilterNode.d.ts.map +1 -1
- package/lib/typescript/core/OscillatorNode.d.ts +1 -1
- package/lib/typescript/core/OscillatorNode.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +15 -126
- package/lib/typescript/index.d.ts.map +1 -1
- package/lib/typescript/index.web.d.ts +16 -0
- package/lib/typescript/index.web.d.ts.map +1 -0
- package/lib/typescript/interfaces.d.ts +1 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts.map +1 -0
- package/lib/typescript/web-core/AnalyserNode.d.ts +18 -0
- package/lib/typescript/web-core/AnalyserNode.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioBuffer.d.ts +13 -0
- package/lib/typescript/web-core/AudioBuffer.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioBufferSourceNode.d.ts +19 -0
- package/lib/typescript/web-core/AudioBufferSourceNode.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioContext.d.ts +30 -0
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioDestinationNode.d.ts +4 -0
- package/lib/typescript/web-core/AudioDestinationNode.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioNode.d.ts +15 -0
- package/lib/typescript/web-core/AudioNode.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioParam.d.ts +17 -0
- package/lib/typescript/web-core/AudioParam.d.ts.map +1 -0
- package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts +7 -0
- package/lib/typescript/web-core/AudioScheduledSourceNode.d.ts.map +1 -0
- package/lib/typescript/web-core/BaseAudioContext.d.ts +27 -0
- package/lib/typescript/web-core/BaseAudioContext.d.ts.map +1 -0
- package/lib/typescript/web-core/BiquadFilterNode.d.ts +15 -0
- package/lib/typescript/web-core/BiquadFilterNode.d.ts.map +1 -0
- package/lib/typescript/web-core/GainNode.d.ts +8 -0
- package/lib/typescript/web-core/GainNode.d.ts.map +1 -0
- package/lib/typescript/web-core/OscillatorNode.d.ts +14 -0
- package/lib/typescript/web-core/OscillatorNode.d.ts.map +1 -0
- package/lib/typescript/web-core/PeriodicWave.d.ts +6 -0
- package/lib/typescript/web-core/PeriodicWave.d.ts.map +1 -0
- package/lib/typescript/web-core/StereoPannerNode.d.ts +8 -0
- package/lib/typescript/web-core/StereoPannerNode.d.ts.map +1 -0
- package/package.json +1 -1
- package/src/core/AnalyserNode.ts +1 -1
- package/src/core/AudioBufferSourceNode.ts +2 -2
- package/src/core/AudioNode.ts +1 -1
- package/src/core/BaseAudioContext.ts +1 -1
- package/src/core/BiquadFilterNode.ts +1 -1
- package/src/core/OscillatorNode.ts +1 -1
- package/src/index.ts +30 -568
- package/src/index.web.ts +30 -0
- package/src/interfaces.ts +1 -1
- package/src/web-core/AnalyserNode.tsx +69 -0
- package/src/web-core/AudioBuffer.tsx +79 -0
- package/src/web-core/AudioBufferSourceNode.tsx +94 -0
- package/src/web-core/AudioContext.tsx +114 -0
- package/src/web-core/AudioDestinationNode.tsx +3 -0
- package/src/web-core/AudioNode.tsx +40 -0
- package/src/web-core/AudioParam.tsx +106 -0
- package/src/web-core/AudioScheduledSourceNode.tsx +37 -0
- package/src/web-core/BaseAudioContext.tsx +37 -0
- package/src/web-core/BiquadFilterNode.tsx +62 -0
- package/src/web-core/GainNode.tsx +12 -0
- package/src/web-core/OscillatorNode.tsx +36 -0
- package/src/web-core/PeriodicWave.tsx +8 -0
- package/src/web-core/StereoPannerNode.tsx +12 -0
- package/lib/module/index.native.js +0 -21
- package/lib/module/index.native.js.map +0 -1
- package/lib/typescript/core/types.d.ts.map +0 -1
- package/lib/typescript/index.native.d.ts +0 -15
- package/lib/typescript/index.native.d.ts.map +0 -1
- package/src/index.native.ts +0 -27
- /package/lib/module/{core/types.js → types.js} +0 -0
- /package/lib/typescript/{core/types.d.ts → types.d.ts} +0 -0
- /package/src/{core/types.ts → types.ts} +0 -0
package/android/build.gradle
CHANGED
@@ -118,13 +118,36 @@ android {
         "-DREACT_NATIVE_MINOR_VERSION=${REACT_NATIVE_MINOR_VERSION}",
         "-DANDROID_TOOLCHAIN=clang",
         "-DREACT_NATIVE_DIR=${toPlatformFileString(reactNativeRootDir.path)}",
-        "-DIS_NEW_ARCHITECTURE_ENABLED=${IS_NEW_ARCHITECTURE_ENABLED}"
+        "-DIS_NEW_ARCHITECTURE_ENABLED=${IS_NEW_ARCHITECTURE_ENABLED}",
+        "-DANDROID_SUPPORT_FLEXIBLE_PAGE_SIZES=ON"
       }
     }
   }

   packagingOptions {
-    excludes = [
+    excludes = [
+      "META-INF",
+      "META-INF/**",
+      "**/libc++_shared.so",
+      "**/libfbjni.so",
+      "**/libjsi.so",
+      "**/libfolly_json.so",
+      "**/libfolly_runtime.so",
+      "**/libglog.so",
+      "**/libhermes.so",
+      "**/libhermes-executor-debug.so",
+      "**/libhermes_executor.so",
+      "**/libhermestooling.so",
+      "**/libreactnativejni.so",
+      "**/libturbomodulejsijni.so",
+      "**/libreactnative.so",
+      "**/libreact_nativemodule_core.so",
+      "**/libreact_render*.so",
+      "**/librrc_root.so",
+      "**/libjscexecutor.so",
+      "**/libv8executor.so",
+      "**/libreanimated.so"
+    ]
   }

   externalNativeBuild {
package/android/src/main/cpp/core/AudioDecoder.cpp
CHANGED
@@ -20,6 +20,9 @@ AudioBus *AudioDecoder::decodeWithFilePath(const std::string &path) const {
         "AudioDecoder",
         "Failed to initialize decoder for file: %s",
         path.c_str());
+
+    ma_decoder_uninit(&decoder);
+
     return nullptr;
   }

@@ -27,7 +30,7 @@ AudioBus *AudioDecoder::decodeWithFilePath(const std::string &path) const {
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);

   auto *audioBus =
-      new AudioBus(
+      new AudioBus(static_cast<int>(totalFrameCount), 2, sampleRate_);
   auto *buffer = new float[totalFrameCount * 2];

   ma_uint64 framesDecoded;
@@ -38,6 +41,12 @@ AudioBus *AudioDecoder::decodeWithFilePath(const std::string &path) const {
         "AudioDecoder",
         "Failed to decode audio file: %s",
         path.c_str());
+
+    delete[] buffer;
+    delete audioBus;
+    ma_decoder_uninit(&decoder);
+
+    return nullptr;
   }

   for (int i = 0; i < decoder.outputChannels; ++i) {
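The decoder changes above release the miniaudio decoder and the partially built outputs on both failure paths before returning nullptr. A generic scope guard is one way to express the same cleanup without repeating it in every branch; the sketch below is illustrative only (ScopeGuard is a hypothetical helper, not part of this package).

#include <utility>

// Hypothetical helper: runs a cleanup callable on scope exit unless the
// resources were successfully handed off to the caller.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F fn) : fn_(std::move(fn)) {}
  ScopeGuard(const ScopeGuard &) = delete;
  ScopeGuard &operator=(const ScopeGuard &) = delete;
  ~ScopeGuard() {
    if (active_) {
      fn_();
    }
  }
  void dismiss() { active_ = false; }  // call once decoding succeeded

 private:
  F fn_;
  bool active_ = true;
};

With such a guard, declaring ScopeGuard cleanup([&] { ma_decoder_uninit(&decoder); }); right after initialization would cover every early return, which is the same leak the diff now fixes by hand.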
package/android/src/main/cpp/core/AudioPlayer.cpp
CHANGED
@@ -1,8 +1,9 @@
+#include <cassert>

-#include "AudioPlayer.h"
 #include "AudioArray.h"
 #include "AudioBus.h"
 #include "AudioContext.h"
+#include "AudioPlayer.h"
 #include "Constants.h"

 namespace audioapi {
@@ -23,7 +24,7 @@ AudioPlayer::AudioPlayer(

   sampleRate_ = static_cast<float>(mStream_->getSampleRate());
   mBus_ = std::make_shared<AudioBus>(
-
+      RENDER_QUANTUM_SIZE, CHANNEL_COUNT, sampleRate_);
   isInitialized_ = true;
 }

@@ -45,7 +46,7 @@ AudioPlayer::AudioPlayer(

   sampleRate_ = sampleRate;
   mBus_ = std::make_shared<AudioBus>(
-
+      RENDER_QUANTUM_SIZE, CHANNEL_COUNT, sampleRate_);
   isInitialized_ = true;
 }

@@ -80,6 +81,8 @@ DataCallbackResult AudioPlayer::onAudioReady(
   auto buffer = static_cast<float *>(audioData);
   int processedFrames = 0;

+  assert(buffer != nullptr);
+
   while (processedFrames < numFrames) {
     int framesToProcess =
         std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE);
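For context, onAudioReady fills the oboe callback buffer in render-quantum-sized chunks, which is why the new assert on the buffer pointer sits just before the loop. The sketch below restates that loop shape in isolation; kRenderQuantumSize, the mono layout and the zero-fill body are assumptions for illustration, not the package's actual constants or rendering code.

#include <algorithm>
#include <cassert>
#include <cstring>

constexpr int kRenderQuantumSize = 128;  // assumption: a typical render quantum

// Process numFrames samples in fixed-size chunks; each chunk is zeroed here
// (silence) just to keep the sketch self-contained and runnable.
void renderInQuanta(float *buffer, int numFrames) {
  assert(buffer != nullptr);
  int processedFrames = 0;
  while (processedFrames < numFrames) {
    int framesToProcess =
        std::min(numFrames - processedFrames, kRenderQuantumSize);
    std::memset(buffer + processedFrames, 0, framesToProcess * sizeof(float));
    processedFrames += framesToProcess;
  }
}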
package/common/cpp/core/AnalyserNode.cpp
CHANGED
@@ -21,7 +21,7 @@ AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
   inputBuffer_ = std::make_unique<AudioArray>(MAX_FFT_SIZE * 2);
   magnitudeBuffer_ = std::make_unique<AudioArray>(fftSize_ / 2);
   downMixBus_ = std::make_unique<AudioBus>(
-      context_->getSampleRate()
+      RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());

   fftFrame_ = std::make_unique<FFTFrame>(fftSize_);

@@ -145,14 +145,10 @@ void AnalyserNode::getByteTimeDomainData(uint8_t *data, int length) {
 void AnalyserNode::processNode(
     audioapi::AudioBus *processingBus,
     int framesToProcess) {
-  if (!isInitialized_) {
-    processingBus->zero();
-    return;
-  }
-
   // Analyser should behave like a sniffer node, it should not modify the
   // processingBus but instead copy the data to its own input buffer.

+  // Down mix the input bus to mono
   downMixBus_->copy(processingBus);

   if (vWriteIndex_ + framesToProcess > inputBuffer_->getSize()) {
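The analyser copies the down-mixed input into a fixed-size analysis buffer and wraps the write index when a block would run past the end, which is what the vWriteIndex_ check guards. A minimal sketch of that wrap-around write, using illustrative names rather than the node's actual members (the ring is assumed non-empty):

#include <cstddef>
#include <vector>

// Append frames samples into a circular analysis buffer, wrapping the write
// index at the end of the storage.
void writeWithWrap(std::vector<float> &ring, size_t &writeIndex,
                   const float *input, size_t frames) {
  for (size_t i = 0; i < frames; ++i) {
    ring[writeIndex] = input[i];
    writeIndex = (writeIndex + 1) % ring.size();
  }
}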
package/common/cpp/core/AudioBuffer.cpp
CHANGED
@@ -8,7 +8,7 @@ AudioBuffer::AudioBuffer(
     int numberOfChannels,
     size_t length,
     float sampleRate) {
-  bus_ = std::make_shared<AudioBus>(
+  bus_ = std::make_shared<AudioBus>(length, numberOfChannels, sampleRate);
 }

 AudioBuffer::AudioBuffer(AudioBus *bus) {
package/common/cpp/core/AudioBufferSourceNode.cpp
CHANGED
@@ -1,4 +1,5 @@
 #include <algorithm>
+#include <cassert>

 #include "AudioArray.h"
 #include "AudioBufferSourceNode.h"
@@ -17,7 +18,8 @@ AudioBufferSourceNode::AudioBufferSourceNode(BaseAudioContext *context)
       loopEnd_(0),
       vReadIndex_(0.0) {
   buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
-  alignedBus_ = std::
+  alignedBus_ = std::make_shared<AudioBus>(
+      RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());

   detuneParam_ = std::make_shared<AudioParam>(0.0, MIN_DETUNE, MAX_DETUNE);
   playbackRateParam_ = std::make_shared<AudioParam>(
@@ -67,26 +69,33 @@ void AudioBufferSourceNode::setBuffer(
     const std::shared_ptr<AudioBuffer> &buffer) {
   if (!buffer) {
     buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
-    alignedBus_ = std::
+    alignedBus_ = std::make_shared<AudioBus>(
+        RENDER_QUANTUM_SIZE, 1, context_->getSampleRate());
     loopEnd_ = 0;
     return;
   }

   buffer_ = buffer;
-
-      context_->getSampleRate(),
-      buffer_->getLength(),
-      buffer_->getNumberOfChannels());
+  channelCount_ = buffer_->getNumberOfChannels();

+  alignedBus_ = std::make_shared<AudioBus>(
+      buffer_->getLength(), channelCount_, context_->getSampleRate());
   alignedBus_->zero();
   alignedBus_->sum(buffer_->bus_.get());

+  audioBus_ = std::make_shared<AudioBus>(
+      RENDER_QUANTUM_SIZE, channelCount_, context_->getSampleRate());
+
   loopEnd_ = buffer_->getDuration();
 }

 void AudioBufferSourceNode::start(double when, double offset, double duration) {
   AudioScheduledSourceNode::start(when);

+  if (duration > 0) {
+    AudioScheduledSourceNode::stop(when + duration);
+  }
+
   if (!buffer_) {
     return;
   }
@@ -98,29 +107,30 @@ void AudioBufferSourceNode::start(double when, double offset, double duration) {
   }

   vReadIndex_ = static_cast<double>(buffer_->getSampleRate() * offset);
-
-  if (duration > 0) {
-    AudioScheduledSourceNode::stop(when + duration);
-  }
 }

 void AudioBufferSourceNode::processNode(
     AudioBus *processingBus,
     int framesToProcess) {
+  // No audio data to fill, zero the output and return.
+  if (!buffer_) {
+    processingBus->zero();
+    return;
+  }
+
   size_t startOffset = 0;
   size_t offsetLength = 0;

   updatePlaybackInfo(processingBus, framesToProcess, startOffset, offsetLength);
   float playbackRate = getPlaybackRateValue(startOffset);

-
-
-
+  assert(alignedBus_ != nullptr);
+  assert(alignedBus_->getSize() > 0);
+
+  if (playbackRate == 0.0f || !isPlaying()) {
     processingBus->zero();
     return;
-  }
-
-  if (std::fabs(playbackRate) == 1.0) {
+  } else if (std::fabs(playbackRate) == 1.0) {
     processWithoutInterpolation(
         processingBus, startOffset, offsetLength, playbackRate);
   } else {
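When the effective playback rate is exactly 1, processNode copies frames directly; otherwise it reads at a fractional position driven by vReadIndex_. The snippet below shows the linear-interpolation read that such a fractional index implies for a mono buffer; it is a simplified sketch (no looping, vReadIndex assumed within [0, length - 1]) and not the package's actual implementation.

#include <algorithm>
#include <cstddef>

// Read one sample at a fractional position using linear interpolation between
// the two neighbouring frames.
float readInterpolated(const float *data, size_t length, double vReadIndex) {
  size_t i0 = static_cast<size_t>(vReadIndex);
  size_t i1 = std::min(i0 + 1, length - 1);
  double frac = vReadIndex - static_cast<double>(i0);
  return static_cast<float>((1.0 - frac) * data[i0] + frac * data[i1]);
}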
package/common/cpp/core/AudioBus.cpp
CHANGED
@@ -18,7 +18,7 @@ namespace audioapi {
  * Public interfaces - memory management
  */

-AudioBus::AudioBus(
+AudioBus::AudioBus(size_t size, int numberOfChannels, float sampleRate)
     : numberOfChannels_(numberOfChannels),
       sampleRate_(sampleRate),
       size_(size) {
@@ -161,19 +161,26 @@ float AudioBus::maxAbsValue() const {
   return maxAbsValue;
 }

-void AudioBus::sum(
-
+void AudioBus::sum(
+    const AudioBus *source,
+    ChannelInterpretation interpretation) {
+  sum(source, 0, 0, getSize(), interpretation);
 }

-void AudioBus::sum(
-
+void AudioBus::sum(
+    const AudioBus *source,
+    size_t start,
+    size_t length,
+    ChannelInterpretation interpretation) {
+  sum(source, start, start, length, interpretation);
 }

 void AudioBus::sum(
     const AudioBus *source,
     size_t sourceStart,
     size_t destinationStart,
-    size_t length
+    size_t length,
+    ChannelInterpretation interpretation) {
   if (source == this) {
     return;
   }
@@ -181,9 +188,12 @@ void AudioBus::sum(
   int numberOfSourceChannels = source->getNumberOfChannels();
   int numberOfChannels = getNumberOfChannels();

-
-
-
+  if (interpretation == ChannelInterpretation::DISCRETE) {
+    discreteSum(source, sourceStart, destinationStart, length);
+    return;
+  }
+
+  // Source channel count is smaller than current bus, we need to up-mix.
   if (numberOfSourceChannels < numberOfChannels) {
     sumByUpMixing(source, sourceStart, destinationStart, length);
     return;
@@ -373,8 +383,9 @@ void AudioBus::sumByDownMixing(
     return;
   }

-  // Stereo 4 to mono
-  // input.
+  // Stereo 4 to mono (4 -> 1):
+  // output += 0.25 * (input.left + input.right + input.surroundLeft +
+  // input.surroundRight)
   if (numberOfSourceChannels == 4 && numberOfChannels == 1) {
     float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
     float *sourceRight = source->getChannelByType(ChannelRight)->getData();
@@ -408,7 +419,88 @@ void AudioBus::sumByDownMixing(
     return;
   }

-  // 5.1 to
+  // 5.1 to mono (6 -> 1):
+  // output += sqrt(1/2) * (input.left + input.right) + input.center + 0.5 *
+  // (input.surroundLeft + input.surroundRight)
+  if (numberOfSourceChannels == 6 && numberOfChannels == 1) {
+    float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+    float *sourceRight = source->getChannelByType(ChannelRight)->getData();
+    float *sourceCenter = source->getChannelByType(ChannelCenter)->getData();
+    float *sourceSurroundLeft =
+        source->getChannelByType(ChannelSurroundLeft)->getData();
+    float *sourceSurroundRight =
+        source->getChannelByType(ChannelSurroundRight)->getData();
+
+    float *destinationData = getChannelByType(ChannelMono)->getData();
+
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceLeft + sourceStart,
+        SQRT_HALF,
+        destinationData + destinationStart,
+        length);
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceRight + sourceStart,
+        SQRT_HALF,
+        destinationData + destinationStart,
+        length);
+    VectorMath::add(
+        sourceCenter + sourceStart,
+        destinationData + destinationStart,
+        destinationData + destinationStart,
+        length);
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceSurroundLeft + sourceStart,
+        0.5f,
+        destinationData + destinationStart,
+        length);
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceSurroundRight + sourceStart,
+        0.5f,
+        destinationData + destinationStart,
+        length);
+
+    return;
+  }
+
+  // Stereo 4 to stereo 2 (4 -> 2):
+  // output.left += 0.5 * (input.left + input.surroundLeft)
+  // output.right += 0.5 * (input.right + input.surroundRight)
+  if (numberOfSourceChannels == 4 && numberOfChannels == 2) {
+    float *sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+    float *sourceRight = source->getChannelByType(ChannelRight)->getData();
+    float *sourceSurroundLeft =
+        source->getChannelByType(ChannelSurroundLeft)->getData();
+    float *sourceSurroundRight =
+        source->getChannelByType(ChannelSurroundRight)->getData();
+
+    float *destinationLeft = getChannelByType(ChannelLeft)->getData();
+    float *destinationRight = getChannelByType(ChannelRight)->getData();
+
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceLeft + sourceStart,
+        0.5f,
+        destinationLeft + destinationStart,
+        length);
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceSurroundLeft + sourceStart,
+        0.5f,
+        destinationLeft + destinationStart,
+        length);
+
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceRight + sourceStart,
+        0.5f,
+        destinationRight + destinationStart,
+        length);
+    VectorMath::multiplyByScalarThenAddToOutput(
+        sourceSurroundRight + sourceStart,
+        0.5f,
+        destinationRight + destinationStart,
+        length);
+    return;
+  }
+
+  // 5.1 to stereo (6 -> 2):
   // output.left += input.left + sqrt(1/2) * (input.center + input.surroundLeft)
   // output.right += input.right + sqrt(1/2) * (input.center +
   // input.surroundRight)
@@ -458,7 +550,7 @@ void AudioBus::sumByDownMixing(
     return;
   }

-  // 5.1 to stereo 4:
+  // 5.1 to stereo 4 (6 -> 4):
   // output.left += input.left + sqrt(1/2) * input.center
   // output.right += input.right + sqrt(1/2) * input.center
   // output.surroundLeft += input.surroundLeft
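The new down-mix branches follow the standard Web Audio mixing rules quoted in the comments. As a single-frame sanity check, here is the 5.1-to-mono formula in plain C++; the Frame51 struct and kSqrtHalf constant are purely illustrative, since the package applies the same math to whole channel arrays through VectorMath.

#include <cmath>

struct Frame51 {
  float left, right, center, lfe, surroundLeft, surroundRight;
};

// output += sqrt(1/2) * (L + R) + C + 0.5 * (SL + SR); the LFE channel is
// dropped, matching the comment in the diff.
float downMix51ToMono(const Frame51 &in) {
  const float kSqrtHalf = std::sqrt(0.5f);
  return kSqrtHalf * (in.left + in.right) + in.center +
      0.5f * (in.surroundLeft + in.surroundRight);
}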
package/common/cpp/core/AudioBus.h
CHANGED
@@ -5,6 +5,8 @@
 #include <vector>
 #include <cstddef>

+#include "ChannelInterpretation.h"
+
 namespace audioapi {

 class BaseAudioContext;
@@ -22,7 +24,7 @@ class AudioBus {
     ChannelSurroundRight = 5,
   };

-  explicit AudioBus(
+  explicit AudioBus(size_t size, int numberOfChannels, float sampleRate);

   ~AudioBus();

@@ -39,13 +41,13 @@ class AudioBus {
   void zero();
   void zero(size_t start, size_t length);

-  void sum(const AudioBus *source);
-  void sum(const AudioBus *source, size_t start, size_t length);
+  void sum(const AudioBus *source, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);
+  void sum(const AudioBus *source, size_t start, size_t length, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);
   void sum(
       const AudioBus *source,
       size_t sourceStart,
       size_t destinationStart,
-      size_t length);
+      size_t length, ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS);

   void copy(const AudioBus *source);
   void copy(const AudioBus *source, size_t start, size_t length);
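Every sum overload now takes a ChannelInterpretation defaulting to SPEAKERS, so existing callers keep the speaker-layout mixing while DISCRETE requests a plain channel-by-channel accumulate. A rough sketch of what the DISCRETE path means, written against an illustrative raw-pointer layout rather than the AudioBus API:

#include <algorithm>
#include <cstddef>

// DISCRETE interpretation: channel i sums into channel i and surplus channels
// are ignored; no up-mix or down-mix formulas are applied.
void discreteSumSketch(const float *const *source, int sourceChannels,
                       float *const *destination, int destinationChannels,
                       size_t frames) {
  int channels = std::min(sourceChannels, destinationChannels);
  for (int c = 0; c < channels; ++c) {
    for (size_t i = 0; i < frames; ++i) {
      destination[c][i] += source[c][i];
    }
  }
}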
package/common/cpp/core/AudioContext.cpp
CHANGED
@@ -7,6 +7,7 @@
 #include "AudioContext.h"
 #include "AudioDecoder.h"
 #include "AudioDestinationNode.h"
+#include "AudioNodeManager.h"

 namespace audioapi {
 AudioContext::AudioContext() : BaseAudioContext() {
@@ -35,11 +36,11 @@ AudioContext::AudioContext(float sampleRate) : BaseAudioContext() {
 }

 AudioContext::~AudioContext() {
-  if (isClosed()) {
-
+  if (!isClosed()) {
+    close();
   }

-
+  nodeManager_->cleanup();
 }

 void AudioContext::close() {
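The destructor change orders teardown so that the context is closed (stopping the platform audio player) before the node manager releases the graph. A minimal sketch of that ordering, assuming hypothetical close() and cleanupNodes() members rather than the package's real ones:

// Illustrative only: stop audio I/O first, then tear down the node graph.
class ContextTeardownSketch {
 public:
  ~ContextTeardownSketch() {
    if (!closed_) {
      close();         // stop rendering before nodes disappear
    }
    cleanupNodes();    // safe now that no render callback is running
  }
  void close() { closed_ = true; }

 private:
  void cleanupNodes() {}
  bool closed_ = false;
};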
package/common/cpp/core/AudioContext.h
CHANGED
@@ -20,14 +20,14 @@ class AudioContext : public BaseAudioContext {

   void close();

-  std::function<void(AudioBus *, int)> renderAudio();
-
  private:
 #ifdef ANDROID
-
+  std::shared_ptr<AudioPlayer> audioPlayer_;
 #else
-
+  std::shared_ptr<IOSAudioPlayer> audioPlayer_;
 #endif
+
+  std::function<void(AudioBus *, int)> renderAudio();
 };

 } // namespace audioapi
package/common/cpp/core/AudioDestinationNode.cpp
CHANGED
@@ -3,14 +3,13 @@
 #include "AudioNode.h"
 #include "AudioNodeManager.h"
 #include "BaseAudioContext.h"
-#include "VectorMath.h"

 namespace audioapi {

 AudioDestinationNode::AudioDestinationNode(BaseAudioContext *context)
     : AudioNode(context), currentSampleFrame_(0) {
   numberOfOutputs_ = 0;
-  numberOfInputs_ =
+  numberOfInputs_ = 1;
   channelCountMode_ = ChannelCountMode::EXPLICIT;
   isInitialized_ = true;
 }
@@ -26,7 +25,7 @@ double AudioDestinationNode::getCurrentTime() const {
 void AudioDestinationNode::renderAudio(
     AudioBus *destinationBus,
     int numFrames) {
-  if (
+  if (numFrames < 0 || !destinationBus || !isInitialized_) {
     return;
   }
