react-native-audio-api 0.6.5 → 0.7.0-nightly-fba4835-20250717
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp +44 -33
- package/android/src/main/java/com/swmansion/audioapi/system/MediaSessionCallback.kt +2 -2
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +24 -23
- package/common/cpp/audioapi/core/AudioParam.cpp +5 -4
- package/common/cpp/audioapi/core/BaseAudioContext.cpp +2 -2
- package/common/cpp/audioapi/core/BaseAudioContext.h +1 -1
- package/common/cpp/audioapi/core/utils/AudioDecoder.h +38 -1
- package/common/cpp/audioapi/libs/audio-stretch/stretch.c +610 -0
- package/common/cpp/audioapi/libs/audio-stretch/stretch.h +49 -0
- package/ios/audioapi/ios/AudioAPIModule.mm +11 -8
- package/ios/audioapi/ios/core/AudioDecoder.mm +37 -31
- package/lib/commonjs/core/BaseAudioContext.js +7 -6
- package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +7 -6
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +4 -1
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +1 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/core/BaseAudioContext.ts +12 -9
- package/src/interfaces.ts +4 -1
package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp

@@ -14,8 +14,8 @@ namespace audioapi {
 std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
     const std::string &path) const {
   ma_decoder decoder;
-  ma_decoder_config config =
-
+  ma_decoder_config config = ma_decoder_config_init(
+      ma_format_s16, numChannels_, static_cast<int>(sampleRate_));
   ma_result result = ma_decoder_init_file(path.c_str(), &config, &decoder);
   if (result != MA_SUCCESS) {
     // __android_log_print(
@@ -31,31 +31,31 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
   ma_uint64 totalFrameCount;
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);

-
-      static_cast<int>(totalFrameCount), 2, sampleRate_);
-  auto *buffer = new float[totalFrameCount * 2];
+  std::vector<int16_t> buffer(totalFrameCount * numChannels_);

   ma_uint64 framesDecoded;
-  ma_decoder_read_pcm_frames(
+  ma_decoder_read_pcm_frames(
+      &decoder, buffer.data(), totalFrameCount, &framesDecoded);

   if (framesDecoded == 0) {
     // __android_log_print(ANDROID_LOG_ERROR, "AudioDecoder", "Failed to decode");

-    delete[] buffer;
     ma_decoder_uninit(&decoder);
-
     return nullptr;
   }

-
+  auto outputFrames = buffer.size() / numChannels_;
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+
+  for (int i = 0; i < numChannels_; ++i) {
     auto channelData = audioBus->getChannel(i)->getData();

-    for (ma_uint64 j = 0; j <
-      channelData[j] = buffer[j *
+    for (ma_uint64 j = 0; j < outputFrames; ++j) {
+      channelData[j] = int16ToFloat(buffer[j * numChannels_ + i]);
     }
   }

-  delete[] buffer;
   ma_decoder_uninit(&decoder);

   return audioBus;
@@ -65,8 +65,8 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithMemoryBlock(
     const void *data,
     size_t size) const {
   ma_decoder decoder;
-  ma_decoder_config config =
-
+  ma_decoder_config config = ma_decoder_config_init(
+      ma_format_s16, numChannels_, static_cast<int>(sampleRate_));
   ma_result result = ma_decoder_init_memory(data, size, &config, &decoder);
   if (result != MA_SUCCESS) {
     // __android_log_print(
@@ -81,52 +81,63 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithMemoryBlock(
   ma_uint64 totalFrameCount;
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);

-
-      static_cast<int>(totalFrameCount), 2, sampleRate_);
-  auto *buffer = new float[totalFrameCount * 2];
+  std::vector<int16_t> buffer(totalFrameCount * numChannels_);

   ma_uint64 framesDecoded;
-  ma_decoder_read_pcm_frames(
+  ma_decoder_read_pcm_frames(
+      &decoder, buffer.data(), totalFrameCount, &framesDecoded);
+
   if (framesDecoded == 0) {
     // __android_log_print(ANDROID_LOG_ERROR, "AudioDecoder", "Failed to decode");

-    delete[] buffer;
     ma_decoder_uninit(&decoder);
-
     return nullptr;
   }

-
+  auto outputFrames = buffer.size() / numChannels_;
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+
+  for (int i = 0; i < numChannels_; ++i) {
     auto channelData = audioBus->getChannel(i)->getData();

-    for (ma_uint64 j = 0; j <
-      channelData[j] = buffer[j *
+    for (ma_uint64 j = 0; j < outputFrames; ++j) {
+      channelData[j] = int16ToFloat(buffer[j * numChannels_ + i]);
     }
   }

-  delete[] buffer;
   ma_decoder_uninit(&decoder);

   return audioBus;
 }

 std::shared_ptr<AudioBus> AudioDecoder::decodeWithPCMInBase64(
-    const std::string &data
+    const std::string &data,
+    float playbackSpeed) const {
   auto decodedData = base64_decode(data, false);

   const auto uint8Data = reinterpret_cast<uint8_t *>(decodedData.data());
-  size_t
+  size_t framesDecoded = decodedData.size() / 2;

-
-
-
-  for (size_t i = 0; i < frameCount; ++i) {
-    auto sample =
+  std::vector<int16_t> buffer(framesDecoded);
+  for (size_t i = 0; i < framesDecoded; ++i) {
+    buffer[i] =
         static_cast<int16_t>((uint8Data[i * 2 + 1] << 8) | uint8Data[i * 2]);
-    channelData[i] = static_cast<float>(sample);
   }

-
+  changePlaybackSpeedIfNeeded(buffer, framesDecoded, 1, playbackSpeed);
+  auto outputFrames = buffer.size();
+
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+  auto leftChannelData = audioBus->getChannel(0)->getData();
+  auto rightChannelData = audioBus->getChannel(1)->getData();
+
+  for (size_t i = 0; i < outputFrames; ++i) {
+    auto sample = int16ToFloat(buffer[i]);
+    leftChannelData[i] = sample;
+    rightChannelData[i] = sample;
+  }

   return audioBus;
 }
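The Android decoder now asks miniaudio for interleaved 16-bit PCM (ma_format_s16) and only converts to the planar float layout of AudioBus at the end, via int16ToFloat and per-channel indexing. Below is a minimal, standalone sketch of that de-interleaving step, assuming an interleaved int16_t buffer; the helper and function names here are illustrative and not part of the package.

#include <cstdint>
#include <vector>

// Illustrative helper; mirrors the scaling used by the decoder's int16ToFloat.
static inline float int16ToFloatSample(int16_t sample) {
  return static_cast<float>(sample) / 32768.0f;  // map [-32768, 32767] to roughly [-1, 1)
}

// Split an interleaved s16 buffer (L R L R ...) into one float vector per channel,
// the same shape the decoder writes into each AudioBus channel.
std::vector<std::vector<float>> deinterleaveToPlanar(
    const std::vector<int16_t> &interleaved, int numChannels) {
  const size_t frames = interleaved.size() / numChannels;
  std::vector<std::vector<float>> planar(numChannels, std::vector<float>(frames));
  for (int ch = 0; ch < numChannels; ++ch) {
    for (size_t j = 0; j < frames; ++j) {
      planar[ch][j] = int16ToFloatSample(interleaved[j * numChannels + ch]);
    }
  }
  return planar;
}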
package/android/src/main/java/com/swmansion/audioapi/system/MediaSessionCallback.kt

@@ -42,12 +42,12 @@ class MediaSessionCallback(
   }

   override fun onFastForward() {
-    val body = HashMap<String, Any>().apply { put("value",
+    val body = HashMap<String, Any>().apply { put("value", 15) }
     audioAPIModule.get()?.invokeHandlerWithEventNameAndEventBody("remoteSkipForward", body)
   }

   override fun onRewind() {
-    val body = HashMap<String, Any>().apply { put("value",
+    val body = HashMap<String, Any>().apply { put("value", 15) }
     audioAPIModule.get()?.invokeHandlerWithEventNameAndEventBody("remoteSkipBackward", body)
   }

package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h

@@ -219,30 +219,31 @@ JSI_HOST_FUNCTION(createBufferQueueSource) {
     return promise;
   }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  JSI_HOST_FUNCTION(decodePCMAudioDataInBase64) {
+    auto b64 = args[0].getString(runtime).utf8(runtime);
+    auto playbackSpeed = static_cast<float>(args[1].getNumber());
+
+    auto promise = promiseVendor_->createPromise([this, b64, playbackSpeed](std::shared_ptr<Promise> promise) {
+      std::thread([this, b64, playbackSpeed, promise = std::move(promise)]() {
+        auto results = context_->decodeWithPCMInBase64(b64, playbackSpeed);
+
+        if (!results) {
+          promise->reject("Failed to decode audio data source.");
+          return;
+        }
+
+        auto audioBufferHostObject = std::make_shared<AudioBufferHostObject>(results);
+
+        promise->resolve([audioBufferHostObject = std::move(audioBufferHostObject)](jsi::Runtime &runtime) {
+          auto jsiObject = jsi::Object::createFromHostObject(runtime, audioBufferHostObject);
+          jsiObject.setExternalMemoryPressure(runtime, audioBufferHostObject->getSizeInBytes());
+          return jsiObject;
+        });
+      }).detach();
+    });

-
-
+    return promise;
+  }

  protected:
  std::shared_ptr<BaseAudioContext> context_;
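The new decodePCMAudioDataInBase64 host function runs the decode on a detached std::thread and resolves the promise with a callback that builds the AudioBuffer host object once a jsi::Runtime is available. A rough standalone analogue of that hand-off, using std::promise/std::future in place of the library's promise vendor; the stub names below are illustrative only.

#include <future>
#include <iostream>
#include <string>
#include <thread>

// Stand-in for the work done by BaseAudioContext::decodeWithPCMInBase64.
std::string decodeStub(const std::string &b64, float playbackSpeed) {
  return "decoded(" + b64 + ", speed=" + std::to_string(playbackSpeed) + ")";
}

int main() {
  std::promise<std::string> resultPromise;
  auto resultFuture = resultPromise.get_future();

  // Mirror the host function: kick off decoding on a detached thread and
  // deliver the result through the promise once it is ready.
  std::thread([p = std::move(resultPromise)]() mutable {
    p.set_value(decodeStub("AAAA", 1.5f));
  }).detach();

  // The caller (analogous to the JS side awaiting the promise) picks it up here.
  std::cout << resultFuture.get() << std::endl;
  return 0;
}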
package/common/cpp/audioapi/core/AudioParam.cpp

@@ -18,10 +18,11 @@ AudioParam::AudioParam(
       minValue_(minValue),
       maxValue_(maxValue),
       context_(context),
-      audioBus_(
-
-
-
+      audioBus_(
+          std::make_shared<AudioBus>(
+              RENDER_QUANTUM_SIZE,
+              1,
+              context->getSampleRate())) {
   startTime_ = 0;
   endTime_ = 0;
   startValue_ = value_;
package/common/cpp/audioapi/core/BaseAudioContext.cpp

@@ -141,8 +141,8 @@ std::shared_ptr<AudioBuffer> BaseAudioContext::decodeAudioData(
 }

 std::shared_ptr<AudioBuffer> BaseAudioContext::decodeWithPCMInBase64(
-    const std::string &data) {
-  auto audioBus = audioDecoder_->decodeWithPCMInBase64(data);
+    const std::string &data, float playbackSpeed) {
+  auto audioBus = audioDecoder_->decodeWithPCMInBase64(data, playbackSpeed);

   if (!audioBus) {
     return nullptr;
package/common/cpp/audioapi/core/BaseAudioContext.h

@@ -59,7 +59,7 @@ class BaseAudioContext {

   std::shared_ptr<AudioBuffer> decodeAudioDataSource(const std::string &path);
   std::shared_ptr<AudioBuffer> decodeAudioData(const void *data, size_t size);
-  std::shared_ptr<AudioBuffer> decodeWithPCMInBase64(const std::string &data);
+  std::shared_ptr<AudioBuffer> decodeWithPCMInBase64(const std::string &data, float playbackSpeed);

   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
   [[nodiscard]] float getNyquistFrequency() const;
package/common/cpp/audioapi/core/utils/AudioDecoder.h

@@ -1,7 +1,9 @@
 #pragma once

+#include <audioapi/libs/audio-stretch/stretch.h>
 #include <memory>
 #include <string>
+#include <vector>

 namespace audioapi {

@@ -13,10 +15,45 @@ class AudioDecoder {

   [[nodiscard]] std::shared_ptr<AudioBus> decodeWithFilePath(const std::string &path) const;
   [[nodiscard]] std::shared_ptr<AudioBus> decodeWithMemoryBlock(const void *data, size_t size) const;
-  [[nodiscard]] std::shared_ptr<AudioBus> decodeWithPCMInBase64(const std::string &data) const;
+  [[nodiscard]] std::shared_ptr<AudioBus> decodeWithPCMInBase64(const std::string &data, float playbackSpeed) const;

  private:
   float sampleRate_;
+  int numChannels_ = 2;
+
+  void changePlaybackSpeedIfNeeded(std::vector<int16_t> &buffer, size_t framesDecoded, int numChannels, float playbackSpeed) const {
+    if (playbackSpeed == 1.0f) {
+      return;
+    }
+
+    auto stretcher = stretch_init(static_cast<int>(sampleRate_ / 333.0f), static_cast<int>(sampleRate_ / 55.0f), numChannels, 0x1);
+
+    int maxOutputFrames = stretch_output_capacity(stretcher, static_cast<int>(framesDecoded), 1 / playbackSpeed);
+    std::vector<int16_t> stretchedBuffer(maxOutputFrames);
+
+    int outputFrames = stretch_samples(
+        stretcher,
+        buffer.data(),
+        static_cast<int>(framesDecoded),
+        stretchedBuffer.data(),
+        1 / playbackSpeed
+    );
+
+    outputFrames += stretch_flush(stretcher, stretchedBuffer.data() + (outputFrames));
+    stretchedBuffer.resize(outputFrames);
+
+    buffer = stretchedBuffer;
+
+    stretch_deinit(stretcher);
+  }
+
+  [[nodiscard]] static inline int16_t floatToInt16(float sample) {
+    return static_cast<int16_t>(sample * 32768.0f);
+  }
+
+  [[nodiscard]] static inline float int16ToFloat(int16_t sample) {
+    return static_cast<float>(sample) / 32768.0f;
+  }
 };

 } // namespace audioapi
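changePlaybackSpeedIfNeeded feeds the decoded int16 samples through the bundled audio-stretch library with a ratio of 1 / playbackSpeed, so duration changes while pitch is preserved rather than the audio simply being resampled. As a rough worked example of what that ratio implies for buffer sizes (the exact count is whatever stretch_samples plus stretch_flush report), a small sketch with illustrative names:

#include <cstddef>
#include <cstdio>

// Nominal frame count after time-stretching: the helper passes a ratio of
// 1 / playbackSpeed to the stretcher, so faster playback yields fewer frames.
// The real count returned by the stretcher can differ slightly.
size_t nominalOutputFrames(size_t framesDecoded, float playbackSpeed) {
  return static_cast<size_t>(framesDecoded / playbackSpeed);
}

int main() {
  const size_t frames = 44100;  // one second of mono PCM at 44.1 kHz
  const float speeds[] = {0.5f, 1.0f, 1.5f, 2.0f};
  for (float speed : speeds) {
    std::printf("speed %.1f -> ~%zu frames\n", speed, nominalOutputFrames(frames, speed));
  }
  return 0;
}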