react-native-audio-api 0.6.4-nightly-a960beb-20250703 → 0.6.4-nightly-00c1dfe-20250704
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp +41 -33
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +4 -3
- package/common/cpp/audioapi/core/AudioParam.cpp +5 -4
- package/common/cpp/audioapi/core/BaseAudioContext.cpp +3 -2
- package/common/cpp/audioapi/core/BaseAudioContext.h +1 -1
- package/common/cpp/audioapi/core/utils/AudioDecoder.h +41 -1
- package/common/cpp/audioapi/libs/audio-stretch/stretch.c +610 -0
- package/common/cpp/audioapi/libs/audio-stretch/stretch.h +49 -0
- package/ios/audioapi/ios/AudioAPIModule.mm +11 -8
- package/ios/audioapi/ios/core/AudioDecoder.mm +33 -30
- package/lib/commonjs/core/BaseAudioContext.js +7 -6
- package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +7 -6
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +4 -1
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +1 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/core/BaseAudioContext.ts +12 -9
- package/src/interfaces.ts +4 -1
package/README.md
CHANGED
@@ -4,7 +4,10 @@
 
 [](https://www.npmjs.com/package/react-native-audio-api)
 [](https://www.npmjs.com/package/react-native-audio-api?activeTab=versions)
-[](https://www.npmjs.com/package/react-native-audio-api?activeTab=versions)
+[](https://github.com/software-mansion/react-native-audio-api/actions/workflows/ci.yml)
+[](https://github.com/software-mansion/react-native-audio-api/actions/workflows/npm-publish-nightly.yml)
+[](https://github.com/software-mansion/react-native-audio-api/actions/workflows/tests.yml)
 
 `react-native-audio-api` provides system for controlling audio in React Native environment compatible with Web Audio API specification,
 allowing developers to generate and modify audio in exact same way it is possible in browsers.
package/android/src/main/cpp/audioapi/android/core/AudioDecoder.cpp
CHANGED

@@ -14,8 +14,8 @@ namespace audioapi {
 std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
     const std::string &path) const {
   ma_decoder decoder;
-  ma_decoder_config config =
-
+  ma_decoder_config config = ma_decoder_config_init(
+      ma_format_s16, numChannels_, static_cast<int>(sampleRate_));
   ma_result result = ma_decoder_init_file(path.c_str(), &config, &decoder);
   if (result != MA_SUCCESS) {
     __android_log_print(
@@ -32,31 +32,31 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithFilePath(
   ma_uint64 totalFrameCount;
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);
 
-
-      static_cast<int>(totalFrameCount), 2, sampleRate_);
-  auto *buffer = new float[totalFrameCount * 2];
+  std::vector<int16_t> buffer(totalFrameCount * numChannels_);
 
   ma_uint64 framesDecoded;
-  ma_decoder_read_pcm_frames(
+  ma_decoder_read_pcm_frames(
+      &decoder, buffer.data(), totalFrameCount, &framesDecoded);
 
   if (framesDecoded == 0) {
     __android_log_print(ANDROID_LOG_ERROR, "AudioDecoder", "Failed to decode");
 
-    delete[] buffer;
     ma_decoder_uninit(&decoder);
-
     return nullptr;
   }
 
-
+  auto outputFrames = buffer.size() / numChannels_;
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+
+  for (int i = 0; i < numChannels_; ++i) {
     auto channelData = audioBus->getChannel(i)->getData();
 
-    for (ma_uint64 j = 0; j <
-      channelData[j] = buffer[j *
+    for (ma_uint64 j = 0; j < outputFrames; ++j) {
+      channelData[j] = int16ToFloat(buffer[j * numChannels_ + i]);
     }
   }
 
-  delete[] buffer;
   ma_decoder_uninit(&decoder);
 
   return audioBus;
@@ -66,8 +66,8 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithMemoryBlock(
     const void *data,
     size_t size) const {
   ma_decoder decoder;
-  ma_decoder_config config =
-
+  ma_decoder_config config = ma_decoder_config_init(
+      ma_format_s16, numChannels_, static_cast<int>(sampleRate_));
   ma_result result = ma_decoder_init_memory(data, size, &config, &decoder);
   if (result != MA_SUCCESS) {
     __android_log_print(
@@ -83,52 +83,60 @@ std::shared_ptr<AudioBus> AudioDecoder::decodeWithMemoryBlock(
   ma_uint64 totalFrameCount;
   ma_decoder_get_length_in_pcm_frames(&decoder, &totalFrameCount);
 
-
-      static_cast<int>(totalFrameCount), 2, sampleRate_);
-  auto *buffer = new float[totalFrameCount * 2];
+  std::vector<int16_t> buffer(totalFrameCount * numChannels_);
 
   ma_uint64 framesDecoded;
-  ma_decoder_read_pcm_frames(
+  ma_decoder_read_pcm_frames(
+      &decoder, buffer.data(), totalFrameCount, &framesDecoded);
+
   if (framesDecoded == 0) {
     __android_log_print(ANDROID_LOG_ERROR, "AudioDecoder", "Failed to decode");
 
-    delete[] buffer;
     ma_decoder_uninit(&decoder);
-
     return nullptr;
   }
 
-
+  auto outputFrames = buffer.size() / numChannels_;
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+
+  for (int i = 0; i < numChannels_; ++i) {
     auto channelData = audioBus->getChannel(i)->getData();
 
-    for (ma_uint64 j = 0; j <
-      channelData[j] = buffer[j *
+    for (ma_uint64 j = 0; j < outputFrames; ++j) {
+      channelData[j] = int16ToFloat(buffer[j * numChannels_ + i]);
     }
   }
 
-  delete[] buffer;
   ma_decoder_uninit(&decoder);
 
   return audioBus;
 }
 
 std::shared_ptr<AudioBus> AudioDecoder::decodeWithPCMInBase64(
-    const std::string &data
+    const std::string &data,
+    const float playbackSpeed) const {
   auto decodedData = base64_decode(data, false);
 
   const auto uint8Data = reinterpret_cast<uint8_t *>(decodedData.data());
-  size_t
+  size_t framesDecoded = decodedData.size() / 2;
 
-
-
-
-  for (size_t i = 0; i < frameCount; ++i) {
-    auto sample =
+  std::vector<int16_t> buffer(framesDecoded);
+  for (size_t i = 0; i < framesDecoded; ++i) {
+    buffer[i] =
         static_cast<int16_t>((uint8Data[i * 2 + 1] << 8) | uint8Data[i * 2]);
-    channelData[i] = static_cast<float>(sample);
   }
 
-
+  changePlaybackSpeedIfNeeded(buffer, framesDecoded, playbackSpeed);
+  auto outputFrames = buffer.size();
+
+  auto audioBus =
+      std::make_shared<AudioBus>(outputFrames, numChannels_, sampleRate_);
+  auto channelData = audioBus->getChannel(0)->getData();
+
+  for (size_t i = 0; i < outputFrames; ++i) {
+    channelData[i] = int16ToFloat(buffer[i]);
+  }
 
   return audioBus;
 }
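Both decode paths now read interleaved 16-bit PCM (ma_format_s16) into a std::vector<int16_t> and then de-interleave it into the planar float channels of the AudioBus, scaling each sample by 1/32768 via int16ToFloat. As a rough illustration of just that conversion step, here is a minimal, self-contained C++ sketch; the names interleaved, planar, and the sample values are illustrative and not part of the package:

// Sketch: de-interleave s16 PCM into per-channel float data, scaling by 1/32768,
// as the new Android decode path does. All names here are illustrative.
#include <cstdint>
#include <cstdio>
#include <vector>

static float int16ToFloat(int16_t sample) {
  return static_cast<float>(sample) / 32768.0f; // same scaling as the AudioDecoder.h helper
}

int main() {
  const int numChannels = 2;
  // Interleaved stereo frames: L0 R0 L1 R1 L2 R2
  std::vector<int16_t> interleaved = {0, 16384, -16384, 32767, 100, -100};
  const size_t frames = interleaved.size() / numChannels;

  // One float buffer per channel, standing in for AudioBus channel data.
  std::vector<std::vector<float>> planar(numChannels, std::vector<float>(frames));
  for (int ch = 0; ch < numChannels; ++ch) {
    for (size_t j = 0; j < frames; ++j) {
      planar[ch][j] = int16ToFloat(interleaved[j * numChannels + ch]);
    }
  }

  std::printf("left[1] = %f, right[1] = %f\n", planar[0][1], planar[1][1]);
  return 0;
}

Decoding straight to s16 also keeps the intermediate buffer in the integer format that the vendored audio-stretch routines operate on, so the PCM path can hand the same buffer to changePlaybackSpeedIfNeeded without an extra conversion.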
package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
CHANGED

@@ -221,10 +221,11 @@ JSI_HOST_FUNCTION(createBufferQueueSource) {
 
 JSI_HOST_FUNCTION(decodePCMAudioDataInBase64) {
   auto b64 = args[0].getString(runtime).utf8(runtime);
+  auto playbackSpeed = static_cast<float>(args[1].getNumber());
 
-  auto promise = promiseVendor_->createPromise([this, b64](std::shared_ptr<Promise> promise) {
-    std::thread([this, b64, promise = std::move(promise)]() {
-      auto results = context_->decodeWithPCMInBase64(b64);
+  auto promise = promiseVendor_->createPromise([this, b64, playbackSpeed](std::shared_ptr<Promise> promise) {
+    std::thread([this, b64, playbackSpeed, promise = std::move(promise)]() {
+      auto results = context_->decodeWithPCMInBase64(b64, playbackSpeed);
 
       if (!results) {
         promise->reject("Failed to decode audio data source.");
package/common/cpp/audioapi/core/AudioParam.cpp
CHANGED

@@ -18,10 +18,11 @@ AudioParam::AudioParam(
       minValue_(minValue),
       maxValue_(maxValue),
       context_(context),
-      audioBus_(
-
-
-
+      audioBus_(
+          std::make_shared<AudioBus>(
+              RENDER_QUANTUM_SIZE,
+              1,
+              context->getSampleRate())) {
   startTime_ = 0;
   endTime_ = 0;
   startValue_ = value_;
package/common/cpp/audioapi/core/BaseAudioContext.cpp
CHANGED

@@ -145,9 +145,10 @@ std::shared_ptr<AudioBuffer> BaseAudioContext::decodeAudioData(
 }
 
 std::shared_ptr<AudioBuffer> BaseAudioContext::decodeWithPCMInBase64(
-    const std::string &data
+    const std::string &data,
+    float playbackSpeed) {
 #ifndef AUDIO_API_TEST_SUITE
-  auto audioBus = audioDecoder_->decodeWithPCMInBase64(data);
+  auto audioBus = audioDecoder_->decodeWithPCMInBase64(data, playbackSpeed);
 
   if (!audioBus) {
     return nullptr;
package/common/cpp/audioapi/core/BaseAudioContext.h
CHANGED

@@ -60,7 +60,7 @@ class BaseAudioContext {
 #ifndef TESTING
   std::shared_ptr<AudioBuffer> decodeAudioDataSource(const std::string &path);
   std::shared_ptr<AudioBuffer> decodeAudioData(const void *data, size_t size);
-  std::shared_ptr<AudioBuffer> decodeWithPCMInBase64(const std::string &data);
+  std::shared_ptr<AudioBuffer> decodeWithPCMInBase64(const std::string &data, float playbackSpeed);
 #endif //TESTING
 
   std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
package/common/cpp/audioapi/core/utils/AudioDecoder.h
CHANGED

@@ -1,7 +1,9 @@
 #pragma once
 
+#include <audioapi/libs/audio-stretch/stretch.h>
 #include <memory>
 #include <string>
+#include <vector>
 
 namespace audioapi {
 
@@ -10,13 +12,51 @@ class AudioBus;
 class AudioDecoder {
  public:
   explicit AudioDecoder(float sampleRate): sampleRate_(sampleRate) {}
+  ~AudioDecoder() {
+    stretch_deinit(stretcher_);
+  }
 
   [[nodiscard]] std::shared_ptr<AudioBus> decodeWithFilePath(const std::string &path) const;
   [[nodiscard]] std::shared_ptr<AudioBus> decodeWithMemoryBlock(const void *data, size_t size) const;
-  [[nodiscard]] std::shared_ptr<AudioBus> decodeWithPCMInBase64(const std::string &data) const;
+  [[nodiscard]] std::shared_ptr<AudioBus> decodeWithPCMInBase64(const std::string &data, float playbackSpeed) const;
 
  private:
   float sampleRate_;
+  int numChannels_ = 2;
+  StretchHandle stretcher_ =
+      stretch_init(static_cast<int>(sampleRate_ / 333.0f), static_cast<int>(sampleRate_ / 55.0f), 1, 0x1);
+
+  void changePlaybackSpeedIfNeeded(std::vector<int16_t> &buffer, size_t framesDecoded, float playbackSpeed) const {
+    if (playbackSpeed == 1.0f) {
+      return;
+    }
+
+    int maxOutputFrames = stretch_output_capacity(stretcher_, static_cast<int>(framesDecoded), 1 / playbackSpeed);
+    std::vector<int16_t> stretchedBuffer(maxOutputFrames);
+
+    int outputFrames = stretch_samples(
+        stretcher_,
+        buffer.data(),
+        static_cast<int>(framesDecoded),
+        stretchedBuffer.data(),
+        1 / playbackSpeed
+    );
+
+    outputFrames += stretch_flush(stretcher_, stretchedBuffer.data() + (outputFrames));
+    stretchedBuffer.resize(outputFrames);
+
+    buffer = stretchedBuffer;
+
+    stretch_reset(stretcher_);
+  }
+
+  [[nodiscard]] static inline int16_t floatToInt16(float sample) {
+    return static_cast<int16_t>(sample * 32768.0f);
+  }
+
+  [[nodiscard]] static inline float int16ToFloat(int16_t sample) {
+    return static_cast<float>(sample) / 32768.0f;
+  }
 };
 
 } // namespace audioapi
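For context, changePlaybackSpeedIfNeeded above drives the vendored audio-stretch library with a stretch ratio of 1 / playbackSpeed, so a playbackSpeed of 2.0 roughly halves the number of output frames and 0.5 roughly doubles it. Below is a minimal standalone sketch of the same lifecycle (init → output_capacity → samples → flush → deinit), assuming the vendored stretch.h keeps the signatures used in the diff; stretchMono and its parameters are hypothetical helpers, not part of the package:

// Sketch: time-stretch a mono int16 buffer the way changePlaybackSpeedIfNeeded does.
// stretchMono is a hypothetical helper; the stretch_* calls mirror the diff above.
#include <audioapi/libs/audio-stretch/stretch.h>

#include <cstdint>
#include <vector>

std::vector<int16_t> stretchMono(
    std::vector<int16_t> input, float sampleRate, float playbackSpeed) {
  if (playbackSpeed == 1.0f) {
    return input; // nothing to do, mirrors the early return in the header
  }

  // Pitch-period bounds match the member initializer (~333 Hz .. ~55 Hz), mono, flags 0x1.
  StretchHandle handle = stretch_init(
      static_cast<int>(sampleRate / 333.0f),
      static_cast<int>(sampleRate / 55.0f),
      1,
      0x1);

  const float ratio = 1.0f / playbackSpeed; // > 1 slows down, < 1 speeds up
  int capacity = stretch_output_capacity(handle, static_cast<int>(input.size()), ratio);
  std::vector<int16_t> output(capacity);

  int produced = stretch_samples(
      handle, input.data(), static_cast<int>(input.size()), output.data(), ratio);
  produced += stretch_flush(handle, output.data() + produced);
  output.resize(produced);

  stretch_deinit(handle);
  return output;
}

Unlike this sketch, the header keeps a single StretchHandle per decoder (stretcher_) and calls stretch_reset after each use instead of re-initializing, which avoids rebuilding the stretcher state on every decode.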