react-native-audio-api 0.9.0-nightly-7ecb495-20251008 → 0.9.1
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/android/src/main/cpp/audioapi/android/core/{utils/AudioDecoder.cpp → AudioDecoder.cpp} +75 -79
- package/android/src/main/jniLibs/arm64-v8a/libavcodec.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libavformat.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libavutil.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libswresample.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/libavcodec.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/libavformat.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/libavutil.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/libswresample.so +0 -0
- package/android/src/main/jniLibs/x86/libavcodec.so +0 -0
- package/android/src/main/jniLibs/x86/libavformat.so +0 -0
- package/android/src/main/jniLibs/x86/libavutil.so +0 -0
- package/android/src/main/jniLibs/x86/libswresample.so +0 -0
- package/android/src/main/jniLibs/x86_64/libavcodec.so +0 -0
- package/android/src/main/jniLibs/x86_64/libavformat.so +0 -0
- package/android/src/main/jniLibs/x86_64/libavutil.so +0 -0
- package/android/src/main/jniLibs/x86_64/libswresample.so +0 -0
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +43 -124
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +101 -1
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h +3 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp +3 -8
- package/common/cpp/audioapi/core/AudioContext.cpp +2 -0
- package/common/cpp/audioapi/core/BaseAudioContext.cpp +35 -0
- package/common/cpp/audioapi/core/BaseAudioContext.h +12 -4
- package/common/cpp/audioapi/core/OfflineAudioContext.cpp +2 -0
- package/common/cpp/audioapi/core/effects/WorkletNode.cpp +16 -28
- package/common/cpp/audioapi/core/effects/WorkletNode.h +2 -3
- package/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp +5 -6
- package/common/cpp/audioapi/core/sources/AudioBuffer.h +1 -0
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +0 -4
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +0 -1
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +0 -2
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +0 -4
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +0 -1
- package/common/cpp/audioapi/core/sources/StreamerNode.cpp +16 -6
- package/common/cpp/audioapi/core/sources/StreamerNode.h +3 -1
- package/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp +2 -3
- package/common/cpp/audioapi/core/utils/AudioDecoder.h +91 -36
- package/common/cpp/audioapi/core/utils/Constants.h +0 -4
- package/common/cpp/audioapi/events/AudioEventHandlerRegistry.cpp +5 -1
- package/common/cpp/audioapi/external/libavcodec.xcframework/ios-arm64/libavcodec.framework/libavcodec +0 -0
- package/common/cpp/audioapi/external/libavcodec.xcframework/ios-arm64_x86_64-simulator/libavcodec.framework/libavcodec +0 -0
- package/common/cpp/audioapi/external/libavformat.xcframework/Info.plist +5 -5
- package/common/cpp/audioapi/external/libavformat.xcframework/ios-arm64/libavformat.framework/libavformat +0 -0
- package/common/cpp/audioapi/external/libavformat.xcframework/ios-arm64_x86_64-simulator/libavformat.framework/libavformat +0 -0
- package/common/cpp/audioapi/external/libavutil.xcframework/ios-arm64/libavutil.framework/libavutil +0 -0
- package/common/cpp/audioapi/external/libavutil.xcframework/ios-arm64_x86_64-simulator/libavutil.framework/libavutil +0 -0
- package/common/cpp/audioapi/external/libswresample.xcframework/Info.plist +5 -5
- package/common/cpp/audioapi/external/libswresample.xcframework/ios-arm64/libswresample.framework/libswresample +0 -0
- package/common/cpp/audioapi/external/libswresample.xcframework/ios-arm64_x86_64-simulator/libswresample.framework/libswresample +0 -0
- package/common/cpp/audioapi/jsi/AudioArrayBuffer.cpp +2 -2
- package/common/cpp/audioapi/jsi/AudioArrayBuffer.h +10 -11
- package/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp +282 -241
- package/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h +19 -57
- package/common/cpp/audioapi/libs/ffmpeg/ffmpeg_setup.sh +1 -1
- package/common/cpp/audioapi/utils/AudioBus.cpp +4 -0
- package/common/cpp/audioapi/utils/AudioBus.h +1 -0
- package/common/cpp/test/CMakeLists.txt +8 -5
- package/common/cpp/test/RunTests.sh +2 -2
- package/common/cpp/test/{AudioParamTest.cpp → src/AudioParamTest.cpp} +1 -1
- package/common/cpp/test/src/ConstantSourceTest.cpp +64 -0
- package/common/cpp/test/{GainTest.cpp → src/GainTest.cpp} +11 -10
- package/common/cpp/test/{MockAudioEventHandlerRegistry.h → src/MockAudioEventHandlerRegistry.h} +4 -2
- package/common/cpp/test/{OscillatorTest.cpp → src/OscillatorTest.cpp} +6 -4
- package/common/cpp/test/{StereoPannerTest.cpp → src/StereoPannerTest.cpp} +1 -1
- package/ios/audioapi/ios/core/AudioDecoder.mm +156 -0
- package/lib/commonjs/api.js +1 -21
- package/lib/commonjs/api.js.map +1 -1
- package/lib/commonjs/core/BaseAudioContext.js +18 -11
- package/lib/commonjs/core/BaseAudioContext.js.map +1 -1
- package/lib/module/api.js +1 -3
- package/lib/module/api.js.map +1 -1
- package/lib/module/core/BaseAudioContext.js +18 -11
- package/lib/module/core/BaseAudioContext.js.map +1 -1
- package/lib/typescript/api.d.ts +1 -5
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/core/BaseAudioContext.d.ts +6 -3
- package/lib/typescript/core/BaseAudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +3 -10
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/api.ts +0 -10
- package/src/core/BaseAudioContext.ts +29 -26
- package/src/interfaces.ts +6 -26
- package/common/cpp/audioapi/HostObjects/utils/AudioDecoderHostObject.cpp +0 -133
- package/common/cpp/audioapi/HostObjects/utils/AudioDecoderHostObject.h +0 -28
- package/common/cpp/audioapi/HostObjects/utils/AudioStretcherHostObject.cpp +0 -58
- package/common/cpp/audioapi/HostObjects/utils/AudioStretcherHostObject.h +0 -26
- package/common/cpp/audioapi/core/types/AudioFormat.h +0 -16
- package/common/cpp/audioapi/core/utils/AudioStretcher.cpp +0 -75
- package/common/cpp/audioapi/core/utils/AudioStretcher.h +0 -30
- package/ios/audioapi/ios/core/utils/AudioDecoder.mm +0 -160
- package/lib/commonjs/core/AudioDecoder.js +0 -48
- package/lib/commonjs/core/AudioDecoder.js.map +0 -1
- package/lib/commonjs/core/AudioStretcher.js +0 -31
- package/lib/commonjs/core/AudioStretcher.js.map +0 -1
- package/lib/module/core/AudioDecoder.js +0 -42
- package/lib/module/core/AudioDecoder.js.map +0 -1
- package/lib/module/core/AudioStretcher.js +0 -26
- package/lib/module/core/AudioStretcher.js.map +0 -1
- package/lib/typescript/core/AudioDecoder.d.ts +0 -4
- package/lib/typescript/core/AudioDecoder.d.ts.map +0 -1
- package/lib/typescript/core/AudioStretcher.d.ts +0 -3
- package/lib/typescript/core/AudioStretcher.d.ts.map +0 -1
- package/src/core/AudioDecoder.ts +0 -78
- package/src/core/AudioStretcher.ts +0 -43
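Read together, the deletions above remove the standalone AudioDecoder and AudioStretcher utilities (the JS wrappers, their host objects, and the native stretch code), while BaseAudioContext, BaseAudioContextHostObject, and the native BaseAudioContext each grow. The TypeScript sketch below shows what that likely means for callers; the context-level decodeAudioData call is an assumption inferred only from this file list, not something shown in the diff itself.

import { AudioContext } from 'react-native-audio-api';

async function playClip(bytes: ArrayBuffer) {
  const ctx = new AudioContext();

  // 0.9.0-nightly: module-level helper, deleted by this release:
  // const buffer = await decodeAudioData(bytes);

  // Assumed 0.9.1 replacement: decoding hangs off the context, as in the
  // Web Audio API (hypothetical here; inferred from the BaseAudioContext growth above).
  const buffer = await ctx.decodeAudioData(bytes);

  const source = ctx.createBufferSource();
  source.buffer = buffer;
  source.connect(ctx.destination);
  source.start();
}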
package/common/cpp/audioapi/HostObjects/utils/AudioStretcherHostObject.cpp
DELETED
@@ -1,58 +0,0 @@
-#pragma once
-
-#include <audioapi/HostObjects/sources/AudioBufferHostObject.h>
-#include <audioapi/HostObjects/utils/AudioStretcherHostObject.h>
-#include <audioapi/core/utils/AudioStretcher.h>
-#include <audioapi/jsi/JsiPromise.h>
-
-#include <jsi/jsi.h>
-#include <memory>
-#include <string>
-#include <thread>
-#include <utility>
-
-namespace audioapi {
-
-AudioStretcherHostObject::AudioStretcherHostObject(
-    jsi::Runtime *runtime,
-    const std::shared_ptr<react::CallInvoker> &callInvoker) {
-  promiseVendor_ = std::make_shared<PromiseVendor>(runtime, callInvoker);
-  addFunctions(
-      JSI_EXPORT_FUNCTION(AudioStretcherHostObject, changePlaybackSpeed));
-}
-
-JSI_HOST_FUNCTION_IMPL(AudioStretcherHostObject, changePlaybackSpeed) {
-  auto audioBuffer =
-      args[0].getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
-  auto playbackSpeed = static_cast<float>(args[1].asNumber());
-
-  auto promise = promiseVendor_->createPromise(
-      [audioBuffer, playbackSpeed](std::shared_ptr<Promise> promise) {
-        std::thread([audioBuffer,
-                     playbackSpeed,
-                     promise = std::move(promise)]() {
-          auto result = AudioStretcher::changePlaybackSpeed(
-              *audioBuffer->audioBuffer_, playbackSpeed);
-
-          if (!result) {
-            promise->reject("Failed to change audio playback speed.");
-            return;
-          }
-
-          auto audioBufferHostObject =
-              std::make_shared<AudioBufferHostObject>(result);
-
-          promise->resolve([audioBufferHostObject = std::move(
-                                audioBufferHostObject)](jsi::Runtime &runtime) {
-            auto jsiObject = jsi::Object::createFromHostObject(
-                runtime, audioBufferHostObject);
-            jsiObject.setExternalMemoryPressure(
-                runtime, audioBufferHostObject->getSizeInBytes());
-            return jsiObject;
-          });
-        }).detach();
-      });
-  return promise;
-}
-
-} // namespace audioapi
package/common/cpp/audioapi/HostObjects/utils/AudioStretcherHostObject.h
DELETED
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <audioapi/HostObjects/sources/AudioBufferHostObject.h>
-#include <audioapi/core/utils/AudioStretcher.h>
-#include <audioapi/jsi/JsiPromise.h>
-
-#include <jsi/jsi.h>
-#include <memory>
-#include <string>
-#include <thread>
-#include <utility>
-
-namespace audioapi {
-using namespace facebook;
-
-class AudioStretcherHostObject : public JsiHostObject {
- public:
-  explicit AudioStretcherHostObject(
-      jsi::Runtime *runtime,
-      const std::shared_ptr<react::CallInvoker> &callInvoker);
-  JSI_HOST_FUNCTION_DECL(changePlaybackSpeed);
-
- private:
-  std::shared_ptr<PromiseVendor> promiseVendor_;
-};
-} // namespace audioapi
package/common/cpp/audioapi/core/utils/AudioStretcher.cpp
DELETED
@@ -1,75 +0,0 @@
-#include <audioapi/core/sources/AudioBuffer.h>
-#include <audioapi/core/utils/AudioStretcher.h>
-#include <audioapi/core/utils/Constants.h>
-#include <audioapi/libs/audio-stretch/stretch.h>
-#include <audioapi/utils/AudioArray.h>
-#include <audioapi/utils/AudioBus.h>
-#include <cstdint>
-
-namespace audioapi {
-
-std::vector<int16_t> AudioStretcher::castToInt16Buffer(AudioBuffer &buffer) {
-  const size_t numChannels = buffer.getNumberOfChannels();
-  const size_t numFrames = buffer.getLength();
-
-  std::vector<int16_t> int16Buffer(numFrames * numChannels);
-
-  for (size_t ch = 0; ch < numChannels; ++ch) {
-    const float *channelData = buffer.getChannelData(ch);
-    for (size_t i = 0; i < numFrames; ++i) {
-      int16Buffer[i * numChannels + ch] = floatToInt16(channelData[i]);
-    }
-  }
-
-  return int16Buffer;
-}
-
-std::shared_ptr<AudioBuffer> AudioStretcher::changePlaybackSpeed(
-    AudioBuffer buffer,
-    float playbackSpeed) {
-  const float sampleRate = buffer.getSampleRate();
-  const size_t outputChannels = buffer.getNumberOfChannels();
-  const size_t numFrames = buffer.getLength();
-
-  if (playbackSpeed == 1.0f) {
-    return std::make_shared<AudioBuffer>(buffer);
-  }
-
-  std::vector<int16_t> int16Buffer = castToInt16Buffer(buffer);
-
-  auto stretcher = stretch_init(
-      static_cast<int>(sampleRate / UPPER_FREQUENCY_LIMIT_DETECTION),
-      static_cast<int>(sampleRate / LOWER_FREQUENCY_LIMIT_DETECTION),
-      outputChannels,
-      0x1);
-
-  int maxOutputFrames = stretch_output_capacity(
-      stretcher, static_cast<int>(numFrames), 1 / playbackSpeed);
-  std::vector<int16_t> stretchedBuffer(maxOutputFrames * outputChannels);
-
-  int outputFrames = stretch_samples(
-      stretcher,
-      int16Buffer.data(),
-      static_cast<int>(numFrames),
-      stretchedBuffer.data(),
-      1 / playbackSpeed);
-
-  outputFrames +=
-      stretch_flush(stretcher, stretchedBuffer.data() + (outputFrames));
-  stretchedBuffer.resize(outputFrames * outputChannels);
-  stretch_deinit(stretcher);
-
-  auto audioBus =
-      std::make_shared<AudioBus>(outputFrames, outputChannels, sampleRate);
-
-  for (int ch = 0; ch < outputChannels; ++ch) {
-    auto channelData = audioBus->getChannel(ch)->getData();
-    for (int i = 0; i < outputFrames; ++i) {
-      channelData[i] = int16ToFloat(stretchedBuffer[i * outputChannels + ch]);
-    }
-  }
-
-  return std::make_shared<AudioBuffer>(audioBus);
-}
-
-} // namespace audioapi
package/common/cpp/audioapi/core/utils/AudioStretcher.h
DELETED
@@ -1,30 +0,0 @@
-#pragma once
-
-#include <memory>
-#include <vector>
-
-namespace audioapi {
-
-class AudioBus;
-class AudioBuffer;
-
-class AudioStretcher {
- public:
-  AudioStretcher() = delete;
-
-  [[nodiscard]] static std::shared_ptr<AudioBuffer> changePlaybackSpeed(
-      AudioBuffer buffer,
-      float playbackSpeed);
-
- private:
-  static std::vector<int16_t> castToInt16Buffer(AudioBuffer &buffer);
-
-  [[nodiscard]] static inline int16_t floatToInt16(float sample) {
-    return static_cast<int16_t>(sample * INT16_MAX);
-  }
-  [[nodiscard]] static inline float int16ToFloat(int16_t sample) {
-    return static_cast<float>(sample) / INT16_MAX;
-  }
-};
-
-} // namespace audioapi
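The removed stretcher round-trips samples through 16-bit integers and drives the audio-stretch library with the inverse of the requested speed. A minimal TypeScript restatement of that arithmetic, purely illustrative and not part of the package:

const INT16_MAX = 32767;

// Same linear cast as the removed floatToInt16 / int16ToFloat helpers above
// (the C++ cast truncates, so Math.trunc rather than Math.round).
function floatToInt16(sample: number): number {
  return Math.trunc(sample * INT16_MAX);
}

function int16ToFloat(sample: number): number {
  return sample / INT16_MAX;
}

// The ratio handed to stretch_output_capacity / stretch_samples is 1 / playbackSpeed:
// playing twice as fast yields roughly half as many output frames.
function stretchRatio(playbackSpeed: number): number {
  return 1 / playbackSpeed;
}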
package/ios/audioapi/ios/core/utils/AudioDecoder.mm
DELETED
@@ -1,160 +0,0 @@
-#define MINIAUDIO_IMPLEMENTATION
-#import <audioapi/libs/miniaudio/miniaudio.h>
-
-#include <audioapi/libs/miniaudio/decoders/libopus/miniaudio_libopus.h>
-#include <audioapi/libs/miniaudio/decoders/libvorbis/miniaudio_libvorbis.h>
-
-#include <audioapi/core/sources/AudioBuffer.h>
-#include <audioapi/core/utils/AudioDecoder.h>
-#include <audioapi/dsp/VectorMath.h>
-#include <audioapi/libs/audio-stretch/stretch.h>
-#include <audioapi/libs/base64/base64.h>
-#include <audioapi/libs/ffmpeg/FFmpegDecoding.h>
-#include <audioapi/utils/AudioArray.h>
-#include <audioapi/utils/AudioBus.h>
-
-namespace audioapi {
-
-// Decoding audio in fixed-size chunks because total frame count can't be
-// determined in advance. Note: ma_decoder_get_length_in_pcm_frames() always
-// returns 0 for Vorbis decoders.
-std::vector<float> AudioDecoder::readAllPcmFrames(ma_decoder &decoder, int outputChannels)
-{
-  std::vector<float> buffer;
-  std::vector<float> temp(CHUNK_SIZE * outputChannels);
-  ma_uint64 outFramesRead = 0;
-
-  while (true) {
-    ma_uint64 tempFramesDecoded = 0;
-    ma_decoder_read_pcm_frames(&decoder, temp.data(), CHUNK_SIZE, &tempFramesDecoded);
-    if (tempFramesDecoded == 0) {
-      break;
-    }
-
-    buffer.insert(buffer.end(), temp.data(), temp.data() + tempFramesDecoded * outputChannels);
-    outFramesRead += tempFramesDecoded;
-  }
-
-  if (outFramesRead == 0) {
-    NSLog(@"Failed to decode");
-  }
-  return buffer;
-}
-
-std::shared_ptr<AudioBuffer> AudioDecoder::makeAudioBufferFromFloatBuffer(
-    const std::vector<float> &buffer,
-    float outputSampleRate,
-    int outputChannels)
-{
-  if (buffer.empty()) {
-    return nullptr;
-  }
-
-  auto outputFrames = buffer.size() / outputChannels;
-  auto audioBus = std::make_shared<AudioBus>(outputFrames, outputChannels, outputSampleRate);
-
-  for (int ch = 0; ch < outputChannels; ++ch) {
-    auto channelData = audioBus->getChannel(ch)->getData();
-    for (int i = 0; i < outputFrames; ++i) {
-      channelData[i] = buffer[i * outputChannels + ch];
-    }
-  }
-  return std::make_shared<AudioBuffer>(audioBus);
-}
-
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(const std::string &path, float sampleRate)
-{
-  if (AudioDecoder::pathHasExtension(path, {".mp4", ".m4a", ".aac"})) {
-    auto buffer = ffmpegdecoder::decodeWithFilePath(path, static_cast<int>(sampleRate));
-    if (buffer == nullptr) {
-      NSLog(@"Failed to decode with FFmpeg: %s", path.c_str());
-      return nullptr;
-    }
-    return buffer;
-  }
-  ma_decoder decoder;
-  ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
-  ma_decoding_backend_vtable *customBackends[] = {ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
-
-  config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount = sizeof(customBackends) / sizeof(customBackends[0]);
-
-  if (ma_decoder_init_file(path.c_str(), &config, &decoder) != MA_SUCCESS) {
-    NSLog(@"Failed to initialize decoder for file: %s", path.c_str());
-    ma_decoder_uninit(&decoder);
-    return nullptr;
-  }
-
-  auto outputSampleRate = static_cast<float>(decoder.outputSampleRate);
-  auto outputChannels = static_cast<int>(decoder.outputChannels);
-
-  std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
-  ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer(buffer, outputSampleRate, outputChannels);
-}
-
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(const void *data, size_t size, float sampleRate)
-{
-  const AudioFormat format = AudioDecoder::detectAudioFormat(data, size);
-  if (format == AudioFormat::MP4 || format == AudioFormat::M4A || format == AudioFormat::AAC) {
-    auto buffer = ffmpegdecoder::decodeWithMemoryBlock(data, size, static_cast<int>(sampleRate));
-    if (buffer == nullptr) {
-      NSLog(@"Failed to decode with FFmpeg");
-      return nullptr;
-    }
-    return buffer;
-  }
-  ma_decoder decoder;
-  ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
-
-  ma_decoding_backend_vtable *customBackends[] = {ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
-
-  config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount = sizeof(customBackends) / sizeof(customBackends[0]);
-
-  if (ma_decoder_init_memory(data, size, &config, &decoder) != MA_SUCCESS) {
-    NSLog(@"Failed to initialize decoder for memory block");
-    ma_decoder_uninit(&decoder);
-    return nullptr;
-  }
-
-  auto outputSampleRate = static_cast<float>(decoder.outputSampleRate);
-  auto outputChannels = static_cast<int>(decoder.outputChannels);
-
-  std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
-  ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer(buffer, outputSampleRate, outputChannels);
-}
-
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithPCMInBase64(
-    const std::string &data,
-    float inputSampleRate,
-    int inputChannelCount,
-    bool interleaved)
-{
-  auto decodedData = base64_decode(data, false);
-  const auto uint8Data = reinterpret_cast<uint8_t *>(decodedData.data());
-  size_t numFramesDecoded = decodedData.size() / (inputChannelCount * sizeof(int16_t));
-
-  auto audioBus = std::make_shared<AudioBus>(numFramesDecoded, inputChannelCount, inputSampleRate);
-
-  for (int ch = 0; ch < inputChannelCount; ++ch) {
-    auto channelData = audioBus->getChannel(ch)->getData();
-
-    for (size_t i = 0; i < numFramesDecoded; ++i) {
-      size_t offset;
-      if (interleaved) {
-        // Ch1, Ch2, Ch1, Ch2, ...
-        offset = (i * inputChannelCount + ch) * sizeof(int16_t);
-      } else {
-        // Ch1, Ch1, Ch1, ..., Ch2, Ch2, Ch2, ...
-        offset = (ch * numFramesDecoded + i) * sizeof(int16_t);
-      }
-
-      channelData[i] = uint8ToFloat(uint8Data[offset], uint8Data[offset + 1]);
-    }
-  }
-  return std::make_shared<AudioBuffer>(audioBus);
-}
-
-} // namespace audioapi
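The decodeWithPCMInBase64 path above deinterleaves signed 16-bit PCM by computing byte offsets per channel. A TypeScript sketch of the same indexing, for illustration only (the exact scaling used by uint8ToFloat is not shown in this diff, so the divisor below is an assumption):

// Splits little-endian int16 PCM into per-channel float arrays, using the same
// interleaved / planar offsets as the removed decodeWithPCMInBase64.
function splitPcm16(bytes: Uint8Array, channelCount: number, interleaved: boolean): Float32Array[] {
  const bytesPerSample = 2;
  const frames = Math.floor(bytes.length / (channelCount * bytesPerSample));
  const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
  const channels = Array.from({ length: channelCount }, () => new Float32Array(frames));

  for (let ch = 0; ch < channelCount; ch++) {
    for (let i = 0; i < frames; i++) {
      // Interleaved: Ch1, Ch2, Ch1, Ch2, ...   Planar: Ch1...Ch1, Ch2...Ch2
      const sampleIndex = interleaved ? i * channelCount + ch : ch * frames + i;
      const int16 = view.getInt16(sampleIndex * bytesPerSample, true);
      channels[ch][i] = int16 / 32768; // assumed normalisation
    }
  }
  return channels;
}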
package/lib/commonjs/core/AudioDecoder.js
DELETED
@@ -1,48 +0,0 @@
-"use strict";
-
-Object.defineProperty(exports, "__esModule", {
-  value: true
-});
-exports.decodeAudioData = decodeAudioData;
-exports.decodePCMInBase64 = decodePCMInBase64;
-var _AudioBuffer = _interopRequireDefault(require("./AudioBuffer"));
-function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
-class AudioDecoder {
-  static instance = null;
-  constructor() {
-    this.decoder = global.createAudioDecoder();
-  }
-  static getInstance() {
-    if (!AudioDecoder.instance) {
-      AudioDecoder.instance = new AudioDecoder();
-    }
-    return AudioDecoder.instance;
-  }
-  async decodeAudioDataInstance(input, sampleRate) {
-    let buffer;
-    if (typeof input === 'string') {
-      // Remove the file:// prefix if it exists
-      if (input.startsWith('file://')) {
-        input = input.replace('file://', '');
-      }
-      buffer = await this.decoder.decodeWithFilePath(input, sampleRate ?? 0);
-    } else if (input instanceof ArrayBuffer) {
-      buffer = await this.decoder.decodeWithMemoryBlock(new Uint8Array(input), sampleRate ?? 0);
-    }
-    if (!buffer) {
-      throw new Error('Unsupported input type or failed to decode audio');
-    }
-    return new _AudioBuffer.default(buffer);
-  }
-  async decodePCMInBase64Instance(base64String, inputSampleRate, inputChannelCount, interleaved) {
-    const buffer = await this.decoder.decodeWithPCMInBase64(base64String, inputSampleRate, inputChannelCount, interleaved);
-    return new _AudioBuffer.default(buffer);
-  }
-}
-async function decodeAudioData(input, sampleRate) {
-  return AudioDecoder.getInstance().decodeAudioDataInstance(input, sampleRate);
-}
-async function decodePCMInBase64(base64String, inputSampleRate, inputChannelCount, isInterleaved = true) {
-  return AudioDecoder.getInstance().decodePCMInBase64Instance(base64String, inputSampleRate, inputChannelCount, isInterleaved);
-}
-//# sourceMappingURL=AudioDecoder.js.map
package/lib/commonjs/core/AudioDecoder.js.map
DELETED
@@ -1 +0,0 @@
{"version":3,"names":["_AudioBuffer","_interopRequireDefault","require","e","__esModule","default","AudioDecoder","instance","constructor","decoder","global","createAudioDecoder","getInstance","decodeAudioDataInstance","input","sampleRate","buffer","startsWith","replace","decodeWithFilePath","ArrayBuffer","decodeWithMemoryBlock","Uint8Array","Error","AudioBuffer","decodePCMInBase64Instance","base64String","inputSampleRate","inputChannelCount","interleaved","decodeWithPCMInBase64","decodeAudioData","decodePCMInBase64","isInterleaved"],"sourceRoot":"../../../src","sources":["core/AudioDecoder.ts"],"mappings":";;;;;;;AACA,IAAAA,YAAA,GAAAC,sBAAA,CAAAC,OAAA;AAAwC,SAAAD,uBAAAE,CAAA,WAAAA,CAAA,IAAAA,CAAA,CAAAC,UAAA,GAAAD,CAAA,KAAAE,OAAA,EAAAF,CAAA;AAExC,MAAMG,YAAY,CAAC;EACjB,OAAeC,QAAQ,GAAwB,IAAI;EAG3CC,WAAWA,CAAA,EAAG;IACpB,IAAI,CAACC,OAAO,GAAGC,MAAM,CAACC,kBAAkB,CAAC,CAAC;EAC5C;EAEA,OAAcC,WAAWA,CAAA,EAAiB;IACxC,IAAI,CAACN,YAAY,CAACC,QAAQ,EAAE;MAC1BD,YAAY,CAACC,QAAQ,GAAG,IAAID,YAAY,CAAC,CAAC;IAC5C;IACA,OAAOA,YAAY,CAACC,QAAQ;EAC9B;EAEA,MAAaM,uBAAuBA,CAClCC,KAA2B,EAC3BC,UAAmB,EACG;IACtB,IAAIC,MAAM;IACV,IAAI,OAAOF,KAAK,KAAK,QAAQ,EAAE;MAC7B;MACA,IAAIA,KAAK,CAACG,UAAU,CAAC,SAAS,CAAC,EAAE;QAC/BH,KAAK,GAAGA,KAAK,CAACI,OAAO,CAAC,SAAS,EAAE,EAAE,CAAC;MACtC;MACAF,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACU,kBAAkB,CAACL,KAAK,EAAEC,UAAU,IAAI,CAAC,CAAC;IACxE,CAAC,MAAM,IAAID,KAAK,YAAYM,WAAW,EAAE;MACvCJ,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACY,qBAAqB,CAC/C,IAAIC,UAAU,CAACR,KAAK,CAAC,EACrBC,UAAU,IAAI,CAChB,CAAC;IACH;IAEA,IAAI,CAACC,MAAM,EAAE;MACX,MAAM,IAAIO,KAAK,CAAC,kDAAkD,CAAC;IACrE;IACA,OAAO,IAAIC,oBAAW,CAACR,MAAM,CAAC;EAChC;EAEA,MAAaS,yBAAyBA,CACpCC,YAAoB,EACpBC,eAAuB,EACvBC,iBAAyB,EACzBC,WAAoB,EACE;IACtB,MAAMb,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACqB,qBAAqB,CACrDJ,YAAY,EACZC,eAAe,EACfC,iBAAiB,EACjBC,WACF,CAAC;IACD,OAAO,IAAIL,oBAAW,CAACR,MAAM,CAAC;EAChC;AACF;AAEO,eAAee,eAAeA,CACnCjB,KAA2B,EAC3BC,UAAmB,EACG;EACtB,OAAOT,YAAY,CAACM,WAAW,CAAC,CAAC,CAACC,uBAAuB,CAACC,KAAK,EAAEC,UAAU,CAAC;AAC9E;AAEO,eAAeiB,iBAAiBA,CACrCN,YAAoB,EACpBC,eAAuB,EACvBC,iBAAyB,EACzBK,aAAsB,GAAG,IAAI,EACP;EACtB,OAAO3B,YAAY,CAACM,WAAW,CAAC,CAAC,CAACa,yBAAyB,CACzDC,YAAY,EACZC,eAAe,EACfC,iBAAiB,EACjBK,aACF,CAAC;AACH","ignoreList":[]}
package/lib/commonjs/core/AudioStretcher.js
DELETED
@@ -1,31 +0,0 @@
-"use strict";
-
-Object.defineProperty(exports, "__esModule", {
-  value: true
-});
-exports.default = changePlaybackSpeed;
-var _AudioBuffer = _interopRequireDefault(require("./AudioBuffer"));
-function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
-class AudioStretcher {
-  static instance = null;
-  constructor() {
-    this.stretcher = global.createAudioStretcher();
-  }
-  static getInstance() {
-    if (!AudioStretcher.instance) {
-      AudioStretcher.instance = new AudioStretcher();
-    }
-    return AudioStretcher.instance;
-  }
-  async changePlaybackSpeedInstance(input, playbackSpeed) {
-    const buffer = await this.stretcher.changePlaybackSpeed(input.buffer, playbackSpeed);
-    if (!buffer) {
-      throw new Error('Failed to change playback speed');
-    }
-    return new _AudioBuffer.default(buffer);
-  }
-}
-async function changePlaybackSpeed(input, playbackSpeed) {
-  return AudioStretcher.getInstance().changePlaybackSpeedInstance(input, playbackSpeed);
-}
-//# sourceMappingURL=AudioStretcher.js.map
package/lib/commonjs/core/AudioStretcher.js.map
DELETED
@@ -1 +0,0 @@
{"version":3,"names":["_AudioBuffer","_interopRequireDefault","require","e","__esModule","default","AudioStretcher","instance","constructor","stretcher","global","createAudioStretcher","getInstance","changePlaybackSpeedInstance","input","playbackSpeed","buffer","changePlaybackSpeed","Error","AudioBuffer"],"sourceRoot":"../../../src","sources":["core/AudioStretcher.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,sBAAA,CAAAC,OAAA;AAAwC,SAAAD,uBAAAE,CAAA,WAAAA,CAAA,IAAAA,CAAA,CAAAC,UAAA,GAAAD,CAAA,KAAAE,OAAA,EAAAF,CAAA;AAExC,MAAMG,cAAc,CAAC;EACnB,OAAeC,QAAQ,GAA0B,IAAI;EAG7CC,WAAWA,CAAA,EAAG;IACpB,IAAI,CAACC,SAAS,GAAGC,MAAM,CAACC,oBAAoB,CAAC,CAAC;EAChD;EAEA,OAAcC,WAAWA,CAAA,EAAmB;IAC1C,IAAI,CAACN,cAAc,CAACC,QAAQ,EAAE;MAC5BD,cAAc,CAACC,QAAQ,GAAG,IAAID,cAAc,CAAC,CAAC;IAChD;IACA,OAAOA,cAAc,CAACC,QAAQ;EAChC;EAEA,MAAaM,2BAA2BA,CACtCC,KAAkB,EAClBC,aAAqB,EACC;IACtB,MAAMC,MAAM,GAAG,MAAM,IAAI,CAACP,SAAS,CAACQ,mBAAmB,CACrDH,KAAK,CAACE,MAAM,EACZD,aACF,CAAC;IAED,IAAI,CAACC,MAAM,EAAE;MACX,MAAM,IAAIE,KAAK,CAAC,iCAAiC,CAAC;IACpD;IACA,OAAO,IAAIC,oBAAW,CAACH,MAAM,CAAC;EAChC;AACF;AAEe,eAAeC,mBAAmBA,CAC/CH,KAAkB,EAClBC,aAAqB,EACC;EACtB,OAAOT,cAAc,CAACM,WAAW,CAAC,CAAC,CAACC,2BAA2B,CAC7DC,KAAK,EACLC,aACF,CAAC;AACH","ignoreList":[]}
package/lib/module/core/AudioDecoder.js
DELETED
@@ -1,42 +0,0 @@
-"use strict";
-
-import AudioBuffer from "./AudioBuffer.js";
-class AudioDecoder {
-  static instance = null;
-  constructor() {
-    this.decoder = global.createAudioDecoder();
-  }
-  static getInstance() {
-    if (!AudioDecoder.instance) {
-      AudioDecoder.instance = new AudioDecoder();
-    }
-    return AudioDecoder.instance;
-  }
-  async decodeAudioDataInstance(input, sampleRate) {
-    let buffer;
-    if (typeof input === 'string') {
-      // Remove the file:// prefix if it exists
-      if (input.startsWith('file://')) {
-        input = input.replace('file://', '');
-      }
-      buffer = await this.decoder.decodeWithFilePath(input, sampleRate ?? 0);
-    } else if (input instanceof ArrayBuffer) {
-      buffer = await this.decoder.decodeWithMemoryBlock(new Uint8Array(input), sampleRate ?? 0);
-    }
-    if (!buffer) {
-      throw new Error('Unsupported input type or failed to decode audio');
-    }
-    return new AudioBuffer(buffer);
-  }
-  async decodePCMInBase64Instance(base64String, inputSampleRate, inputChannelCount, interleaved) {
-    const buffer = await this.decoder.decodeWithPCMInBase64(base64String, inputSampleRate, inputChannelCount, interleaved);
-    return new AudioBuffer(buffer);
-  }
-}
-export async function decodeAudioData(input, sampleRate) {
-  return AudioDecoder.getInstance().decodeAudioDataInstance(input, sampleRate);
-}
-export async function decodePCMInBase64(base64String, inputSampleRate, inputChannelCount, isInterleaved = true) {
-  return AudioDecoder.getInstance().decodePCMInBase64Instance(base64String, inputSampleRate, inputChannelCount, isInterleaved);
-}
-//# sourceMappingURL=AudioDecoder.js.map
package/lib/module/core/AudioDecoder.js.map
DELETED
@@ -1 +0,0 @@
{"version":3,"names":["AudioBuffer","AudioDecoder","instance","constructor","decoder","global","createAudioDecoder","getInstance","decodeAudioDataInstance","input","sampleRate","buffer","startsWith","replace","decodeWithFilePath","ArrayBuffer","decodeWithMemoryBlock","Uint8Array","Error","decodePCMInBase64Instance","base64String","inputSampleRate","inputChannelCount","interleaved","decodeWithPCMInBase64","decodeAudioData","decodePCMInBase64","isInterleaved"],"sourceRoot":"../../../src","sources":["core/AudioDecoder.ts"],"mappings":";;AACA,OAAOA,WAAW,MAAM,kBAAe;AAEvC,MAAMC,YAAY,CAAC;EACjB,OAAeC,QAAQ,GAAwB,IAAI;EAG3CC,WAAWA,CAAA,EAAG;IACpB,IAAI,CAACC,OAAO,GAAGC,MAAM,CAACC,kBAAkB,CAAC,CAAC;EAC5C;EAEA,OAAcC,WAAWA,CAAA,EAAiB;IACxC,IAAI,CAACN,YAAY,CAACC,QAAQ,EAAE;MAC1BD,YAAY,CAACC,QAAQ,GAAG,IAAID,YAAY,CAAC,CAAC;IAC5C;IACA,OAAOA,YAAY,CAACC,QAAQ;EAC9B;EAEA,MAAaM,uBAAuBA,CAClCC,KAA2B,EAC3BC,UAAmB,EACG;IACtB,IAAIC,MAAM;IACV,IAAI,OAAOF,KAAK,KAAK,QAAQ,EAAE;MAC7B;MACA,IAAIA,KAAK,CAACG,UAAU,CAAC,SAAS,CAAC,EAAE;QAC/BH,KAAK,GAAGA,KAAK,CAACI,OAAO,CAAC,SAAS,EAAE,EAAE,CAAC;MACtC;MACAF,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACU,kBAAkB,CAACL,KAAK,EAAEC,UAAU,IAAI,CAAC,CAAC;IACxE,CAAC,MAAM,IAAID,KAAK,YAAYM,WAAW,EAAE;MACvCJ,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACY,qBAAqB,CAC/C,IAAIC,UAAU,CAACR,KAAK,CAAC,EACrBC,UAAU,IAAI,CAChB,CAAC;IACH;IAEA,IAAI,CAACC,MAAM,EAAE;MACX,MAAM,IAAIO,KAAK,CAAC,kDAAkD,CAAC;IACrE;IACA,OAAO,IAAIlB,WAAW,CAACW,MAAM,CAAC;EAChC;EAEA,MAAaQ,yBAAyBA,CACpCC,YAAoB,EACpBC,eAAuB,EACvBC,iBAAyB,EACzBC,WAAoB,EACE;IACtB,MAAMZ,MAAM,GAAG,MAAM,IAAI,CAACP,OAAO,CAACoB,qBAAqB,CACrDJ,YAAY,EACZC,eAAe,EACfC,iBAAiB,EACjBC,WACF,CAAC;IACD,OAAO,IAAIvB,WAAW,CAACW,MAAM,CAAC;EAChC;AACF;AAEA,OAAO,eAAec,eAAeA,CACnChB,KAA2B,EAC3BC,UAAmB,EACG;EACtB,OAAOT,YAAY,CAACM,WAAW,CAAC,CAAC,CAACC,uBAAuB,CAACC,KAAK,EAAEC,UAAU,CAAC;AAC9E;AAEA,OAAO,eAAegB,iBAAiBA,CACrCN,YAAoB,EACpBC,eAAuB,EACvBC,iBAAyB,EACzBK,aAAsB,GAAG,IAAI,EACP;EACtB,OAAO1B,YAAY,CAACM,WAAW,CAAC,CAAC,CAACY,yBAAyB,CACzDC,YAAY,EACZC,eAAe,EACfC,iBAAiB,EACjBK,aACF,CAAC;AACH","ignoreList":[]}
package/lib/module/core/AudioStretcher.js
DELETED
@@ -1,26 +0,0 @@
-"use strict";
-
-import AudioBuffer from "./AudioBuffer.js";
-class AudioStretcher {
-  static instance = null;
-  constructor() {
-    this.stretcher = global.createAudioStretcher();
-  }
-  static getInstance() {
-    if (!AudioStretcher.instance) {
-      AudioStretcher.instance = new AudioStretcher();
-    }
-    return AudioStretcher.instance;
-  }
-  async changePlaybackSpeedInstance(input, playbackSpeed) {
-    const buffer = await this.stretcher.changePlaybackSpeed(input.buffer, playbackSpeed);
-    if (!buffer) {
-      throw new Error('Failed to change playback speed');
-    }
-    return new AudioBuffer(buffer);
-  }
-}
-export default async function changePlaybackSpeed(input, playbackSpeed) {
-  return AudioStretcher.getInstance().changePlaybackSpeedInstance(input, playbackSpeed);
-}
-//# sourceMappingURL=AudioStretcher.js.map
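For reference, this is how the default export above was consumed in 0.9.0. The named import from the package root is an assumption (the re-export most plausibly lived in the trimmed src/api.ts); the call shape itself matches the deleted code:

// changePlaybackSpeed: assumed 0.9.0 re-export of the deleted AudioStretcher module, gone in 0.9.1.
import { AudioBuffer, changePlaybackSpeed } from 'react-native-audio-api';

// Resolves with a new, longer AudioBuffer at half speed; rejects if the
// native stretcher fails (see AudioStretcherHostObject earlier in this diff).
async function halfSpeed(buffer: AudioBuffer): Promise<AudioBuffer> {
  return changePlaybackSpeed(buffer, 0.5);
}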
package/lib/module/core/AudioStretcher.js.map
DELETED
@@ -1 +0,0 @@
{"version":3,"names":["AudioBuffer","AudioStretcher","instance","constructor","stretcher","global","createAudioStretcher","getInstance","changePlaybackSpeedInstance","input","playbackSpeed","buffer","changePlaybackSpeed","Error"],"sourceRoot":"../../../src","sources":["core/AudioStretcher.ts"],"mappings":";;AACA,OAAOA,WAAW,MAAM,kBAAe;AAEvC,MAAMC,cAAc,CAAC;EACnB,OAAeC,QAAQ,GAA0B,IAAI;EAG7CC,WAAWA,CAAA,EAAG;IACpB,IAAI,CAACC,SAAS,GAAGC,MAAM,CAACC,oBAAoB,CAAC,CAAC;EAChD;EAEA,OAAcC,WAAWA,CAAA,EAAmB;IAC1C,IAAI,CAACN,cAAc,CAACC,QAAQ,EAAE;MAC5BD,cAAc,CAACC,QAAQ,GAAG,IAAID,cAAc,CAAC,CAAC;IAChD;IACA,OAAOA,cAAc,CAACC,QAAQ;EAChC;EAEA,MAAaM,2BAA2BA,CACtCC,KAAkB,EAClBC,aAAqB,EACC;IACtB,MAAMC,MAAM,GAAG,MAAM,IAAI,CAACP,SAAS,CAACQ,mBAAmB,CACrDH,KAAK,CAACE,MAAM,EACZD,aACF,CAAC;IAED,IAAI,CAACC,MAAM,EAAE;MACX,MAAM,IAAIE,KAAK,CAAC,iCAAiC,CAAC;IACpD;IACA,OAAO,IAAIb,WAAW,CAACW,MAAM,CAAC;EAChC;AACF;AAEA,eAAe,eAAeC,mBAAmBA,CAC/CH,KAAkB,EAClBC,aAAqB,EACC;EACtB,OAAOT,cAAc,CAACM,WAAW,CAAC,CAAC,CAACC,2BAA2B,CAC7DC,KAAK,EACLC,aACF,CAAC;AACH","ignoreList":[]}
package/lib/typescript/core/AudioDecoder.d.ts
DELETED
@@ -1,4 +0,0 @@
-import AudioBuffer from './AudioBuffer';
-export declare function decodeAudioData(input: string | ArrayBuffer, sampleRate?: number): Promise<AudioBuffer>;
-export declare function decodePCMInBase64(base64String: string, inputSampleRate: number, inputChannelCount: number, isInterleaved?: boolean): Promise<AudioBuffer>;
-//# sourceMappingURL=AudioDecoder.d.ts.map
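The declarations above give the exact signatures of the removed decode helpers. A short usage sketch against those signatures follows; the package-root import is an assumption (the re-export would have come from the api entry point that this release trims):

// 0.9.0 usage of the functions declared above; both are removed in 0.9.1.
import { decodeAudioData, decodePCMInBase64 } from 'react-native-audio-api';

async function loadClips(base64Pcm: string) {
  // Accepts a file path (a leading file:// is stripped internally) or an ArrayBuffer;
  // sampleRate is optional and falls through as 0 to the native decoder.
  const fromFile = await decodeAudioData('file:///tmp/clip.mp3');

  // Base64-encoded 16-bit PCM: 48 kHz, stereo, interleaved (the default).
  const fromPcm = await decodePCMInBase64(base64Pcm, 48000, 2);

  return { fromFile, fromPcm };
}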
package/lib/typescript/core/AudioDecoder.d.ts.map
DELETED
@@ -1 +0,0 @@
{"version":3,"file":"AudioDecoder.d.ts","sourceRoot":"","sources":["../../../src/core/AudioDecoder.ts"],"names":[],"mappings":"AACA,OAAO,WAAW,MAAM,eAAe,CAAC;AAyDxC,wBAAsB,eAAe,CACnC,KAAK,EAAE,MAAM,GAAG,WAAW,EAC3B,UAAU,CAAC,EAAE,MAAM,GAClB,OAAO,CAAC,WAAW,CAAC,CAEtB;AAED,wBAAsB,iBAAiB,CACrC,YAAY,EAAE,MAAM,EACpB,eAAe,EAAE,MAAM,EACvB,iBAAiB,EAAE,MAAM,EACzB,aAAa,GAAE,OAAc,GAC5B,OAAO,CAAC,WAAW,CAAC,CAOtB"}
package/lib/typescript/core/AudioStretcher.d.ts.map
DELETED
@@ -1 +0,0 @@
{"version":3,"file":"AudioStretcher.d.ts","sourceRoot":"","sources":["../../../src/core/AudioStretcher.ts"],"names":[],"mappings":"AACA,OAAO,WAAW,MAAM,eAAe,CAAC;AAiCxC,wBAA8B,mBAAmB,CAC/C,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,GACpB,OAAO,CAAC,WAAW,CAAC,CAKtB"}
package/src/core/AudioDecoder.ts
DELETED
@@ -1,78 +0,0 @@
-import { IAudioDecoder } from '../interfaces';
-import AudioBuffer from './AudioBuffer';
-
-class AudioDecoder {
-  private static instance: AudioDecoder | null = null;
-  protected readonly decoder: IAudioDecoder;
-
-  private constructor() {
-    this.decoder = global.createAudioDecoder();
-  }
-
-  public static getInstance(): AudioDecoder {
-    if (!AudioDecoder.instance) {
-      AudioDecoder.instance = new AudioDecoder();
-    }
-    return AudioDecoder.instance;
-  }
-
-  public async decodeAudioDataInstance(
-    input: string | ArrayBuffer,
-    sampleRate?: number
-  ): Promise<AudioBuffer> {
-    let buffer;
-    if (typeof input === 'string') {
-      // Remove the file:// prefix if it exists
-      if (input.startsWith('file://')) {
-        input = input.replace('file://', '');
-      }
-      buffer = await this.decoder.decodeWithFilePath(input, sampleRate ?? 0);
-    } else if (input instanceof ArrayBuffer) {
-      buffer = await this.decoder.decodeWithMemoryBlock(
-        new Uint8Array(input),
-        sampleRate ?? 0
-      );
-    }
-
-    if (!buffer) {
-      throw new Error('Unsupported input type or failed to decode audio');
-    }
-    return new AudioBuffer(buffer);
-  }
-
-  public async decodePCMInBase64Instance(
-    base64String: string,
-    inputSampleRate: number,
-    inputChannelCount: number,
-    interleaved: boolean
-  ): Promise<AudioBuffer> {
-    const buffer = await this.decoder.decodeWithPCMInBase64(
-      base64String,
-      inputSampleRate,
-      inputChannelCount,
-      interleaved
-    );
-    return new AudioBuffer(buffer);
-  }
-}
-
-export async function decodeAudioData(
-  input: string | ArrayBuffer,
-  sampleRate?: number
-): Promise<AudioBuffer> {
-  return AudioDecoder.getInstance().decodeAudioDataInstance(input, sampleRate);
-}
-
-export async function decodePCMInBase64(
-  base64String: string,
-  inputSampleRate: number,
-  inputChannelCount: number,
-  isInterleaved: boolean = true
-): Promise<AudioBuffer> {
-  return AudioDecoder.getInstance().decodePCMInBase64Instance(
-    base64String,
-    inputSampleRate,
-    inputChannelCount,
-    isInterleaved
-  );
-}