react-native-audio-api 0.11.0-nightly-010ea11-20251110 → 0.11.0-nightly-c8d92af-20251111
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +1 -3
- package/common/cpp/audioapi/HostObjects/AudioContextHostObject.cpp +6 -2
- package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.cpp +3 -1
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.cpp +19 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.h +3 -0
- package/common/cpp/audioapi/core/AudioContext.cpp +19 -24
- package/common/cpp/audioapi/core/AudioContext.h +2 -2
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +16 -0
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +23 -3
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +16 -3
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +5 -5
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +10 -0
- package/common/cpp/audioapi/dsp/VectorMath.cpp +15 -15
- package/common/cpp/test/src/biquad/BiquadFilterTest.cpp +5 -5
- package/ios/audioapi/ios/AudioAPIModule.h +6 -4
- package/ios/audioapi/ios/AudioAPIModule.mm +62 -41
- package/ios/audioapi/ios/core/IOSAudioPlayer.h +1 -1
- package/ios/audioapi/ios/core/IOSAudioPlayer.mm +33 -24
- package/ios/audioapi/ios/core/IOSAudioRecorder.h +3 -2
- package/ios/audioapi/ios/core/IOSAudioRecorder.mm +6 -4
- package/ios/audioapi/ios/core/NativeAudioPlayer.m +18 -9
- package/ios/audioapi/ios/core/NativeAudioRecorder.h +2 -1
- package/ios/audioapi/ios/core/NativeAudioRecorder.m +45 -27
- package/ios/audioapi/ios/core/utils/AudioDecoder.mm +44 -19
- package/ios/audioapi/ios/system/AudioEngine.h +4 -2
- package/ios/audioapi/ios/system/AudioEngine.mm +22 -8
- package/ios/audioapi/ios/system/AudioSessionManager.h +9 -5
- package/ios/audioapi/ios/system/AudioSessionManager.mm +51 -21
- package/ios/audioapi/ios/system/LockScreenManager.mm +137 -88
- package/ios/audioapi/ios/system/NotificationManager.mm +79 -48
- package/lib/commonjs/api.js.map +1 -1
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/commonjs/core/AudioContext.js +1 -1
- package/lib/commonjs/core/AudioContext.js.map +1 -1
- package/lib/commonjs/web-core/AudioContext.js +1 -1
- package/lib/commonjs/web-core/AudioContext.js.map +1 -1
- package/lib/module/api.js.map +1 -1
- package/lib/module/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/module/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/module/core/AudioContext.js +1 -1
- package/lib/module/core/AudioContext.js.map +1 -1
- package/lib/module/web-core/AudioContext.js +1 -1
- package/lib/module/web-core/AudioContext.js.map +1 -1
- package/lib/typescript/api.d.ts +1 -1
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts +1 -0
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +2 -0
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +0 -1
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/api.ts +0 -1
- package/src/core/AudioBufferBaseSourceNode.ts +8 -0
- package/src/core/AudioContext.ts +0 -1
- package/src/interfaces.ts +3 -0
- package/src/types.ts +0 -1
- package/src/web-core/AudioContext.tsx +1 -1
package/ios/audioapi/ios/core/IOSAudioPlayer.mm

@@ -13,36 +13,45 @@ IOSAudioPlayer::IOSAudioPlayer(
     const std::function<void(std::shared_ptr<AudioBus>, int)> &renderAudio,
     float sampleRate,
     int channelCount)
-    : renderAudio_(renderAudio),
+    : renderAudio_(renderAudio),
+      channelCount_(channelCount),
+      audioBus_(0),
+      isRunning_(false)
 {
-  RenderAudioBlock renderAudioBlock =
-  … (old lines 19–39 are truncated in the source diff)
+  RenderAudioBlock renderAudioBlock =
+      ^(AudioBufferList *outputData, int numFrames) {
+        int processedFrames = 0;
+
+        while (processedFrames < numFrames) {
+          int framesToProcess =
+              std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE);
+
+          if (isRunning_.load()) {
+            renderAudio_(audioBus_, framesToProcess);
+          } else {
+            audioBus_->zero();
+          }
+
+          for (int channel = 0; channel < channelCount_; channel += 1) {
+            float *outputChannel = (float *)outputData->mBuffers[channel].mData;
+            auto *inputChannel = audioBus_->getChannel(channel)->getData();
+
+            memcpy(
+                outputChannel + processedFrames,
+                inputChannel,
+                framesToProcess * sizeof(float));
+          }
+
+          processedFrames += framesToProcess;
+        }
+      };
 
   audioPlayer_ = [[NativeAudioPlayer alloc] initWithRenderAudio:renderAudioBlock
                                                      sampleRate:sampleRate
                                                    channelCount:channelCount_];
 
-  audioBus_ = std::make_shared<AudioBus>( …
+  audioBus_ = std::make_shared<AudioBus>(
+      RENDER_QUANTUM_SIZE, channelCount_, sampleRate);
 }
 
 IOSAudioPlayer::~IOSAudioPlayer()
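The rewritten render block above drains each callback request in fixed-size render quanta: it renders RENDER_QUANTUM_SIZE frames at a time into audioBus_ and copies each quantum into the output AudioBufferList at the running frame offset. A minimal standalone sketch of that chunking pattern, with a hypothetical renderQuantum callback and a plain float array standing in for the library's AudioBus (the 128-frame constant mirrors the Web Audio render quantum; the package defines its own value):

// Chunked rendering: split an arbitrary numFrames request into
// fixed-size quanta so the producer always fills same-sized blocks.
#include <algorithm>
#include <cstring>
#include <functional>
#include <vector>

constexpr int kRenderQuantumSize = 128; // illustrative; see RENDER_QUANTUM_SIZE

void renderChunked(
    float *output, // mono output buffer, numFrames long
    int numFrames,
    const std::function<void(float *, int)> &renderQuantum) { // hypothetical
  std::vector<float> quantum(kRenderQuantumSize);
  int processed = 0;
  while (processed < numFrames) {
    const int frames = std::min(numFrames - processed, kRenderQuantumSize);
    renderQuantum(quantum.data(), frames); // produce one quantum
    std::memcpy(output + processed, quantum.data(), frames * sizeof(float));
    processed += frames; // advance the write offset, as processedFrames does
  }
}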
package/ios/audioapi/ios/core/IOSAudioRecorder.h

@@ -2,7 +2,7 @@
 
 #ifdef __OBJC__ // when compiled as Objective-C++
 #import <NativeAudioRecorder.h>
-#else
+#else // when compiled as C++
 typedef struct objc_object NativeAudioRecorder;
 #endif // __OBJC__
 

@@ -18,7 +18,8 @@ class IOSAudioRecorder : public AudioRecorder {
   IOSAudioRecorder(
       float sampleRate,
       int bufferLength,
-      const std::shared_ptr<AudioEventHandlerRegistry> …
+      const std::shared_ptr<AudioEventHandlerRegistry>
+          &audioEventHandlerRegistry);
 
   ~IOSAudioRecorder() override;
 
package/ios/audioapi/ios/core/IOSAudioRecorder.mm

@@ -18,7 +18,8 @@ IOSAudioRecorder::IOSAudioRecorder(
     const std::shared_ptr<AudioEventHandlerRegistry> &audioEventHandlerRegistry)
     : AudioRecorder(sampleRate, bufferLength, audioEventHandlerRegistry)
 {
-  AudioReceiverBlock audioReceiverBlock = ^( …
+  AudioReceiverBlock audioReceiverBlock = ^(
+      const AudioBufferList *inputBuffer, int numFrames) {
     if (isRunning_.load()) {
       auto *inputChannel = static_cast<float *>(inputBuffer->mBuffers[0].mData);
       writeToBuffers(inputChannel, numFrames);

@@ -34,9 +35,10 @@ IOSAudioRecorder::IOSAudioRecorder(
     }
   };
 
-  audioRecorder_ =
-  … (old lines 38–39 are truncated in the source diff)
+  audioRecorder_ =
+      [[NativeAudioRecorder alloc] initWithReceiverBlock:audioReceiverBlock
+                                            bufferLength:bufferLength
+                                              sampleRate:sampleRate];
 }
 
 IOSAudioRecorder::~IOSAudioRecorder()
package/ios/audioapi/ios/core/NativeAudioPlayer.m

@@ -15,7 +15,10 @@
 
   __weak typeof(self) weakSelf = self;
   self.renderBlock = ^OSStatus(
-      BOOL *isSilence, …
+      BOOL *isSilence,
+      const AudioTimeStamp *timestamp,
+      AVAudioFrameCount frameCount,
+      AudioBufferList *outputData) {
     if (outputData->mNumberBuffers != weakSelf.channelCount) {
       return kAudioServicesBadPropertySizeError;
     }

@@ -25,8 +28,11 @@
     return kAudioServicesNoError;
   };
 
-  _format = [[AVAudioFormat alloc]
-  … (old line 29 is truncated in the source diff)
+  _format = [[AVAudioFormat alloc]
+      initStandardFormatWithSampleRate:self.sampleRate
+                              channels:self.channelCount];
+  _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format
+                                              renderBlock:self.renderBlock];
   }
 
   return self;

@@ -39,15 +45,18 @@
   AudioEngine *audioEngine = [AudioEngine sharedInstance];
   assert(audioEngine != nil);
 
-  // AudioEngine allows us to attach and connect nodes at runtime but with few …
-  // in this case if it is the first player and recorder started the …
-  // It can be optimized by tracking if we haven't …
+  // AudioEngine allows us to attach and connect nodes at runtime but with few
+  // limitations in this case if it is the first player and recorder started the
+  // engine we need to restart. It can be optimized by tracking if we haven't
+  // break rules of at runtime modifications from docs
   // https://developer.apple.com/documentation/avfaudio/avaudioengine?language=objc
   //
-  // Currently we are restarting because we do not see any significant …
-  // you will need to start and stop player very …
+  // Currently we are restarting because we do not see any significant
+  // performance issue and case when you will need to start and stop player very
+  // frequently
   [audioEngine stopEngine];
-  self.sourceNodeId = [audioEngine attachSourceNode:self.sourceNode …
+  self.sourceNodeId = [audioEngine attachSourceNode:self.sourceNode
+                                              format:self.format];
   return [audioEngine startIfNecessary];
 }
 
package/ios/audioapi/ios/core/NativeAudioRecorder.h

@@ -3,7 +3,8 @@
 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/Foundation.h>
 
-typedef void ( …
+typedef void (
+    ^AudioReceiverBlock)(const AudioBufferList *inputBuffer, int numFrames);
 
 @interface NativeAudioRecorder : NSObject
 
package/ios/audioapi/ios/core/NativeAudioRecorder.m

@@ -14,34 +14,43 @@
 
   self.receiverBlock = [receiverBlock copy];
 
-  float devicePrefferedSampleRate = …
+  float devicePrefferedSampleRate =
+      [[AVAudioSession sharedInstance] sampleRate];
 
   if (!devicePrefferedSampleRate) {
     NSError *error;
     devicePrefferedSampleRate = sampleRate;
 
-    [[AVAudioSession sharedInstance] setPreferredSampleRate:sampleRate …
+    [[AVAudioSession sharedInstance] setPreferredSampleRate:sampleRate
+                                                      error:&error];
   }
 
-  self.inputFormat =
-  … (old lines 27–34 are truncated in the source diff)
+  self.inputFormat =
+      [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32
+                                       sampleRate:devicePrefferedSampleRate
+                                         channels:1
+                                      interleaved:NO];
+  self.outputFormat =
+      [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32
+                                       sampleRate:sampleRate
+                                         channels:1
+                                      interleaved:NO];
+  self.audioConverter =
+      [[AVAudioConverter alloc] initFromFormat:self.inputFormat
+                                      toFormat:self.outputFormat];
 
   __weak typeof(self) weakSelf = self;
   self.receiverSinkBlock = ^OSStatus(
       const AudioTimeStamp *_Nonnull timestamp,
       AVAudioFrameCount frameCount,
       const AudioBufferList *_Nonnull inputData) {
-    return [weakSelf processAudioInput:inputData …
+    return [weakSelf processAudioInput:inputData
+                        withFrameCount:frameCount
+                           atTimestamp:timestamp];
   };
 
-  self.sinkNode = …
+  self.sinkNode =
+      [[AVAudioSinkNode alloc] initWithReceiverBlock:self.receiverSinkBlock];
 }
 
   return self;

@@ -55,8 +64,9 @@
   float outputSampleRate = self.outputFormat.sampleRate;
 
   if (inputSampleRate != outputSampleRate) {
-    AVAudioPCMBuffer *inputBuffer =
-    … (old line 59 is truncated in the source diff)
+    AVAudioPCMBuffer *inputBuffer =
+        [[AVAudioPCMBuffer alloc] initWithPCMFormat:self.inputFormat
+                                      frameCapacity:frameCount];
     memcpy(
         inputBuffer.mutableAudioBufferList->mBuffers[0].mData,
         inputData->mBuffers[0].mData,

@@ -65,22 +75,28 @@
 
   int outputFrameCount = frameCount * outputSampleRate / inputSampleRate;
 
-  AVAudioPCMBuffer *outputBuffer = [[AVAudioPCMBuffer alloc]
-  … (old line 69 is truncated in the source diff)
+  AVAudioPCMBuffer *outputBuffer = [[AVAudioPCMBuffer alloc]
+      initWithPCMFormat:self.audioConverter.outputFormat
+          frameCapacity:outputFrameCount];
 
   NSError *error = nil;
-  AVAudioConverterInputBlock inputBlock = …
-  … (old line 73 is truncated in the source diff)
+  AVAudioConverterInputBlock inputBlock = ^AVAudioBuffer *_Nullable(
+      AVAudioPacketCount inNumberOfPackets,
+      AVAudioConverterInputStatus *outStatus)
   {
     *outStatus = AVAudioConverterInputStatus_HaveData;
     return inputBuffer;
   };
 
   /// IMPORTANT: AVAudioConverter leaks memory without autorelease pool
-  /// more details here: …
-  /// …
+  /// more details here:
+  /// https://github.com/poneciak57/AVAudioConverter-memory-leak-repro-electric-boogaloo
+  /// we can try to remove it in the future or refactor to reuse buffers to
+  /// minimize allocations
   @autoreleasepool {
-    [self.audioConverter convertToBuffer:outputBuffer …
+    [self.audioConverter convertToBuffer:outputBuffer
+                                   error:&error
+                      withInputFromBlock:inputBlock];
   }
 
   if (error) {
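The conversion path above sizes its output buffer from the rate ratio: outputFrameCount = frameCount * outputSampleRate / inputSampleRate, truncated to an integer. A quick self-contained check of that arithmetic (values chosen for illustration):

#include <cassert>

// Frame count after sample-rate conversion: frames scale by the
// output/input rate ratio and truncate toward zero, as in the code above.
static int convertedFrameCount(int frameCount, double inRate, double outRate) {
  return static_cast<int>(frameCount * outRate / inRate);
}

int main() {
  assert(convertedFrameCount(480, 48000.0, 44100.0) == 441); // a 10 ms block
  assert(convertedFrameCount(512, 44100.0, 48000.0) == 557); // 557.27 truncates
  return 0;
}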
@@ -103,13 +119,15 @@
   AudioEngine *audioEngine = [AudioEngine sharedInstance];
   assert(audioEngine != nil);
 
-  // AudioEngine allows us to attach and connect nodes at runtime but with few …
-  // in this case if it is the first recorder node and player …
-  // It can be optimized by tracking if …
+  // AudioEngine allows us to attach and connect nodes at runtime but with few
+  // limitations in this case if it is the first recorder node and player
+  // started the engine we need to restart. It can be optimized by tracking if
+  // we haven't break rules of at runtime modifications from docs
   // https://developer.apple.com/documentation/avfaudio/avaudioengine?language=objc
   //
-  // Currently we are restarting because we do not see any significant …
-  // you will need to start and stop recorder …
+  // Currently we are restarting because we do not see any significant
+  // performance issue and case when you will need to start and stop recorder
+  // very frequently
   [audioEngine stopEngine];
   [audioEngine attachInputNode:self.sinkNode];
   [audioEngine startIfNecessary];
package/ios/audioapi/ios/core/utils/AudioDecoder.mm

@@ -18,7 +18,9 @@ namespace audioapi {
 // Decoding audio in fixed-size chunks because total frame count can't be
 // determined in advance. Note: ma_decoder_get_length_in_pcm_frames() always
 // returns 0 for Vorbis decoders.
-std::vector<float> AudioDecoder::readAllPcmFrames( …
+std::vector<float> AudioDecoder::readAllPcmFrames(
+    ma_decoder &decoder,
+    int outputChannels)
 {
   std::vector<float> buffer;
   std::vector<float> temp(CHUNK_SIZE * outputChannels);

@@ -26,12 +28,16 @@ std::vector<float> AudioDecoder::readAllPcmFrames(ma_decoder &decoder, int outpu…
 
   while (true) {
     ma_uint64 tempFramesDecoded = 0;
-    ma_decoder_read_pcm_frames( …
+    ma_decoder_read_pcm_frames(
+        &decoder, temp.data(), CHUNK_SIZE, &tempFramesDecoded);
     if (tempFramesDecoded == 0) {
       break;
     }
 
-    buffer.insert( …
+    buffer.insert(
+        buffer.end(),
+        temp.data(),
+        temp.data() + tempFramesDecoded * outputChannels);
     outFramesRead += tempFramesDecoded;
   }
 

@@ -51,7 +57,8 @@ std::shared_ptr<AudioBuffer> AudioDecoder::makeAudioBufferFromFloatBuffer(
   }
 
   auto outputFrames = buffer.size() / outputChannels;
-  auto audioBus = std::make_shared<AudioBus>( …
+  auto audioBus = std::make_shared<AudioBus>(
+      outputFrames, outputChannels, outputSampleRate);
 
   for (int ch = 0; ch < outputChannels; ++ch) {
     auto channelData = audioBus->getChannel(ch)->getData();

@@ -62,10 +69,13 @@ std::shared_ptr<AudioBuffer> AudioDecoder::makeAudioBufferFromFloatBuffer(
   return std::make_shared<AudioBuffer>(audioBus);
 }
 
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath( …
+std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(
+    const std::string &path,
+    float sampleRate)
 {
   if (AudioDecoder::pathHasExtension(path, {".mp4", ".m4a", ".aac"})) {
-    auto buffer = …
+    auto buffer =
+        ffmpegdecoder::decodeWithFilePath(path, static_cast<int>(sampleRate));
     if (buffer == nullptr) {
       NSLog(@"Failed to decode with FFmpeg: %s", path.c_str());
       return nullptr;

@@ -73,11 +83,14 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(const std::string…
     return buffer;
   }
   ma_decoder decoder;
-  ma_decoder_config config =
-  … (old line 77 is truncated in the source diff)
+  ma_decoder_config config =
+      ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
+  ma_decoding_backend_vtable *customBackends[] = {
+      ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
 
   config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount = …
+  config.customBackendCount =
+      sizeof(customBackends) / sizeof(customBackends[0]);
 
   if (ma_decoder_init_file(path.c_str(), &config, &decoder) != MA_SUCCESS) {
     NSLog(@"Failed to initialize decoder for file: %s", path.c_str());

@@ -90,14 +103,20 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(const std::string…
 
   std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
   ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer( …
+  return makeAudioBufferFromFloatBuffer(
+      buffer, outputSampleRate, outputChannels);
 }
 
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock( …
+std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(
+    const void *data,
+    size_t size,
+    float sampleRate)
 {
   const AudioFormat format = AudioDecoder::detectAudioFormat(data, size);
-  if (format == AudioFormat::MP4 || format == AudioFormat::M4A || …
-  … (old line 100 is truncated in the source diff)
+  if (format == AudioFormat::MP4 || format == AudioFormat::M4A ||
+      format == AudioFormat::AAC) {
+    auto buffer = ffmpegdecoder::decodeWithMemoryBlock(
+        data, size, static_cast<int>(sampleRate));
     if (buffer == nullptr) {
       NSLog(@"Failed to decode with FFmpeg");
       return nullptr;

@@ -105,12 +124,15 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(const void *dat…
     return buffer;
   }
   ma_decoder decoder;
-  ma_decoder_config config = …
+  ma_decoder_config config =
+      ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
 
-  ma_decoding_backend_vtable *customBackends[] = { …
+  ma_decoding_backend_vtable *customBackends[] = {
+      ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
 
   config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount = …
+  config.customBackendCount =
+      sizeof(customBackends) / sizeof(customBackends[0]);
 
   if (ma_decoder_init_memory(data, size, &config, &decoder) != MA_SUCCESS) {
     NSLog(@"Failed to initialize decoder for memory block");

@@ -123,7 +145,8 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(const void *dat…
 
   std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
   ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer( …
+  return makeAudioBufferFromFloatBuffer(
+      buffer, outputSampleRate, outputChannels);
 }
 
 std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithPCMInBase64(

@@ -134,9 +157,11 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithPCMInBase64(
 {
   auto decodedData = base64_decode(data, false);
   const auto uint8Data = reinterpret_cast<uint8_t *>(decodedData.data());
-  size_t numFramesDecoded = …
+  size_t numFramesDecoded =
+      decodedData.size() / (inputChannelCount * sizeof(int16_t));
 
-  auto audioBus = std::make_shared<AudioBus>( …
+  auto audioBus = std::make_shared<AudioBus>(
+      numFramesDecoded, inputChannelCount, inputSampleRate);
 
   for (int ch = 0; ch < inputChannelCount; ++ch) {
     auto channelData = audioBus->getChannel(ch)->getData();
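decodeWithPCMInBase64 derives its frame count from the raw byte size (bytes / (channels * sizeof(int16_t))) and then writes per-channel float data into the AudioBus. A sketch of the matching deinterleave-and-normalize step for interleaved 16-bit PCM; the 1/32768 scale is the common int16 convention and the function name is illustrative, not the package's API:

#include <cstddef>
#include <cstdint>
#include <vector>

// Interleaved int16 PCM -> planar float32 channels in [-1, 1).
static std::vector<std::vector<float>> deinterleavePcm16(
    const int16_t *samples, size_t byteSize, int channelCount) {
  const size_t frames = byteSize / (channelCount * sizeof(int16_t));
  std::vector<std::vector<float>> channels(
      channelCount, std::vector<float>(frames));
  for (size_t frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channelCount; ++ch) {
      channels[ch][frame] =
          samples[frame * channelCount + ch] / 32768.0f; // normalize int16
    }
  }
  return channels;
}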
package/ios/audioapi/ios/system/AudioEngine.h

@@ -16,7 +16,8 @@
 @property (nonatomic, strong) AVAudioSinkNode *inputNode;
 @property (nonatomic, weak) AudioSessionManager *sessionManager;
 
-- (instancetype)initWithAudioSessionManager: …
+- (instancetype)initWithAudioSessionManager:
+    (AudioSessionManager *)sessionManager;
 
 + (instancetype)sharedInstance;
 - (void)cleanup;

@@ -30,7 +31,8 @@
 - (void)unmarkAsInterrupted;
 - (bool)isSupposedToRun;
 
-- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode …
+- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode
+                        format:(AVAudioFormat *)format;
 - (void)detachSourceNodeWithId:(NSString *)sourceNodeId;
 
 - (void)attachInputNode:(AVAudioSinkNode *)inputNode;
package/ios/audioapi/ios/system/AudioEngine.mm

@@ -10,7 +10,8 @@ static AudioEngine *_sharedInstance = nil;
   return _sharedInstance;
 }
 
-- (instancetype)initWithAudioSessionManager: …
+- (instancetype)initWithAudioSessionManager:
+    (AudioSessionManager *)sessionManager
 {
   if (self = [super init]) {
     self.isInterrupted = false;

@@ -54,12 +55,16 @@ static AudioEngine *_sharedInstance = nil;
     AVAudioFormat *format = [self.sourceFormats valueForKey:sourceNodeId];
 
     [self.audioEngine attachNode:sourceNode];
-    [self.audioEngine connect:sourceNode …
+    [self.audioEngine connect:sourceNode
+                           to:self.audioEngine.mainMixerNode
+                       format:format];
   }
 
   if (self.inputNode) {
     [self.audioEngine attachNode:self.inputNode];
-    [self.audioEngine connect:self.audioEngine.inputNode …
+    [self.audioEngine connect:self.audioEngine.inputNode
+                           to:self.inputNode
+                       format:nil];
   }
 }
 

@@ -108,7 +113,8 @@ static AudioEngine *_sharedInstance = nil;
   [self.audioEngine startAndReturnError:&error];
 
   if (error != nil) {
-    NSLog( …
+    NSLog(
+        @"Error while starting the audio engine: %@", [error debugDescription]);
     return false;
   }
 

@@ -155,7 +161,8 @@ static AudioEngine *_sharedInstance = nil;
     self.isInterrupted = false;
 }
 
-- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode …
+- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode
+                        format:(AVAudioFormat *)format
 {
   NSString *sourceNodeId = [[NSUUID UUID] UUIDString];
   NSLog(@"[AudioEngine] attaching new source node with ID: %@", sourceNodeId);

@@ -165,7 +172,9 @@ static AudioEngine *_sharedInstance = nil;
   [self.sourceStates setValue:@true forKey:sourceNodeId];
 
   [self.audioEngine attachNode:sourceNode];
-  [self.audioEngine connect:sourceNode …
+  [self.audioEngine connect:sourceNode
+                         to:self.audioEngine.mainMixerNode
+                     format:format];
 
   return sourceNodeId;
 }

@@ -256,7 +265,9 @@ static AudioEngine *_sharedInstance = nil;
 
   // AVAudioEngine state
   NSLog(@"➡️ engine.isRunning: %@", self.audioEngine.isRunning ? @"YES" : @"NO");
-  NSLog( …
+  NSLog(
+      @"➡️ engine.isInManualRenderingMode: %@",
+      self.audioEngine.isInManualRenderingMode ? @"YES" : @"NO");
 
   // Session state
   NSLog(@"🎚️ Session category: %@", session.category);

@@ -279,7 +290,10 @@ static AudioEngine *_sharedInstance = nil;
 
   // Output node format
   AVAudioFormat *format = [self.audioEngine.outputNode inputFormatForBus:0];
-  NSLog( …
+  NSLog(
+      @"📐 Engine output format: %.0f Hz, %u channels",
+      format.sampleRate,
+      format.channelCount);
 
   NSLog(@"=======================================================");
 }
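The AudioEngine.mm hunks above follow AVAudioEngine's standard attach → connect → start ordering, with the format now passed explicitly at connect time. A condensed sketch of the same flow against the plain AVFoundation API (the silent render block is a placeholder, not the package's renderer):

#import <AVFoundation/AVFoundation.h>

static AVAudioEngine *makeEngineWithSilentSource(void)
{
  AVAudioEngine *engine = [[AVAudioEngine alloc] init];
  AVAudioFormat *format =
      [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0
                                                     channels:2];
  AVAudioSourceNode *source = [[AVAudioSourceNode alloc]
      initWithFormat:format
         renderBlock:^OSStatus(
             BOOL *isSilence,
             const AudioTimeStamp *timestamp,
             AVAudioFrameCount frameCount,
             AudioBufferList *outputData) {
           *isSilence = YES; // placeholder: emit silence
           return noErr;
         }];

  [engine attachNode:source];                                    // 1. attach
  [engine connect:source to:engine.mainMixerNode format:format]; // 2. connect
  NSError *error = nil;
  if (![engine startAndReturnError:&error]) {                    // 3. start
    NSLog(@"engine start failed: %@", error);
  }
  return engine;
}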
package/ios/audioapi/ios/system/AudioSessionManager.h

@@ -30,10 +30,14 @@
 - (bool)setActive:(bool)active;
 - (void)disableSessionManagement;
 
-- (void)requestRecordingPermissions:(RCTPromiseResolveBlock)resolve …
-… (old lines 34–37 are truncated in the source diff)
+- (void)requestRecordingPermissions:(RCTPromiseResolveBlock)resolve
+                             reject:(RCTPromiseRejectBlock)reject;
+- (void)checkRecordingPermissions:(RCTPromiseResolveBlock)resolve
+                           reject:(RCTPromiseRejectBlock)reject;
+
+- (void)getDevicesInfo:(RCTPromiseResolveBlock)resolve
+                reject:(RCTPromiseRejectBlock)reject;
+- (NSArray<NSDictionary *> *)parseDeviceList:
+    (NSArray<AVAudioSessionPortDescription *> *)devices;
 
 @end
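The new promise-based declarations pair each resolve block with a reject block, matching React Native's native-module convention (RCTPromiseResolveBlock takes the result value; RCTPromiseRejectBlock takes a code, message, and error). A hypothetical implementation shape for the check method, using AVAudioSession's record-permission API inside the manager's @implementation; the resolved status strings are assumptions, not the package's actual values:

#import <AVFoundation/AVFoundation.h>
#import <React/RCTBridgeModule.h>

// Hypothetical sketch: resolve with a status string derived from the
// current record permission (never rejects; the reject parameter is
// kept only to satisfy the RN promise signature).
- (void)checkRecordingPermissions:(RCTPromiseResolveBlock)resolve
                           reject:(RCTPromiseRejectBlock)reject
{
  switch ([AVAudioSession sharedInstance].recordPermission) {
    case AVAudioSessionRecordPermissionGranted:
      resolve(@"Granted");
      break;
    case AVAudioSessionRecordPermissionDenied:
      resolve(@"Denied");
      break;
    default:
      resolve(@"Undetermined");
      break;
  }
}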