react-native-audio-api 0.11.0-nightly-010ea11-20251110 → 0.11.0-nightly-141c86f-20251112
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/common/cpp/audioapi/AudioAPIModuleInstaller.h +6 -4
- package/common/cpp/audioapi/HostObjects/AudioContextHostObject.cpp +6 -2
- package/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +16 -2
- package/common/cpp/audioapi/HostObjects/OfflineAudioContextHostObject.cpp +3 -1
- package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.cpp +14 -9
- package/common/cpp/audioapi/HostObjects/effects/ConvolverNodeHostObject.h +1 -1
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.cpp +19 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferBaseSourceNodeHostObject.h +3 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp +2 -0
- package/common/cpp/audioapi/HostObjects/sources/AudioBufferSourceNodeHostObject.cpp +4 -1
- package/common/cpp/audioapi/core/AudioContext.cpp +19 -24
- package/common/cpp/audioapi/core/AudioContext.h +2 -2
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +16 -0
- package/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +23 -3
- package/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +2 -0
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +16 -3
- package/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +5 -5
- package/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +10 -0
- package/common/cpp/audioapi/dsp/VectorMath.cpp +15 -15
- package/common/cpp/test/src/biquad/BiquadFilterTest.cpp +5 -5
- package/ios/audioapi/ios/AudioAPIModule.h +6 -4
- package/ios/audioapi/ios/AudioAPIModule.mm +62 -41
- package/ios/audioapi/ios/core/IOSAudioPlayer.h +1 -1
- package/ios/audioapi/ios/core/IOSAudioPlayer.mm +33 -24
- package/ios/audioapi/ios/core/IOSAudioRecorder.h +3 -2
- package/ios/audioapi/ios/core/IOSAudioRecorder.mm +6 -4
- package/ios/audioapi/ios/core/NativeAudioPlayer.m +18 -9
- package/ios/audioapi/ios/core/NativeAudioRecorder.h +2 -1
- package/ios/audioapi/ios/core/NativeAudioRecorder.m +45 -27
- package/ios/audioapi/ios/core/utils/AudioDecoder.mm +44 -19
- package/ios/audioapi/ios/system/AudioEngine.h +4 -2
- package/ios/audioapi/ios/system/AudioEngine.mm +22 -8
- package/ios/audioapi/ios/system/AudioSessionManager.h +9 -5
- package/ios/audioapi/ios/system/AudioSessionManager.mm +51 -21
- package/ios/audioapi/ios/system/LockScreenManager.mm +137 -88
- package/ios/audioapi/ios/system/NotificationManager.mm +79 -48
- package/lib/commonjs/api.js.map +1 -1
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/commonjs/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/commonjs/core/AudioContext.js +1 -1
- package/lib/commonjs/core/AudioContext.js.map +1 -1
- package/lib/commonjs/core/ConvolverNode.js +2 -2
- package/lib/commonjs/core/ConvolverNode.js.map +1 -1
- package/lib/commonjs/web-core/AudioContext.js +1 -1
- package/lib/commonjs/web-core/AudioContext.js.map +1 -1
- package/lib/module/api.js.map +1 -1
- package/lib/module/core/AudioBufferBaseSourceNode.js +3 -0
- package/lib/module/core/AudioBufferBaseSourceNode.js.map +1 -1
- package/lib/module/core/AudioContext.js +1 -1
- package/lib/module/core/AudioContext.js.map +1 -1
- package/lib/module/core/ConvolverNode.js +2 -2
- package/lib/module/core/ConvolverNode.js.map +1 -1
- package/lib/module/web-core/AudioContext.js +1 -1
- package/lib/module/web-core/AudioContext.js.map +1 -1
- package/lib/typescript/api.d.ts +1 -1
- package/lib/typescript/api.d.ts.map +1 -1
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts +1 -0
- package/lib/typescript/core/AudioBufferBaseSourceNode.d.ts.map +1 -1
- package/lib/typescript/core/AudioContext.d.ts.map +1 -1
- package/lib/typescript/interfaces.d.ts +4 -1
- package/lib/typescript/interfaces.d.ts.map +1 -1
- package/lib/typescript/types.d.ts +0 -1
- package/lib/typescript/types.d.ts.map +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts +1 -1
- package/lib/typescript/web-core/AudioContext.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/api.ts +0 -1
- package/src/core/AudioBufferBaseSourceNode.ts +8 -0
- package/src/core/AudioContext.ts +0 -1
- package/src/core/ConvolverNode.ts +2 -2
- package/src/interfaces.ts +6 -1
- package/src/types.ts +0 -1
- package/src/web-core/AudioContext.tsx +1 -1
package/ios/audioapi/ios/AudioAPIModule.mm

@@ -26,7 +26,7 @@ using namespace worklets;
 
 #if defined(RCT_NEW_ARCH_ENABLED)
 // nothing
-#else
+#else // defined(RCT_NEW_ARCH_ENABLED)
 @interface RCTBridge (RCTTurboModule)
 - (std::shared_ptr<facebook::react::CallInvoker>)jsCallInvoker;
 - (void)_tryAndHandleError:(dispatch_block_t)block;
@@ -62,24 +62,30 @@ RCT_EXPORT_MODULE(AudioAPIModule);
 RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(install)
 {
   self.audioSessionManager = [[AudioSessionManager alloc] init];
-  self.audioEngine = [[AudioEngine alloc]
-
-  self.
+  self.audioEngine = [[AudioEngine alloc]
+      initWithAudioSessionManager:self.audioSessionManager];
+  self.lockScreenManager =
+      [[LockScreenManager alloc] initWithAudioAPIModule:self];
+  self.notificationManager =
+      [[NotificationManager alloc] initWithAudioAPIModule:self];
 
-  auto jsiRuntime =
+  auto jsiRuntime =
+      reinterpret_cast<facebook::jsi::Runtime *>(self.bridge.runtime);
 
 #if defined(RCT_NEW_ARCH_ENABLED)
   auto jsCallInvoker = _callInvoker.callInvoker;
-#else
+#else // defined(RCT_NEW_ARCH_ENABLED)
   auto jsCallInvoker = self.bridge.jsCallInvoker;
 #endif // defined(RCT_NEW_ARCH_ENABLED)
 
   assert(jsiRuntime != nullptr);
 
-  _eventHandler =
+  _eventHandler =
+      std::make_shared<AudioEventHandlerRegistry>(jsiRuntime, jsCallInvoker);
 
 #if RN_AUDIO_API_ENABLE_WORKLETS
-  WorkletsModule *workletsModule =
+  WorkletsModule *workletsModule =
+      [_moduleRegistry moduleForName:"WorkletsModule"];
 
   if (!workletsModule) {
     NSLog(@"WorkletsModule not found in module registry");
@@ -100,9 +106,11 @@ RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(install)
   }
 
   // Get the actual JSI Runtime reference
-  audioapi::AudioAPIModuleInstaller::injectJSIBindings(
+  audioapi::AudioAPIModuleInstaller::injectJSIBindings(
+      jsiRuntime, jsCallInvoker, _eventHandler, uiWorkletRuntime);
 #else
-  audioapi::AudioAPIModuleInstaller::injectJSIBindings(
+  audioapi::AudioAPIModuleInstaller::injectJSIBindings(
+      jsiRuntime, jsCallInvoker, _eventHandler);
 #endif
 
   NSLog(@"Successfully installed JSI bindings for react-native-audio-api!");
@@ -115,29 +123,33 @@ RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(getDevicePreferredSampleRate)
 }
 
 RCT_EXPORT_METHOD(
-    setAudioSessionActivity : (BOOL)enabled resolve : (RCTPromiseResolveBlock)
-    reject)
+    setAudioSessionActivity : (BOOL)enabled resolve : (RCTPromiseResolveBlock)
+        resolve reject : (RCTPromiseRejectBlock)reject)
 {
-  dispatch_async(
-
-
-
-
-
-
-
-
+  dispatch_async(
+      dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+        if (!self.audioSessionManager.shouldManageSession) {
+          [self.audioSessionManager setShouldManageSession:true];
+        }
+        if ([self.audioSessionManager setActive:enabled]) {
+          resolve(@"true");
+          return;
+        }
+        resolve(@"false");
+      });
 }
 
 RCT_EXPORT_METHOD(
-    setAudioSessionOptions : (NSString *)category mode : (NSString *)
-    options allowHaptics : (BOOL)allowHaptics)
+    setAudioSessionOptions : (NSString *)category mode : (NSString *)
+        mode options : (NSArray *)options allowHaptics : (BOOL)allowHaptics)
 {
   if (!self.audioSessionManager.shouldManageSession) {
     [self.audioSessionManager setShouldManageSession:true];
   }
-  [self.audioSessionManager setAudioSessionOptions:category
+  [self.audioSessionManager setAudioSessionOptions:category
+                                              mode:mode
+                                           options:options
+                                      allowHaptics:allowHaptics];
 }
 
 RCT_EXPORT_METHOD(setLockScreenInfo : (NSDictionary *)info)
@@ -150,7 +162,8 @@ RCT_EXPORT_METHOD(resetLockScreenInfo)
   [self.lockScreenManager resetLockScreenInfo];
 }
 
-RCT_EXPORT_METHOD(
+RCT_EXPORT_METHOD(
+    enableRemoteCommand : (NSString *)name enabled : (BOOL)enabled)
 {
   [self.lockScreenManager enableRemoteCommand:name enabled:enabled];
 }
@@ -171,28 +184,34 @@ RCT_EXPORT_METHOD(observeVolumeChanges : (BOOL)enabled)
 }
 
 RCT_EXPORT_METHOD(
-    requestRecordingPermissions : (nonnull RCTPromiseResolveBlock)
-    reject)
+    requestRecordingPermissions : (nonnull RCTPromiseResolveBlock)
+        resolve reject : (nonnull RCTPromiseRejectBlock)reject)
 {
-  dispatch_async(
-
-
+  dispatch_async(
+      dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+        [self.audioSessionManager requestRecordingPermissions:resolve
+                                                       reject:reject];
+      });
 }
 
 RCT_EXPORT_METHOD(
-    checkRecordingPermissions : (nonnull RCTPromiseResolveBlock)
+    checkRecordingPermissions : (nonnull RCTPromiseResolveBlock)
+        resolve reject : (nonnull RCTPromiseRejectBlock)reject)
 {
-  dispatch_async(
-
-
+  dispatch_async(
+      dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+        [self.audioSessionManager checkRecordingPermissions:resolve
+                                                     reject:reject];
+      });
 }
 
 RCT_EXPORT_METHOD(
-    getDevicesInfo : (nonnull RCTPromiseResolveBlock)
+    getDevicesInfo : (nonnull RCTPromiseResolveBlock)
+        resolve reject : (nonnull RCTPromiseRejectBlock)reject)
 {
-  dispatch_async(
-
-
+  dispatch_async(
+      dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
+      ^{ [self.audioSessionManager getDevicesInfo:resolve reject:reject]; });
 }
 
 RCT_EXPORT_METHOD(disableSessionManagement)
@@ -208,7 +227,8 @@ RCT_EXPORT_METHOD(disableSessionManagement)
 }
 #endif // RCT_NEW_ARCH_ENABLED
 
-- (void)invokeHandlerWithEventName:(NSString *)eventName
+- (void)invokeHandlerWithEventName:(NSString *)eventName
+                         eventBody:(NSDictionary *)eventBody
 {
   auto name = [eventName UTF8String];
 
@@ -242,7 +262,8 @@ RCT_EXPORT_METHOD(disableSessionManagement)
 
 - (dispatch_queue_t)methodQueue
 {
-  return dispatch_queue_create(
+  return dispatch_queue_create(
+      "swmansion.audioapi.Queue", DISPATCH_QUEUE_SERIAL);
 }
 
 @end
package/ios/audioapi/ios/core/IOSAudioPlayer.mm

@@ -13,36 +13,45 @@ IOSAudioPlayer::IOSAudioPlayer(
     const std::function<void(std::shared_ptr<AudioBus>, int)> &renderAudio,
     float sampleRate,
     int channelCount)
-    : renderAudio_(renderAudio),
+    : renderAudio_(renderAudio),
+      channelCount_(channelCount),
+      audioBus_(0),
+      isRunning_(false)
 {
-  RenderAudioBlock renderAudioBlock =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  RenderAudioBlock renderAudioBlock =
+      ^(AudioBufferList *outputData, int numFrames) {
+        int processedFrames = 0;
+
+        while (processedFrames < numFrames) {
+          int framesToProcess =
+              std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE);
+
+          if (isRunning_.load()) {
+            renderAudio_(audioBus_, framesToProcess);
+          } else {
+            audioBus_->zero();
+          }
+
+          for (int channel = 0; channel < channelCount_; channel += 1) {
+            float *outputChannel = (float *)outputData->mBuffers[channel].mData;
+            auto *inputChannel = audioBus_->getChannel(channel)->getData();
+
+            memcpy(
+                outputChannel + processedFrames,
+                inputChannel,
+                framesToProcess * sizeof(float));
+          }
+
+          processedFrames += framesToProcess;
+        }
+      };
 
   audioPlayer_ = [[NativeAudioPlayer alloc] initWithRenderAudio:renderAudioBlock
                                                      sampleRate:sampleRate
                                                    channelCount:channelCount_];
 
-  audioBus_ = std::make_shared<AudioBus>(
+  audioBus_ = std::make_shared<AudioBus>(
+      RENDER_QUANTUM_SIZE, channelCount_, sampleRate);
 }
 
 IOSAudioPlayer::~IOSAudioPlayer()
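The rewritten render block above pulls audio from the graph in fixed render quanta and copies each quantum into the Core Audio output buffers at the correct frame offset. Below is a minimal, self-contained C++ sketch of the same chunking pattern; kRenderQuantum, renderQuantum, and fillOutput are hypothetical stand-ins for RENDER_QUANTUM_SIZE, renderAudio_, and the Objective-C render block, and the real code works on AudioBus and AudioBufferList rather than plain vectors.

```cpp
// Sketch only: chunk an arbitrary frame request into fixed render quanta.
#include <algorithm>
#include <cstring>
#include <functional>
#include <vector>

constexpr int kRenderQuantum = 128; // assumed value of RENDER_QUANTUM_SIZE

// Renders `numFrames` frames into per-channel output buffers by asking the
// engine for at most one render quantum at a time, then copying each chunk
// to the right frame offset of every channel.
void fillOutput(
    std::vector<std::vector<float>> &output, // [channel][frame]
    int numFrames,
    const std::function<void(std::vector<float> &, int)> &renderQuantum)
{
  std::vector<float> quantum(kRenderQuantum);
  int processed = 0;
  while (processed < numFrames) {
    int todo = std::min(numFrames - processed, kRenderQuantum);
    renderQuantum(quantum, todo); // engine fills up to one quantum
    for (auto &channel : output) {
      std::memcpy(
          channel.data() + processed, quantum.data(), todo * sizeof(float));
    }
    processed += todo;
  }
}
```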
package/ios/audioapi/ios/core/IOSAudioRecorder.h

@@ -2,7 +2,7 @@
 
 #ifdef __OBJC__ // when compiled as Objective-C++
 #import <NativeAudioRecorder.h>
-#else
+#else // when compiled as C++
 typedef struct objc_object NativeAudioRecorder;
 #endif // __OBJC__
 
@@ -18,7 +18,8 @@ class IOSAudioRecorder : public AudioRecorder {
   IOSAudioRecorder(
       float sampleRate,
       int bufferLength,
-      const std::shared_ptr<AudioEventHandlerRegistry>
+      const std::shared_ptr<AudioEventHandlerRegistry>
+          &audioEventHandlerRegistry);
 
   ~IOSAudioRecorder() override;
 
package/ios/audioapi/ios/core/IOSAudioRecorder.mm

@@ -18,7 +18,8 @@ IOSAudioRecorder::IOSAudioRecorder(
     const std::shared_ptr<AudioEventHandlerRegistry> &audioEventHandlerRegistry)
     : AudioRecorder(sampleRate, bufferLength, audioEventHandlerRegistry)
 {
-  AudioReceiverBlock audioReceiverBlock = ^(
+  AudioReceiverBlock audioReceiverBlock = ^(
+      const AudioBufferList *inputBuffer, int numFrames) {
     if (isRunning_.load()) {
       auto *inputChannel = static_cast<float *>(inputBuffer->mBuffers[0].mData);
       writeToBuffers(inputChannel, numFrames);
@@ -34,9 +35,10 @@ IOSAudioRecorder::IOSAudioRecorder(
     }
   };
 
-  audioRecorder_ =
-
-
+  audioRecorder_ =
+      [[NativeAudioRecorder alloc] initWithReceiverBlock:audioReceiverBlock
+                                            bufferLength:bufferLength
+                                              sampleRate:sampleRate];
 }
 
 IOSAudioRecorder::~IOSAudioRecorder()
package/ios/audioapi/ios/core/NativeAudioPlayer.m

@@ -15,7 +15,10 @@
 
   __weak typeof(self) weakSelf = self;
   self.renderBlock = ^OSStatus(
-      BOOL *isSilence,
+      BOOL *isSilence,
+      const AudioTimeStamp *timestamp,
+      AVAudioFrameCount frameCount,
+      AudioBufferList *outputData) {
     if (outputData->mNumberBuffers != weakSelf.channelCount) {
       return kAudioServicesBadPropertySizeError;
     }
@@ -25,8 +28,11 @@
     return kAudioServicesNoError;
   };
 
-  _format = [[AVAudioFormat alloc]
-
+  _format = [[AVAudioFormat alloc]
+      initStandardFormatWithSampleRate:self.sampleRate
+                              channels:self.channelCount];
+  _sourceNode = [[AVAudioSourceNode alloc] initWithFormat:self.format
+                                              renderBlock:self.renderBlock];
 }
 
 return self;
@@ -39,15 +45,18 @@
   AudioEngine *audioEngine = [AudioEngine sharedInstance];
   assert(audioEngine != nil);
 
-  // AudioEngine allows us to attach and connect nodes at runtime but with few
-  // in this case if it is the first player and recorder started the
-  // It can be optimized by tracking if we haven't
+  // AudioEngine allows us to attach and connect nodes at runtime but with few
+  // limitations in this case if it is the first player and recorder started the
+  // engine we need to restart. It can be optimized by tracking if we haven't
+  // break rules of at runtime modifications from docs
   // https://developer.apple.com/documentation/avfaudio/avaudioengine?language=objc
   //
-  // Currently we are restarting because we do not see any significant
-  // you will need to start and stop player very
+  // Currently we are restarting because we do not see any significant
+  // performance issue and case when you will need to start and stop player very
+  // frequently
   [audioEngine stopEngine];
-  self.sourceNodeId = [audioEngine attachSourceNode:self.sourceNode
+  self.sourceNodeId = [audioEngine attachSourceNode:self.sourceNode
+                                             format:self.format];
   return [audioEngine startIfNecessary];
 }
 
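The expanded comment above notes that the engine is currently stopped and restarted around every node attachment, and that this could be optimized by tracking whether a given runtime modification actually requires a restart. A hypothetical C++ sketch of that tracking idea follows; it is not the library's code, only an illustration of the guard the comment suggests.

```cpp
// Sketch only: restart the engine just for attachments that violate the
// runtime-modification rules (e.g. the first node of a given kind).
#include <functional>

class EngineRestartGuard {
 public:
  // `needsRestart` says whether this attachment is safe while running;
  // `start` is assumed to be a no-op if the engine is already running.
  void attachNode(
      bool needsRestart,
      const std::function<void()> &stop,
      const std::function<void()> &attach,
      const std::function<void()> &start)
  {
    if (needsRestart) {
      stop(); // only pay the restart cost when the rules demand it
    }
    attach();
    start();
  }
};
```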
package/ios/audioapi/ios/core/NativeAudioRecorder.h

@@ -3,7 +3,8 @@
 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/Foundation.h>
 
-typedef void (
+typedef void (
+    ^AudioReceiverBlock)(const AudioBufferList *inputBuffer, int numFrames);
 
 @interface NativeAudioRecorder : NSObject
 
package/ios/audioapi/ios/core/NativeAudioRecorder.m

@@ -14,34 +14,43 @@
 
   self.receiverBlock = [receiverBlock copy];
 
-  float devicePrefferedSampleRate =
+  float devicePrefferedSampleRate =
+      [[AVAudioSession sharedInstance] sampleRate];
 
   if (!devicePrefferedSampleRate) {
     NSError *error;
     devicePrefferedSampleRate = sampleRate;
 
-    [[AVAudioSession sharedInstance] setPreferredSampleRate:sampleRate
+    [[AVAudioSession sharedInstance] setPreferredSampleRate:sampleRate
+                                                      error:&error];
   }
 
-  self.inputFormat =
-
-
-
-
-
-
-
-
+  self.inputFormat =
+      [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32
+                                       sampleRate:devicePrefferedSampleRate
+                                         channels:1
+                                      interleaved:NO];
+  self.outputFormat =
+      [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32
+                                       sampleRate:sampleRate
+                                         channels:1
+                                      interleaved:NO];
+  self.audioConverter =
+      [[AVAudioConverter alloc] initFromFormat:self.inputFormat
+                                      toFormat:self.outputFormat];
 
   __weak typeof(self) weakSelf = self;
   self.receiverSinkBlock = ^OSStatus(
       const AudioTimeStamp *_Nonnull timestamp,
       AVAudioFrameCount frameCount,
       const AudioBufferList *_Nonnull inputData) {
-    return [weakSelf processAudioInput:inputData
+    return [weakSelf processAudioInput:inputData
+                        withFrameCount:frameCount
+                           atTimestamp:timestamp];
   };
 
-  self.sinkNode =
+  self.sinkNode =
+      [[AVAudioSinkNode alloc] initWithReceiverBlock:self.receiverSinkBlock];
 }
 
 return self;
@@ -55,8 +64,9 @@
   float outputSampleRate = self.outputFormat.sampleRate;
 
   if (inputSampleRate != outputSampleRate) {
-    AVAudioPCMBuffer *inputBuffer =
-
+    AVAudioPCMBuffer *inputBuffer =
+        [[AVAudioPCMBuffer alloc] initWithPCMFormat:self.inputFormat
+                                      frameCapacity:frameCount];
     memcpy(
         inputBuffer.mutableAudioBufferList->mBuffers[0].mData,
         inputData->mBuffers[0].mData,
@@ -65,22 +75,28 @@
 
   int outputFrameCount = frameCount * outputSampleRate / inputSampleRate;
 
-  AVAudioPCMBuffer *outputBuffer = [[AVAudioPCMBuffer alloc]
-
+  AVAudioPCMBuffer *outputBuffer = [[AVAudioPCMBuffer alloc]
+      initWithPCMFormat:self.audioConverter.outputFormat
+          frameCapacity:outputFrameCount];
 
   NSError *error = nil;
-  AVAudioConverterInputBlock inputBlock =
-
+  AVAudioConverterInputBlock inputBlock = ^AVAudioBuffer *_Nullable(
+      AVAudioPacketCount inNumberOfPackets,
+      AVAudioConverterInputStatus *outStatus)
   {
     *outStatus = AVAudioConverterInputStatus_HaveData;
     return inputBuffer;
   };
 
   /// IMPORTANT: AVAudioConverter leaks memory without autorelease pool
-  /// more details here:
-  ///
+  /// more details here:
+  /// https://github.com/poneciak57/AVAudioConverter-memory-leak-repro-electric-boogaloo
+  /// we can try to remove it in the future or refactor to reuse buffers to
+  /// minimize allocations
   @autoreleasepool {
-    [self.audioConverter convertToBuffer:outputBuffer
+    [self.audioConverter convertToBuffer:outputBuffer
+                                   error:&error
+                      withInputFromBlock:inputBlock];
   }
 
   if (error) {
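The conversion path above sizes its output buffer from the rate ratio (outputFrameCount = frameCount * outputSampleRate / inputSampleRate) and wraps the AVAudioConverter call in @autoreleasepool because of the leak referenced in the comment. A worked example of the frame-count arithmetic, assuming a 48 kHz device rate converted down to a 44.1 kHz recorder rate (the concrete numbers are illustrative, not from the diff):

```cpp
// Sketch only: the output-buffer sizing math used before conversion.
#include <cstdio>

int main()
{
  int frameCount = 1024;           // frames delivered by the sink node
  double inputSampleRate = 48000;  // assumed device (input) rate
  double outputSampleRate = 44100; // assumed requested recorder rate

  // Integer truncation matches the Objective-C expression above:
  // 1024 * 44100 / 48000 = 940.8, stored as 940.
  int outputFrameCount =
      static_cast<int>(frameCount * outputSampleRate / inputSampleRate);
  std::printf("%d\n", outputFrameCount); // prints 940
  return 0;
}
```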
@@ -103,13 +119,15 @@
   AudioEngine *audioEngine = [AudioEngine sharedInstance];
   assert(audioEngine != nil);
 
-  // AudioEngine allows us to attach and connect nodes at runtime but with few
-  // in this case if it is the first recorder node and player
-  // It can be optimized by tracking if
+  // AudioEngine allows us to attach and connect nodes at runtime but with few
+  // limitations in this case if it is the first recorder node and player
+  // started the engine we need to restart. It can be optimized by tracking if
+  // we haven't break rules of at runtime modifications from docs
   // https://developer.apple.com/documentation/avfaudio/avaudioengine?language=objc
   //
-  // Currently we are restarting because we do not see any significant
-  // you will need to start and stop recorder
+  // Currently we are restarting because we do not see any significant
+  // performance issue and case when you will need to start and stop recorder
+  // very frequently
   [audioEngine stopEngine];
   [audioEngine attachInputNode:self.sinkNode];
   [audioEngine startIfNecessary];
package/ios/audioapi/ios/core/utils/AudioDecoder.mm

@@ -18,7 +18,9 @@ namespace audioapi {
 // Decoding audio in fixed-size chunks because total frame count can't be
 // determined in advance. Note: ma_decoder_get_length_in_pcm_frames() always
 // returns 0 for Vorbis decoders.
-std::vector<float> AudioDecoder::readAllPcmFrames(
+std::vector<float> AudioDecoder::readAllPcmFrames(
+    ma_decoder &decoder,
+    int outputChannels)
 {
   std::vector<float> buffer;
   std::vector<float> temp(CHUNK_SIZE * outputChannels);
@@ -26,12 +28,16 @@ std::vector<float> AudioDecoder::readAllPcmFrames(ma_decoder &decoder, int outpu
 
   while (true) {
     ma_uint64 tempFramesDecoded = 0;
-    ma_decoder_read_pcm_frames(
+    ma_decoder_read_pcm_frames(
+        &decoder, temp.data(), CHUNK_SIZE, &tempFramesDecoded);
     if (tempFramesDecoded == 0) {
       break;
     }
 
-    buffer.insert(
+    buffer.insert(
+        buffer.end(),
+        temp.data(),
+        temp.data() + tempFramesDecoded * outputChannels);
     outFramesRead += tempFramesDecoded;
   }
 
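readAllPcmFrames grows a single interleaved float vector chunk by chunk because the total frame count is unknown up front (per the comment above, ma_decoder_get_length_in_pcm_frames() reports 0 for Vorbis). A self-contained C++ sketch of the same accumulate-in-chunks pattern, with a hypothetical readChunk() standing in for ma_decoder_read_pcm_frames() and an assumed chunk size:

```cpp
// Sketch only: decode an unknown-length stream in fixed-size chunks.
#include <cstdint>
#include <vector>

constexpr int kChunkFrames = 4096; // assumed CHUNK_SIZE

// readChunk fills `dst` with up to `maxFrames` frames of interleaved samples
// and returns the number of frames actually produced (0 at end of stream).
std::vector<float> readAll(
    uint64_t (*readChunk)(float *dst, uint64_t maxFrames), int channels)
{
  std::vector<float> buffer;
  std::vector<float> temp(kChunkFrames * channels);
  while (true) {
    uint64_t framesDecoded = readChunk(temp.data(), kChunkFrames);
    if (framesDecoded == 0) {
      break; // decoder exhausted; total length was never known in advance
    }
    // Append only the samples that were actually decoded this round.
    buffer.insert(
        buffer.end(), temp.data(), temp.data() + framesDecoded * channels);
  }
  return buffer;
}
```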
@@ -51,7 +57,8 @@ std::shared_ptr<AudioBuffer> AudioDecoder::makeAudioBufferFromFloatBuffer(
   }
 
   auto outputFrames = buffer.size() / outputChannels;
-  auto audioBus = std::make_shared<AudioBus>(
+  auto audioBus = std::make_shared<AudioBus>(
+      outputFrames, outputChannels, outputSampleRate);
 
   for (int ch = 0; ch < outputChannels; ++ch) {
     auto channelData = audioBus->getChannel(ch)->getData();
@@ -62,10 +69,13 @@ std::shared_ptr<AudioBuffer> AudioDecoder::makeAudioBufferFromFloatBuffer(
   return std::make_shared<AudioBuffer>(audioBus);
 }
 
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(
+std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(
+    const std::string &path,
+    float sampleRate)
 {
   if (AudioDecoder::pathHasExtension(path, {".mp4", ".m4a", ".aac"})) {
-    auto buffer =
+    auto buffer =
+        ffmpegdecoder::decodeWithFilePath(path, static_cast<int>(sampleRate));
     if (buffer == nullptr) {
       NSLog(@"Failed to decode with FFmpeg: %s", path.c_str());
       return nullptr;
@@ -73,11 +83,14 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(const std::string
     return buffer;
   }
   ma_decoder decoder;
-  ma_decoder_config config =
-
+  ma_decoder_config config =
+      ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
+  ma_decoding_backend_vtable *customBackends[] = {
+      ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
 
   config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount =
+  config.customBackendCount =
+      sizeof(customBackends) / sizeof(customBackends[0]);
 
   if (ma_decoder_init_file(path.c_str(), &config, &decoder) != MA_SUCCESS) {
     NSLog(@"Failed to initialize decoder for file: %s", path.c_str());
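customBackendCount above is now computed with the classic sizeof(array) / sizeof(array[0]) idiom, which stays correct if more backends are appended to the array. A small C++17 illustration of the same count (Backend is a stand-in type, not the miniaudio vtable):

```cpp
// Sketch only: array-length idiom used for customBackendCount.
#include <iterator>

struct Backend {}; // stand-in for ma_decoding_backend_vtable

int main()
{
  Backend vorbis, opus;
  Backend *customBackends[] = {&vorbis, &opus};

  // sizeof(customBackends) / sizeof(customBackends[0]) == 2; std::size()
  // computes the same count from the array type, without manual division.
  static_assert(std::size(customBackends) == 2);
  return static_cast<int>(std::size(customBackends));
}
```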
@@ -90,14 +103,20 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithFilePath(const std::string
 
   std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
   ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer(
+  return makeAudioBufferFromFloatBuffer(
+      buffer, outputSampleRate, outputChannels);
 }
 
-std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(
+std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(
+    const void *data,
+    size_t size,
+    float sampleRate)
 {
   const AudioFormat format = AudioDecoder::detectAudioFormat(data, size);
-  if (format == AudioFormat::MP4 || format == AudioFormat::M4A ||
-
+  if (format == AudioFormat::MP4 || format == AudioFormat::M4A ||
+      format == AudioFormat::AAC) {
+    auto buffer = ffmpegdecoder::decodeWithMemoryBlock(
+        data, size, static_cast<int>(sampleRate));
     if (buffer == nullptr) {
       NSLog(@"Failed to decode with FFmpeg");
       return nullptr;
@@ -105,12 +124,15 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(const void *dat
     return buffer;
   }
   ma_decoder decoder;
-  ma_decoder_config config =
+  ma_decoder_config config =
+      ma_decoder_config_init(ma_format_f32, 0, static_cast<int>(sampleRate));
 
-  ma_decoding_backend_vtable *customBackends[] = {
+  ma_decoding_backend_vtable *customBackends[] = {
+      ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
 
   config.ppCustomBackendVTables = customBackends;
-  config.customBackendCount =
+  config.customBackendCount =
+      sizeof(customBackends) / sizeof(customBackends[0]);
 
   if (ma_decoder_init_memory(data, size, &config, &decoder) != MA_SUCCESS) {
     NSLog(@"Failed to initialize decoder for memory block");
@@ -123,7 +145,8 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithMemoryBlock(const void *dat
 
   std::vector<float> buffer = readAllPcmFrames(decoder, outputChannels);
   ma_decoder_uninit(&decoder);
-  return makeAudioBufferFromFloatBuffer(
+  return makeAudioBufferFromFloatBuffer(
+      buffer, outputSampleRate, outputChannels);
 }
 
 std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithPCMInBase64(
@@ -134,9 +157,11 @@ std::shared_ptr<AudioBuffer> AudioDecoder::decodeWithPCMInBase64(
 {
   auto decodedData = base64_decode(data, false);
   const auto uint8Data = reinterpret_cast<uint8_t *>(decodedData.data());
-  size_t numFramesDecoded =
+  size_t numFramesDecoded =
+      decodedData.size() / (inputChannelCount * sizeof(int16_t));
 
-  auto audioBus = std::make_shared<AudioBus>(
+  auto audioBus = std::make_shared<AudioBus>(
+      numFramesDecoded, inputChannelCount, inputSampleRate);
 
   for (int ch = 0; ch < inputChannelCount; ++ch) {
     auto channelData = audioBus->getChannel(ch)->getData();
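decodeWithPCMInBase64 derives the frame count as bytes / (channels * sizeof(int16_t)); for example, 1764 bytes of stereo 16-bit PCM is 1764 / (2 * 2) = 441 frames. A C++ sketch of that math plus a typical int16-to-float normalization (the 1/32768 scale is an assumption; this hunk does not show the conversion loop's body):

```cpp
// Sketch only: frame-count math and PCM16 -> float32 conversion.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<float> pcm16ToFloat(const std::vector<uint8_t> &bytes, int channels)
{
  // Each frame holds `channels` samples of sizeof(int16_t) == 2 bytes each,
  // so e.g. 1764 bytes of stereo PCM16 yield 441 frames.
  size_t numFrames = bytes.size() / (channels * sizeof(int16_t));
  const auto *samples = reinterpret_cast<const int16_t *>(bytes.data());

  std::vector<float> out(numFrames * channels);
  for (size_t i = 0; i < out.size(); ++i) {
    out[i] = static_cast<float>(samples[i]) / 32768.0f; // assumed scale
  }
  return out;
}
```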
package/ios/audioapi/ios/system/AudioEngine.h

@@ -16,7 +16,8 @@
 @property (nonatomic, strong) AVAudioSinkNode *inputNode;
 @property (nonatomic, weak) AudioSessionManager *sessionManager;
 
-- (instancetype)initWithAudioSessionManager:
+- (instancetype)initWithAudioSessionManager:
+    (AudioSessionManager *)sessionManager;
 
 + (instancetype)sharedInstance;
 - (void)cleanup;
@@ -30,7 +31,8 @@
 - (void)unmarkAsInterrupted;
 - (bool)isSupposedToRun;
 
-- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode
+- (NSString *)attachSourceNode:(AVAudioSourceNode *)sourceNode
+                        format:(AVAudioFormat *)format;
 - (void)detachSourceNodeWithId:(NSString *)sourceNodeId;
 
 - (void)attachInputNode:(AVAudioSinkNode *)inputNode;