@livekit/react-native 2.5.0 → 2.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -3
- package/android/build.gradle +2 -1
- package/android/src/main/java/com/livekit/reactnative/LiveKitReactNative.kt +61 -5
- package/android/src/main/java/com/livekit/reactnative/LivekitReactNativeModule.kt +81 -4
- package/android/src/main/java/com/livekit/reactnative/audio/events/Events.kt +6 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/AudioFormat.kt +2 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/AudioProcessingController.kt +27 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/AudioProcessorInterface.kt +52 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/AudioRecordSamplesDispatcher.kt +72 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/AudioSinkManager.kt +75 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/CustomAudioProcessingFactory.kt +78 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/MultibandVolumeProcessor.kt +181 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/VolumeProcessor.kt +67 -0
- package/android/src/main/java/com/livekit/reactnative/audio/processing/fft/FFTAudioAnalyzer.kt +224 -0
- package/ios/LKAudioProcessingAdapter.h +26 -0
- package/ios/LKAudioProcessingAdapter.m +117 -0
- package/ios/LKAudioProcessingManager.h +34 -0
- package/ios/LKAudioProcessingManager.m +63 -0
- package/ios/LivekitReactNative-Bridging-Header.h +2 -0
- package/ios/LivekitReactNative.h +9 -4
- package/ios/LivekitReactNative.m +83 -5
- package/ios/Logging.swift +4 -0
- package/ios/audio/AVAudioPCMBuffer.swift +136 -0
- package/ios/audio/AudioProcessing.swift +163 -0
- package/ios/audio/AudioRendererManager.swift +72 -0
- package/ios/audio/FFTProcessor.swift +147 -0
- package/ios/audio/MultibandVolumeAudioRenderer.swift +65 -0
- package/ios/audio/RingBuffer.swift +51 -0
- package/ios/audio/VolumeAudioRenderer.swift +48 -0
- package/lib/commonjs/LKNativeModule.js +18 -0
- package/lib/commonjs/LKNativeModule.js.map +1 -0
- package/lib/commonjs/components/BarVisualizer.js +192 -0
- package/lib/commonjs/components/BarVisualizer.js.map +1 -0
- package/lib/commonjs/events/EventEmitter.js +45 -0
- package/lib/commonjs/events/EventEmitter.js.map +1 -0
- package/lib/commonjs/hooks/useMultibandTrackVolume.js +64 -0
- package/lib/commonjs/hooks/useMultibandTrackVolume.js.map +1 -0
- package/lib/commonjs/hooks/useTrackVolume.js +45 -0
- package/lib/commonjs/hooks/useTrackVolume.js.map +1 -0
- package/lib/commonjs/hooks.js +24 -0
- package/lib/commonjs/hooks.js.map +1 -1
- package/lib/commonjs/index.js +14 -0
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/LKNativeModule.js +12 -0
- package/lib/module/LKNativeModule.js.map +1 -0
- package/lib/module/components/BarVisualizer.js +182 -0
- package/lib/module/components/BarVisualizer.js.map +1 -0
- package/lib/module/events/EventEmitter.js +36 -0
- package/lib/module/events/EventEmitter.js.map +1 -0
- package/lib/module/hooks/useMultibandTrackVolume.js +58 -0
- package/lib/module/hooks/useMultibandTrackVolume.js.map +1 -0
- package/lib/module/hooks/useTrackVolume.js +39 -0
- package/lib/module/hooks/useTrackVolume.js.map +1 -0
- package/lib/module/hooks.js +2 -0
- package/lib/module/hooks.js.map +1 -1
- package/lib/module/index.js +3 -0
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/lib/commonjs/LKNativeModule.d.ts +3 -0
- package/lib/typescript/lib/commonjs/components/BarVisualizer.d.ts +32 -0
- package/lib/typescript/lib/commonjs/events/EventEmitter.d.ts +4 -0
- package/lib/typescript/lib/commonjs/hooks/useMultibandTrackVolume.d.ts +8 -0
- package/lib/typescript/lib/commonjs/hooks/useTrackVolume.d.ts +8 -0
- package/lib/typescript/lib/module/LKNativeModule.d.ts +2 -0
- package/lib/typescript/lib/module/components/BarVisualizer.d.ts +10 -0
- package/lib/typescript/lib/module/events/EventEmitter.d.ts +3 -0
- package/lib/typescript/lib/module/hooks/useMultibandTrackVolume.d.ts +7 -0
- package/lib/typescript/lib/module/hooks/useTrackVolume.d.ts +7 -0
- package/lib/typescript/lib/module/hooks.d.ts +2 -0
- package/lib/typescript/lib/module/index.d.ts +1 -0
- package/lib/typescript/src/LKNativeModule.d.ts +2 -0
- package/lib/typescript/src/components/BarVisualizer.d.ts +49 -0
- package/lib/typescript/src/events/EventEmitter.d.ts +6 -0
- package/lib/typescript/src/hooks/useMultibandTrackVolume.d.ts +31 -0
- package/lib/typescript/src/hooks/useTrackVolume.d.ts +9 -0
- package/lib/typescript/src/hooks.d.ts +2 -0
- package/lib/typescript/src/index.d.ts +1 -0
- package/livekit-react-native.podspec +1 -1
- package/package.json +7 -6
- package/src/LKNativeModule.ts +19 -0
- package/src/components/BarVisualizer.tsx +252 -0
- package/src/events/EventEmitter.ts +51 -0
- package/src/hooks/useMultibandTrackVolume.ts +97 -0
- package/src/hooks/useTrackVolume.ts +62 -0
- package/src/hooks.ts +2 -0
- package/src/index.tsx +3 -0
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
#import "LKAudioProcessingManager.h"
|
|
2
|
+
#import "LKAudioProcessingAdapter.h"
|
|
3
|
+
|
|
4
|
+
// Singleton that owns the WebRTC default audio processing module and exposes
// hook points for observing/processing local (capture) and remote (render)
// audio through two LKAudioProcessingAdapter instances.
@implementation LKAudioProcessingManager

// Process-wide shared instance, created exactly once via dispatch_once.
+ (instancetype)sharedInstance {
    static dispatch_once_t onceToken;
    static LKAudioProcessingManager* sharedInstance = nil;
    dispatch_once(&onceToken, ^{
        sharedInstance = [[self alloc] init];
    });
    return sharedInstance;
}

// Creates the RTC audio processing module and installs one adapter as the
// capture-post-processing delegate (local mic audio after built-in processing)
// and another as the render-pre-processing delegate (remote audio before playback).
- (instancetype)init {
    if (self = [super init]) {
        _audioProcessingModule = [[RTCDefaultAudioProcessingModule alloc] init];
        _capturePostProcessingAdapter = [[LKAudioProcessingAdapter alloc] init];
        _renderPreProcessingAdapter = [[LKAudioProcessingAdapter alloc] init];
        _audioProcessingModule.capturePostProcessingDelegate = _capturePostProcessingAdapter;
        _audioProcessingModule.renderPreProcessingDelegate = _renderPreProcessingAdapter;
    }
    return self;
}

// Registers a renderer that receives local (captured) audio frames.
- (void)addLocalAudioRenderer:(nonnull id<RTCAudioRenderer>)renderer {
    [_capturePostProcessingAdapter addAudioRenderer:renderer];
}

// Stops delivering local (captured) audio frames to the renderer.
- (void)removeLocalAudioRenderer:(nonnull id<RTCAudioRenderer>)renderer {
    [_capturePostProcessingAdapter removeAudioRenderer:renderer];
}

// Registers a renderer that receives remote (to-be-played) audio frames.
- (void)addRemoteAudioRenderer:(nonnull id<RTCAudioRenderer>)renderer {
    [_renderPreProcessingAdapter addAudioRenderer:renderer];
}

// Stops delivering remote audio frames to the renderer.
- (void)removeRemoteAudioRenderer:(nonnull id<RTCAudioRenderer>)renderer {
    [_renderPreProcessingAdapter removeAudioRenderer:renderer];
}

// Adds a processor that can mutate local audio after WebRTC's own processing.
- (void)addCapturePostProcessor:(nonnull id<LKExternalAudioProcessingDelegate>)processor {
    [_capturePostProcessingAdapter addProcessing:processor];
}

// Removes a previously added capture post-processor.
- (void)removeCapturePostProcessor:(nonnull id<LKExternalAudioProcessingDelegate>)processor {
    [_capturePostProcessingAdapter removeProcessing:processor];
}

// Adds a processor that can mutate remote audio before playback.
- (void)addRenderPreProcessor:(nonnull id<LKExternalAudioProcessingDelegate>)processor {
    [_renderPreProcessingAdapter addProcessing:processor];
}

// Removes a previously added render pre-processor.
- (void)removeRenderPreProcessor:(nonnull id<LKExternalAudioProcessingDelegate>)processor {
    [_renderPreProcessingAdapter removeProcessing:processor];
}

// NOTE(review): intentionally a no-op stub upstream; the adapters do not
// expose a bulk-clear API from here, so there is nothing to call yet.
- (void)clearProcessors {
    // TODO
}

@end
|
package/ios/LivekitReactNative.h
CHANGED
|
@@ -3,12 +3,17 @@
|
|
|
3
3
|
// LivekitReactNative
|
|
4
4
|
//
|
|
5
5
|
// Created by David Liu on 9/4/22.
|
|
6
|
-
// Copyright © 2022 LiveKit. All rights reserved.
|
|
6
|
+
// Copyright © 2022-2025 LiveKit. All rights reserved.
|
|
7
7
|
//
|
|
8
8
|
#import <React/RCTBridgeModule.h>
|
|
9
|
+
#import <WebRTC/WebRTC.h>
|
|
10
|
+
#import <React/RCTEventEmitter.h>
|
|
9
11
|
|
|
10
|
-
@
|
|
11
|
-
|
|
12
|
+
// Forward declaration; the concrete class is implemented in Swift.
@class AudioRendererManager;

// React Native native module entry point for LiveKit. Extends RCTEventEmitter
// so it can emit audio-analysis events (kEventVolumeProcessed,
// kEventMultibandProcessed) back to JavaScript.
@interface LivekitReactNative : RCTEventEmitter <RCTBridgeModule>

// Manager that registers audio renderers by react tag and attaches/detaches
// them to audio tracks (lazily created in the implementation).
@property(nonatomic, strong) AudioRendererManager* _Nonnull audioRendererManager;

// One-time module setup hook; see the implementation for what is configured.
+(void)setup;

@end
|
|
17
|
+
|
|
18
|
+
extern NSString * _Nonnull const kEventVolumeProcessed;
|
|
19
|
+
extern NSString * _Nonnull const kEventMultibandProcessed;
|
package/ios/LivekitReactNative.m
CHANGED
|
@@ -1,15 +1,23 @@
|
|
|
1
1
|
#import "AudioUtils.h"
|
|
2
2
|
#import "LivekitReactNative.h"
|
|
3
|
+
#import "LKAudioProcessingManager.h"
|
|
3
4
|
#import "WebRTCModule.h"
|
|
4
5
|
#import "WebRTCModuleOptions.h"
|
|
5
6
|
#import <WebRTC/RTCAudioSession.h>
|
|
6
7
|
#import <WebRTC/RTCAudioSessionConfiguration.h>
|
|
7
8
|
#import <AVFAudio/AVFAudio.h>
|
|
8
9
|
#import <AVKit/AVKit.h>
|
|
10
|
+
#import "livekit_react_native-Swift.h"
|
|
11
|
+
|
|
12
|
+
NSString *const kEventVolumeProcessed = @"LK_VOLUME_PROCESSED";
|
|
13
|
+
NSString *const kEventMultibandProcessed = @"LK_MULTIBAND_PROCESSED";
|
|
9
14
|
|
|
10
15
|
@implementation LivekitReactNative
|
|
16
|
+
|
|
17
|
+
|
|
11
18
|
RCT_EXPORT_MODULE();
|
|
12
19
|
|
|
20
|
+
|
|
13
21
|
-(instancetype)init {
|
|
14
22
|
if(self = [super init]) {
|
|
15
23
|
|
|
@@ -38,6 +46,7 @@ RCT_EXPORT_MODULE();
|
|
|
38
46
|
RTCVideoEncoderFactorySimulcast *simulcastVideoEncoderFactory = [[RTCVideoEncoderFactorySimulcast alloc] initWithPrimary:videoEncoderFactory fallback:videoEncoderFactory];
|
|
39
47
|
WebRTCModuleOptions *options = [WebRTCModuleOptions sharedInstance];
|
|
40
48
|
options.videoEncoderFactory = simulcastVideoEncoderFactory;
|
|
49
|
+
options.audioProcessingModule = LKAudioProcessingManager.sharedInstance.audioProcessingModule;
|
|
41
50
|
}
|
|
42
51
|
|
|
43
52
|
/// Configure default audio config for WebRTC
|
|
@@ -123,13 +132,13 @@ RCT_EXPORT_METHOD(selectAudioOutput:(NSString *)deviceId
|
|
|
123
132
|
RCT_EXPORT_METHOD(setAppleAudioConfiguration:(NSDictionary *) configuration){
|
|
124
133
|
RTCAudioSession* session = [RTCAudioSession sharedInstance];
|
|
125
134
|
RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
|
|
126
|
-
|
|
135
|
+
|
|
127
136
|
NSString* appleAudioCategory = configuration[@"audioCategory"];
|
|
128
137
|
NSArray* appleAudioCategoryOptions = configuration[@"audioCategoryOptions"];
|
|
129
138
|
NSString* appleAudioMode = configuration[@"audioMode"];
|
|
130
139
|
|
|
131
140
|
[session lockForConfiguration];
|
|
132
|
-
|
|
141
|
+
|
|
133
142
|
NSError* error = nil;
|
|
134
143
|
BOOL categoryChanged = NO;
|
|
135
144
|
if(appleAudioCategoryOptions != nil) {
|
|
@@ -151,7 +160,7 @@ RCT_EXPORT_METHOD(setAppleAudioConfiguration:(NSDictionary *) configuration){
|
|
|
151
160
|
}
|
|
152
161
|
}
|
|
153
162
|
}
|
|
154
|
-
|
|
163
|
+
|
|
155
164
|
if(appleAudioCategory != nil) {
|
|
156
165
|
categoryChanged = YES;
|
|
157
166
|
config.category = [AudioUtils audioSessionCategoryFromString:appleAudioCategory];
|
|
@@ -164,7 +173,7 @@ RCT_EXPORT_METHOD(setAppleAudioConfiguration:(NSDictionary *) configuration){
|
|
|
164
173
|
error = nil;
|
|
165
174
|
}
|
|
166
175
|
}
|
|
167
|
-
|
|
176
|
+
|
|
168
177
|
if(appleAudioMode != nil) {
|
|
169
178
|
config.mode = [AudioUtils audioSessionModeFromString:appleAudioMode];
|
|
170
179
|
[session setMode:config.mode error:&error];
|
|
@@ -173,7 +182,76 @@ RCT_EXPORT_METHOD(setAppleAudioConfiguration:(NSDictionary *) configuration){
|
|
|
173
182
|
error = nil;
|
|
174
183
|
}
|
|
175
184
|
}
|
|
176
|
-
|
|
185
|
+
|
|
177
186
|
[session unlockForConfiguration];
|
|
178
187
|
}
|
|
188
|
+
|
|
189
|
+
// Lazily creates the Swift AudioRendererManager on first access, binding it
// to the current React bridge so it can resolve tracks via WebRTCModule.
-(AudioRendererManager *)audioRendererManager {
  if(!_audioRendererManager) {
    _audioRendererManager = [[AudioRendererManager alloc] initWithBridge:self.bridge];
  }

  return _audioRendererManager;
}

// Creates a VolumeAudioRenderer (40 ms update interval), attaches it to the
// track identified by pcId/trackId, and synchronously returns the react tag
// that JS uses to later delete the processor.
RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(createVolumeProcessor:(nonnull NSNumber *)pcId
                                       trackId:(nonnull NSString *)trackId) {

  VolumeAudioRenderer *renderer = [[VolumeAudioRenderer alloc] initWithIntervalMs:40.0 eventEmitter:self];

  NSString *reactTag = [self.audioRendererManager registerRenderer:renderer];
  renderer.reactTag = reactTag;
  [self.audioRendererManager attachWithRenderer:renderer pcId:pcId trackId:trackId];
  return reactTag;
}

// Detaches and unregisters the volume renderer identified by reactTag.
// Returns nil (the JS side ignores the result).
RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(deleteVolumeProcessor:(nonnull NSString *)reactTag
                                       pcId:(nonnull NSNumber *)pcId
                                       trackId:(nonnull NSString *)trackId) {

  [self.audioRendererManager detachWithRendererByTag:reactTag pcId:pcId trackId:trackId];
  [self.audioRendererManager unregisterRendererForReactTag:reactTag];

  return nil;
}

// Creates a MultibandVolumeAudioRenderer configured from the options dict
// (keys: bands, minFrequency, maxFrequency, updateInterval), attaches it,
// and returns its react tag.
RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(createMultibandVolumeProcessor:(NSDictionary *)options
                                       pcId:(nonnull NSNumber *)pcId
                                       trackId:(nonnull NSString *)trackId) {

  NSInteger bands = [(NSNumber *)options[@"bands"] integerValue];
  float minFrequency = [(NSNumber *)options[@"minFrequency"] floatValue];
  float maxFrequency = [(NSNumber *)options[@"maxFrequency"] floatValue];
  float intervalMs = [(NSNumber *)options[@"updateInterval"] floatValue];
  MultibandVolumeAudioRenderer *renderer = [[MultibandVolumeAudioRenderer alloc] initWithBands:bands
                                                                                  minFrequency:minFrequency
                                                                                  maxFrequency:maxFrequency
                                                                                    intervalMs:intervalMs
                                                                                  eventEmitter:self];

  NSString *reactTag = [self.audioRendererManager registerRenderer:renderer];
  renderer.reactTag = reactTag;
  [self.audioRendererManager attachWithRenderer:renderer pcId:pcId trackId:trackId];
  return reactTag;
}

// Detaches and unregisters the multiband renderer identified by reactTag.
RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(deleteMultibandVolumeProcessor:(nonnull NSString *)reactTag
                                       pcId:(nonnull NSNumber *)pcId
                                       trackId:(nonnull NSString *)trackId) {

  [self.audioRendererManager detachWithRendererByTag:reactTag pcId:pcId trackId:trackId];
  [self.audioRendererManager unregisterRendererForReactTag:reactTag];

  return nil;
}

// RCTEventEmitter contract: the event names this module may emit to JS.
- (NSArray<NSString *> *)supportedEvents {
  return @[
    kEventVolumeProcessed,
    kEventMultibandProcessed,
  ];
}
|
|
256
|
+
|
|
179
257
|
@end
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Copyright 2025 LiveKit
|
|
3
|
+
*
|
|
4
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
* you may not use this file except in compliance with the License.
|
|
6
|
+
* You may obtain a copy of the License at
|
|
7
|
+
*
|
|
8
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
*
|
|
10
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
* See the License for the specific language governing permissions and
|
|
14
|
+
* limitations under the License.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import Accelerate
|
|
18
|
+
import AVFoundation
|
|
19
|
+
|
|
20
|
+
public extension AVAudioPCMBuffer {
    /// Resamples the buffer to `targetSampleRate`.
    ///
    /// - Parameter targetSampleRate: Desired output sample rate in Hz.
    /// - Returns: `self` when the buffer is already at the target rate, a new
    ///   buffer on success, or `nil` when a format/converter could not be
    ///   created or the conversion failed.
    func resample(toSampleRate targetSampleRate: Double) -> AVAudioPCMBuffer? {
        let sourceFormat = format

        if sourceFormat.sampleRate == targetSampleRate {
            // Already at the requested rate; no work needed.
            return self
        }

        // Target format: same layout as the source, new sample rate.
        guard let targetFormat = AVAudioFormat(commonFormat: sourceFormat.commonFormat,
                                               sampleRate: targetSampleRate,
                                               channels: sourceFormat.channelCount,
                                               interleaved: sourceFormat.isInterleaved)
        else {
            print("Failed to create target format.")
            return nil
        }

        guard let converter = AVAudioConverter(from: sourceFormat, to: targetFormat) else {
            print("Failed to create audio converter.")
            return nil
        }

        // Round up so a fractional frame count never truncates the capacity
        // (plain AVAudioFrameCount(_:) conversion would round toward zero).
        let capacity = (targetSampleRate * Double(frameLength) / sourceFormat.sampleRate).rounded(.up)

        guard let convertedBuffer = AVAudioPCMBuffer(pcmFormat: targetFormat, frameCapacity: AVAudioFrameCount(capacity)) else {
            print("Failed to create converted buffer.")
            return nil
        }

        // Feed the source buffer exactly once, then signal end-of-stream so the
        // converter flushes any samples it buffered internally. (.noDataNow would
        // tell the converter more data may arrive later, leaving samples unflushed.)
        var isDone = false
        let inputBlock: AVAudioConverterInputBlock = { _, outStatus in
            if isDone {
                outStatus.pointee = .endOfStream
                return nil
            }
            outStatus.pointee = .haveData
            isDone = true
            return self
        }

        var error: NSError?
        let status = converter.convert(to: convertedBuffer, error: &error, withInputFrom: inputBlock)

        if status == .error {
            print("Conversion failed: \(error?.localizedDescription ?? "Unknown error")")
            return nil
        }

        // `convert(to:error:withInputFrom:)` sets `frameLength` to the number of
        // frames actually produced. Do NOT overwrite it with `frameCapacity` —
        // that would expose uninitialized samples at the tail of the buffer.
        return convertedBuffer
    }

    /// Convert PCM buffer to specified common format.
    /// Currently supports conversion from Int16 to Float32 only.
    ///
    /// - Parameter commonFormat: The desired `AVAudioCommonFormat`.
    /// - Returns: `self` when no conversion is needed, a new deinterleaved
    ///   Float32 buffer on success, or `nil` for unsupported conversions or
    ///   allocation failures.
    func convert(toCommonFormat commonFormat: AVAudioCommonFormat) -> AVAudioPCMBuffer? {
        // No-op when the buffer is already in the requested format.
        guard format.commonFormat != commonFormat else {
            return self
        }

        // Only Int16 -> Float32 is implemented.
        guard format.commonFormat == .pcmFormatInt16, commonFormat == .pcmFormatFloat32 else {
            print("Unsupported conversion: only Int16 to Float32 is supported")
            return nil
        }

        // Output format: same rate/channel count, deinterleaved.
        guard let outputFormat = AVAudioFormat(commonFormat: commonFormat,
                                               sampleRate: format.sampleRate,
                                               channels: format.channelCount,
                                               interleaved: false)
        else {
            print("Failed to create output audio format")
            return nil
        }

        guard let outputBuffer = AVAudioPCMBuffer(pcmFormat: outputFormat,
                                                  frameCapacity: frameCapacity)
        else {
            print("Failed to create output PCM buffer")
            return nil
        }

        outputBuffer.frameLength = frameLength

        let channelCount = Int(format.channelCount)
        let frameCount = Int(frameLength)

        // Ensure the source buffer actually carries Int16 data.
        guard let int16Data = int16ChannelData else {
            print("Source buffer doesn't contain Int16 data")
            return nil
        }

        // Ensure the output buffer exposes Float32 channel data.
        guard let floatData = outputBuffer.floatChannelData else {
            print("Failed to get float channel data from output buffer")
            return nil
        }

        // Convert Int16 to Float32 and normalize to [-1.0, 1.0] per channel.
        let scale = Float(Int16.max)
        var scalar = 1.0 / scale

        for channel in 0 ..< channelCount {
            vDSP_vflt16(int16Data[channel], 1, floatData[channel], 1, vDSP_Length(frameCount))
            vDSP_vsmul(floatData[channel], 1, &scalar, floatData[channel], 1, vDSP_Length(frameCount))
        }

        return outputBuffer
    }
}
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Copyright 2025 LiveKit
|
|
3
|
+
*
|
|
4
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
* you may not use this file except in compliance with the License.
|
|
6
|
+
* You may obtain a copy of the License at
|
|
7
|
+
*
|
|
8
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
*
|
|
10
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
* See the License for the specific language governing permissions and
|
|
14
|
+
* limitations under the License.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import Accelerate
|
|
18
|
+
import AVFoundation
|
|
19
|
+
import Foundation
|
|
20
|
+
import WebRTC
|
|
21
|
+
|
|
22
|
+
/// Audio level measurement for a single channel, in linear scale (not dB).
public struct AudioLevel {
    /// Linear Scale RMS Value
    public let average: Float
    /// Linear-scale peak (maximum) sample value.
    public let peak: Float
}
|
|
27
|
+
|
|
28
|
+
public extension RTCAudioBuffer {
    /// Convert to AVAudioPCMBuffer Int16 format.
    ///
    /// - Returns: A deinterleaved Int16 PCM buffer holding a copy of this
    ///   buffer's samples, or `nil` if a format/buffer could not be created.
    @objc
    func toAVAudioPCMBuffer() -> AVAudioPCMBuffer? {
        // NOTE(review): sample rate is derived as frames * 100, i.e. this assumes
        // the buffer holds exactly 10 ms of audio — TODO confirm for all callers.
        guard let audioFormat = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                              sampleRate: Double(frames * 100),
                                              channels: AVAudioChannelCount(channels),
                                              interleaved: false),
            let pcmBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat,
                                             frameCapacity: AVAudioFrameCount(frames))
        else { return nil }

        pcmBuffer.frameLength = AVAudioFrameCount(frames)

        guard let targetBufferPointer = pcmBuffer.int16ChannelData else { return nil }

        for i in 0 ..< channels {
            let sourceBuffer = rawBuffer(forChannel: i)
            let targetBuffer = targetBufferPointer[i]
            // sourceBuffer is in the format of [Int16] but is stored in 32-bit alignment, we need to pack the Int16 data correctly.

            for frame in 0 ..< frames {
                // Cast and pack the source 32-bit Int16 data into the target 16-bit buffer,
                // clamping to the Int16 range so the Int16(_:) conversion cannot trap.
                let clampedValue = max(Float(Int16.min), min(Float(Int16.max), sourceBuffer[frame]))
                targetBuffer[frame] = Int16(clampedValue)
            }
        }

        return pcmBuffer
    }
}
|
|
59
|
+
|
|
60
|
+
public extension AVAudioPCMBuffer {
    /// Computes Peak and Linear Scale RMS Value (Average) for all channels.
    /// Returns one `AudioLevel` per channel; empty when the buffer carries no
    /// Float32 data.
    func audioLevels() -> [AudioLevel] {
        guard let channelPointers = floatChannelData else {
            // Buffer does not expose Float32 samples; nothing to measure.
            return []
        }

        var levels: [AudioLevel] = []
        let sampleCount = vDSP_Length(frameLength)

        for channelIndex in 0 ..< Int(format.channelCount) {
            let samples = channelPointers[channelIndex]

            var peakValue: Float = 0.0
            vDSP_maxv(samples, stride, &peakValue, sampleCount)

            var rmsValue: Float = 0.0
            vDSP_rmsqv(samples, stride, &rmsValue, sampleCount)

            // Values stay in linear scale; no dB conversion is applied.
            levels.append(AudioLevel(average: rmsValue, peak: peakValue))
        }

        return levels
    }
}
|
|
83
|
+
|
|
84
|
+
public extension Sequence where Iterator.Element == AudioLevel {
    /// Combines all elements into a single audio level by averaging both the
    /// `average` and `peak` values across the sequence.
    /// - Returns: The combined level, or `nil` when the sequence is empty.
    func combine() -> AudioLevel? {
        var elementCount = 0
        var averageSum: Float = 0.0
        var peakSum: Float = 0.0

        for level in self {
            elementCount += 1
            averageSum += level.average
            peakSum += level.peak
        }

        guard elementCount > 0 else { return nil }

        return AudioLevel(average: averageSum / Float(elementCount),
                          peak: peakSum / Float(elementCount))
    }
}
|
|
100
|
+
|
|
101
|
+
/// Converts incoming PCM audio into normalized per-band magnitudes in [0, 1],
/// suitable for driving visualizations. Samples are accumulated in a ring
/// buffer until a full FFT window (1024 samples) is available.
public class AudioVisualizeProcessor {
    // FFT window size in samples.
    static let bufferSize = 1024

    // MARK: - Public

    public let minFrequency: Float
    public let maxFrequency: Float
    public let minDB: Float
    public let maxDB: Float
    public let bandsCount: Int

    // NOTE(review): assigned in init but never read afterwards (process()
    // shadows it with a local) — candidate for removal; confirm no KVO/usage elsewhere.
    private var bands: [Float]?

    // MARK: - Private

    // Accumulates samples until a full analysis window is available.
    private let ringBuffer = RingBuffer<Float>(size: AudioVisualizeProcessor.bufferSize)
    private let processor: FFTProcessor

    /// - Parameters:
    ///   - minFrequency: Lower bound of the analyzed frequency range (Hz).
    ///   - maxFrequency: Upper bound of the analyzed frequency range (Hz).
    ///   - minDB: Magnitudes at or below this dB level normalize to 0.
    ///   - maxDB: Magnitudes at this dB level normalize to 1.
    ///   - bandsCount: Number of output frequency bands.
    public init(minFrequency: Float = 10,
                maxFrequency: Float = 8000,
                minDB: Float = -32.0,
                maxDB: Float = 32.0,
                bandsCount: Int = 100)
    {
        self.minFrequency = minFrequency
        self.maxFrequency = maxFrequency
        self.minDB = minDB
        self.maxDB = maxDB
        self.bandsCount = bandsCount

        processor = FFTProcessor(bufferSize: Self.bufferSize)
        bands = [Float](repeating: 0.0, count: bandsCount)
    }

    /// Feeds one PCM buffer into the analyzer.
    /// - Returns: Normalized band magnitudes in [0, 1], or `nil` when the
    ///   buffer could not be converted to Float32 or a full FFT window is not
    ///   yet available.
    public func process(pcmBuffer: AVAudioPCMBuffer) -> [Float]? {
        guard let pcmBuffer = pcmBuffer.convert(toCommonFormat: .pcmFormatFloat32) else { return nil }
        guard let floatChannelData = pcmBuffer.floatChannelData else { return nil }

        // Get the float array (first channel only).
        let floats = Array(UnsafeBufferPointer(start: floatChannelData[0], count: Int(pcmBuffer.frameLength)))
        ringBuffer.write(floats)

        // Get full-size buffer if available, otherwise return
        guard let buffer = ringBuffer.read() else { return nil }

        // Process FFT and compute frequency bands
        let fftRes = processor.process(buffer: buffer)
        let bands = fftRes.computeBands(
            minFrequency: minFrequency,
            maxFrequency: maxFrequency,
            bandsCount: bandsCount,
            sampleRate: Float(pcmBuffer.format.sampleRate)
        )

        let headroom = maxDB - minDB

        // Normalize magnitudes (already in decibels) into [0, 1] against the
        // [minDB, maxDB] window.
        return bands.magnitudes.map { magnitude in
            let adjustedMagnitude = max(0, magnitude + abs(minDB))
            return min(1.0, adjustedMagnitude / headroom)
        }
    }
}
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import Foundation
|
|
2
|
+
import WebRTC
|
|
3
|
+
|
|
4
|
+
/// Tracks `RTCAudioRenderer` instances by a generated react tag and attaches
/// or detaches them to audio tracks resolved through the WebRTC module.
/// A `pcId` of -1 denotes the local capture path, which is observed via the
/// shared audio processing manager rather than the track itself.
public class AudioRendererManager: NSObject {
    private let bridge: RCTBridge

    /// Registered renderers, keyed by their react tag.
    public private(set) var renderers: [String: RTCAudioRenderer] = [:]

    @objc
    public init(bridge: RCTBridge) {
        self.bridge = bridge
    }

    /// Stores the renderer under a freshly generated tag and returns that tag.
    @objc
    public func registerRenderer(_ audioRenderer: RTCAudioRenderer) -> String {
        let reactTag = UUID().uuidString
        renderers[reactTag] = audioRenderer
        return reactTag
    }

    /// Removes the renderer registered under the given tag, if any.
    @objc
    public func unregisterRenderer(forReactTag: String) {
        renderers.removeValue(forKey: forReactTag)
    }

    /// Removes every registration that points at this exact renderer instance.
    @objc
    public func unregisterRenderer(_ audioRenderer: RTCAudioRenderer) {
        let matchingTags = renderers.compactMap { $0.value === audioRenderer ? $0.key : nil }
        for tag in matchingTags {
            renderers.removeValue(forKey: tag)
        }
    }

    /// Attaches the renderer to the audio track identified by `pcId`/`trackId`.
    /// Logs and returns without attaching when the track cannot be found.
    @objc
    public func attach(renderer: RTCAudioRenderer, pcId: NSNumber, trackId: String) {
        let webrtcModule = self.bridge.module(for: WebRTCModule.self) as! WebRTCModule
        guard let audioTrack = webrtcModule.track(forId: trackId, pcId: pcId) as? RTCAudioTrack else {
            lklog("couldn't find audio track: pcId: \(pcId), trackId: \(trackId)")
            return
        }

        if pcId == -1 {
            // Local capture: observe via the shared processing manager.
            LKAudioProcessingManager.sharedInstance().addLocalAudioRenderer(renderer)
        } else {
            audioTrack.add(renderer)
        }
    }

    /// Looks up the renderer registered under `reactTag` and detaches it.
    /// Logs and returns when no renderer is registered under that tag.
    @objc
    public func detach(rendererByTag reactTag: String, pcId: NSNumber, trackId: String) {
        guard let renderer = renderers[reactTag] else {
            lklog("couldn't find renderer: tag: \(reactTag)")
            return
        }

        detach(renderer: renderer, pcId: pcId, trackId: trackId)
    }

    /// Detaches the renderer from the audio track identified by `pcId`/`trackId`.
    /// Logs and returns without detaching when the track cannot be found.
    @objc
    public func detach(renderer: RTCAudioRenderer, pcId: NSNumber, trackId: String) {
        let webrtcModule = self.bridge.module(for: WebRTCModule.self) as! WebRTCModule
        guard let audioTrack = webrtcModule.track(forId: trackId, pcId: pcId) as? RTCAudioTrack else {
            lklog("couldn't find audio track: pcId: \(pcId), trackId: \(trackId)")
            return
        }

        if pcId == -1 {
            // Local capture: stop observing via the shared processing manager.
            LKAudioProcessingManager.sharedInstance().removeLocalAudioRenderer(renderer)
        } else {
            audioTrack.remove(renderer)
        }
    }
}
|