@kaltura-sdk/rtc-core 1.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +237 -0
- package/index.d.ts +1 -0
- package/index.esm.js +15293 -0
- package/package.json +22 -0
- package/src/index.d.ts +6 -0
- package/src/lib/core-utils/BrowserDetector.d.ts +101 -0
- package/src/lib/core-utils/DevicesUtils.d.ts +1 -0
- package/src/lib/core-utils/GlobalEventEmitter.d.ts +1 -0
- package/src/lib/core-utils/MediaUtils.d.ts +45 -0
- package/src/lib/core-utils/Mutex.d.ts +8 -0
- package/src/lib/core-utils/SdpUtils.d.ts +25 -0
- package/src/lib/core-utils/UserBrowserUtils.d.ts +37 -0
- package/src/lib/core-utils/UtilityFunctions.d.ts +21 -0
- package/src/lib/core-utils/index.d.ts +4 -0
- package/src/lib/core-utils/webrtcAudioStats.d.ts +56 -0
- package/src/lib/core-utils/webrtcStatsTypes.d.ts +76 -0
- package/src/lib/core-utils/webrtcVideoStats.d.ts +6 -0
- package/src/lib/devices/DevicesManager.d.ts +137 -0
- package/src/lib/devices/UserMediaManager.d.ts +52 -0
- package/src/lib/devices/index.d.ts +2 -0
- package/src/lib/media/AudioAnalyzer.d.ts +60 -0
- package/src/lib/media/GetUserMediaManager.d.ts +178 -0
- package/src/lib/media/MediaSources.d.ts +105 -0
- package/src/lib/media/index.d.ts +4 -0
- package/src/lib/media/media-processing/AudioProcessing.d.ts +42 -0
- package/src/lib/media/media-processing/AudioProcessingModes.d.ts +76 -0
- package/src/lib/media/media-processing/index.d.ts +4 -0
- package/src/lib/media/media-processing/types.d.ts +83 -0
- package/src/lib/media/types.d.ts +73 -0
- package/src/lib/peer-connection/IceHandler.d.ts +60 -0
- package/src/lib/peer-connection/PeerConnection.d.ts +153 -0
- package/src/lib/peer-connection/PeerConnectionBeacon.d.ts +208 -0
- package/src/lib/peer-connection/PeerConnectionManager.d.ts +57 -0
- package/src/lib/peer-connection/PeerConnectionWebrtcStats.d.ts +32 -0
- package/src/lib/peer-connection/constants.d.ts +89 -0
- package/src/lib/peer-connection/index.d.ts +10 -0
- package/src/lib/peer-connection/types.d.ts +167 -0
- package/src/lib/session/CoreRTCSession.d.ts +215 -0
- package/src/lib/session/index.d.ts +2 -0
- package/src/lib/simulcast/simulcastCommonPublisher.d.ts +4 -0
- package/src/lib/unisphere-sample.d.ts +50 -0
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
import { GetUserMediaEveObj, MediaSourceStreamObj, MediaSourceType, userMediaStatus } from '../media/types';
|
|
2
|
+
import { MediaSources, TrackKind } from './MediaSources';
|
|
3
|
+
import { DeviceManager } from '../devices/DevicesManager';
|
|
4
|
+
import { AudioProcessingModes } from './media-processing/AudioProcessingModes';
|
|
5
|
+
/** Maximum number of getUserMedia retry attempts before the request is reported as failed. */
export declare const MAX_GET_USER_MEDIA_ATTEMPTS = 5;
|
|
6
|
+
/**
 * Callback for updating peer connection senders with new track
 * Handles track replacement in all active peer connections with simulcast support
 *
 * MATCHES ORIGINAL: Logic from mdReplaceStopSenderTrack with simulcast handling
 *
 * @param trackKind - 'audio' or 'video'
 * @param newTrack - New track to replace with
 * @param isInitial - presumably true when this is the first (initial) media request — TODO confirm against implementation
 * @param shouldStopTrack - whether the previously attached sender track should be stopped (mirrors mdReplaceStopSenderTrack)
 * @param addIfNotExist - whether to add the track when no matching sender exists (mirrors mdReplaceStopSenderTrack)
 * @returns Promise<boolean> - True if track was replaced successfully
 */
export type UpdatePeerConnectionSendersCallback = (trackKind: 'audio' | 'video', newTrack: MediaStreamTrack, isInitial: boolean, shouldStopTrack: boolean, addIfNotExist: boolean) => Promise<boolean>;
|
|
17
|
+
/**
 * Callback for complete media device release
 * Handles full device release flow including PC senders, stream cleanup, and processing stop
 *
 * MATCHES ORIGINAL: releaseMediaDevice function from production code
 *
 * @param deviceType - 'audio' or 'video'
 * @param replaceWithEmptyTracks - If true, replace with silent/dummy track in PC
 * @param detachSoundInd - presumably detaches the speaking/sound indicator from the released audio track — TODO confirm
 * @returns Promise<void>
 */
export type ReleaseMediaDeviceCallback = (deviceType: 'audio' | 'video', replaceWithEmptyTracks: boolean, detachSoundInd: boolean) => Promise<void>;
|
|
28
|
+
/**
 * Callback to restart audio analysis after audio track changes
 * Called when g_audioAnalysisTrack is updated (device change, NR toggle, etc.)
 */
export type RestartAudioAnalysisCallback = () => void;
/**
 * Callback to update debug audio loopback after audio track changes
 * Called when g_pubAudioInputTrack is updated (device change, NR toggle, etc.)
 */
export type UpdateAudioLoopbackCallback = () => void;
|
|
38
|
+
/**
 * Orchestrates getUserMedia acquisition, retry and device-error recovery for
 * the publisher side, and manages the resulting tracks' lifecycle
 * (mute/unmute, mute-event listeners, track-end handling, replacement in
 * peer connections). Coordinates MediaSources state, the DeviceManager, and
 * the noise-reduction pipeline via the injected callbacks.
 */
export declare class GetUserMediaManager {
    private logger;
    /** Mutex serializing concurrent getUserMedia operations — see awaitGetUserMediaOver(). */
    private gumMutex;
    /** Shared track/stream state holder (see MediaSources). */
    private mediaSources;
    private devicesManager;
    /** Noise-reduction mode/configuration manager; public so callers can query NR state. */
    audioProcessingModes: AudioProcessingModes;
    /** Invoked when the audio processing context fails and a getUserMedia retry is needed. */
    private onAudioContextErrorCallback;
    /** See UpdatePeerConnectionSendersCallback. */
    private updatePeerConnectionSendersCallback;
    /** See ReleaseMediaDeviceCallback. */
    private releaseMediaDeviceCallback;
    /** See RestartAudioAnalysisCallback. */
    private restartAudioAnalysisCallback;
    /** See UpdateAudioLoopbackCallback. */
    private updateAudioLoopbackCallback;
    // If true, presumably keeps sending a silent mic track instead of releasing audio — TODO confirm
    private alwaysSendMicSilence;
    constructor(mediaSources: MediaSources, devicesManager: DeviceManager, onAudioContextError: () => Promise<any>, updatePeerConnectionSenders: UpdatePeerConnectionSendersCallback, releaseMediaDevice: ReleaseMediaDeviceCallback, restartAudioAnalysis: RestartAudioAnalysisCallback, updateAudioLoopback: UpdateAudioLoopbackCallback, alwaysSendMicSilence: boolean);
    /** Resolves once any in-flight getUserMedia operation has completed (waits on the internal mutex). */
    awaitGetUserMediaOver(): Promise<boolean>;
    /**
     * Update audio analysis track with new track
     * Clones the raw audio track for speaking indicator analysis
     * @param audioInTrack - Raw audio track from getUserMedia
     * @param processAudioInTrack - Processed track (after NR) for logging only
     * @private
     */
    private updateAudioAnalysisTrack;
    /**
     * Process audio track through noise cancellation if enabled
     *
     * CRITICAL: Use resetContext=false for device changes to keep worklet alive!
     * Only use resetContext=true when enabling/disabling NC or changing modes.
     *
     * @param rawTrack - Raw audio track from getUserMedia
     * @param resetContext - If true, recreate worklet/context. If false, just reconnect audio graph.
     * @returns Processed audio track if NC enabled, or raw track if NC disabled/failed
     */
    createProcessAudioTrack(rawTrack: MediaStreamTrack | null, resetContext?: boolean): Promise<MediaStreamTrack | null>;
    /**
     * Acquire user media for the given constraints, with retry support.
     * @param config - session/request configuration object (shape not declared here — see implementation)
     * @param constraints - standard MediaStreamConstraints passed toward navigator.mediaDevices.getUserMedia
     * @param attempt - current attempt number; presumably bounded by MAX_GET_USER_MEDIA_ATTEMPTS — TODO confirm
     */
    getUserMedia(config: any, constraints: MediaStreamConstraints, attempt?: number): Promise<GetUserMediaEveObj>;
    // Rewrites device ids inside the constraints object before the GUM call — exact mapping is in the implementation
    private convertDevicesInConstraints;
    /** Post-acquisition handling of the (possibly partial) GUM result; returns overall status + reason. */
    handleGetUserMediaSuccess(gumWebcamFailed: boolean, webcamTrack: MediaStreamTrack, gumAudioInFailed: boolean, audioInTrack: MediaStreamTrack, config: any): Promise<{
        status: userMediaStatus;
        reason: string;
    }>;
    // Re-validates the selected devices against what getUserMedia actually returned
    private validateDevicesAfterGUM;
    /** Build a single-kind getUserMedia request; returns the pending promise plus its MediaSources entry. */
    createGetUserMediaRequest(mediaSourceType: MediaSourceType, constraints: boolean | MediaTrackConstraints): {
        gumPromise: Promise<unknown>;
        mediaSrc: MediaSourceStreamObj;
    };
    /** Execute a single-source getUserMedia request and record success/failure on the media source object. */
    getSingleUserMedia(mediaSrc: MediaSourceStreamObj): Promise<{
        status: boolean;
        mediaSource: MediaSourceStreamObj;
    }>;
    /** Map a getUserMedia rejection (by err.name) to a failure status on the media source object. */
    handleSingleGetUserMediaError(mediaSrc: MediaSourceStreamObj, err: {
        name: string;
        message?: string;
    }): Promise<{
        status: boolean;
        mediaSource: MediaSourceStreamObj;
    }>;
    /** Recovery path for OverconstrainedError: presumably retries with the offending constraint relaxed — TODO confirm. */
    handleSingleMediaOverConstraintsError(mediaSrc: MediaSourceStreamObj, err: {
        name: string;
        message?: string | undefined;
        constraint?: any;
    }): Promise<{
        status: boolean;
        mediaSource: MediaSourceStreamObj;
    }>;
    /** Checks whether the user has granted (confirmed) access to the requested device kinds. */
    userConfirmedMediaDevices(checkAudio?: boolean, checkVideo?: boolean): Promise<boolean>;
    /** Remove a named constraint from the 'audio' or 'video' section of a constraints object (mutates in place). */
    removeConstraintByType(constraint: string, constraints: unknown, type: string): void;
    /**
     * Detects the current echoCancellation constraint value
     * @param constraints The media track constraints object
     * @returns 'all', true, or null depending on the echoCancellation value
     */
    private getEchoCancellationValue;
    /**
     * Checks if browser supports latency constraint
     * @returns true if latency constraint is supported by the browser
     */
    private isLatencyConstraintSupported;
    /**
     * Checks if latency constraint is present in constraints
     * @param constraints The media track constraints object
     * @returns true if latency constraint exists
     */
    private hasLatencyConstraint;
    /** Recovery path for device errors (e.g. NotReadableError): presumably retries with the next available device — TODO confirm. */
    handleSingleMediaDeviceErrors(mediaSrc: MediaSourceStreamObj, err: {
        name: string;
        message?: string | undefined;
        constraint?: string;
    }): Promise<{
        status: boolean;
        mediaSource: MediaSourceStreamObj;
    }>;
    /** Returns a constraints object whose deviceId for the given kind points at the next candidate device. */
    changeConstraintsDeviceToNextDeviceByType(constraints: {
        audio?: any | boolean;
        video?: any | boolean;
    }, type: 'audio' | 'video'): {
        audio?: any | boolean;
        video?: any | boolean;
    };
    // Minimal GUM request (no specific device) — presumably used to trigger the permission prompt / labels — TODO confirm
    getWebcamMicNullUserMedia(requestAudio?: boolean, requestVideo?: boolean): Promise<true | undefined>;
    /** Wire up tracks after the very first (initial) media request of a session. */
    handleInitialMediaRequest(config: {
        videoDisabledByDefault: boolean;
        audioDisabledByDefault: boolean;
        shouldStartStreaming: boolean;
        peerConnection: any;
    }, gumWebcamFailed: boolean, webcamTrack: MediaStreamTrack, webcamProcessTrack: MediaStreamTrack, gumAudioInFailed: boolean, audioInTrack: MediaStreamTrack, processAudioInTrack: MediaStreamTrack | null): Promise<void>;
    /** Wire up tracks after a device-change re-acquisition (non-initial); tracks may be null when a kind failed. */
    handleDeviceChangedStreams(config: {
        videoDisabledByDefault?: boolean;
        audioDisabledByDefault?: boolean;
        shouldStartStreaming?: boolean;
        peerConnection?: any;
    }, gumWebcamFailed: boolean, webcamTrack: MediaStreamTrack | null, webcamProcessTrack: MediaStreamTrack | null, gumAudioInFailed: boolean, audioInTrack: MediaStreamTrack | null, processAudioInTrack: MediaStreamTrack | null): Promise<void>;
    /** Common track-update path shared by initial and device-change flows (initial flag selects behavior). */
    updateStreamTracks(initial: boolean, config: {
        videoDisabledByDefault?: boolean;
        audioDisabledByDefault?: boolean;
        shouldStartStreaming?: boolean;
        audioDeviceId?: any;
        videoDeviceId?: any;
    }, disableWebcam: boolean, disableAudio: boolean, audioInTrack: MediaStreamTrack | null, processAudioInTrack: MediaStreamTrack | null, newWebcamTrack: MediaStreamTrack | null, newTransWebcamTrack: MediaStreamTrack | null): Promise<void>;
    /** Attach mute/unmute event listeners to every track of the given stream. */
    initTracksMuteEventListeners(stream: MediaStream): void;
    handleTrackMute(trackType: string): void;
    handleTrackUnmute(trackType: string): void;
    /** React to a track's OS/browser-level muted state change (not a user mute). */
    onTrackMutedStateChanged(payload: {
        muted: boolean;
        track: MediaStreamTrack;
    }): Promise<void>;
    /** Inspect both tracks' muted flags and update internal state accordingly. */
    checkMutedTracks(audioTrack: MediaStreamTrack, videoTrack: MediaStreamTrack): void;
    /** Attach 'ended' listeners to the publisher stream's tracks. */
    initPublisherTracksEndEventListeners(stream: MediaStream): void;
    /** Handle a publisher track unexpectedly ending (e.g. device unplugged). */
    handlePublisherTrackEnd(track: MediaStreamTrack): void;
    /** Verify the readyState of publisher tracks in the given stream. */
    checkPublisherTracksReadyState(stream: MediaStream): void;
    /** Tear down the audio processing (noise reduction) pipeline for the current audio track. */
    stopAudioTrackProcessing(): Promise<void>;
    /**
     * Generic method to mute a track by stopping it and optionally replacing with a dummy track
     */
    muteTrack(trackKind: TrackKind, useDummyTrack?: boolean): Promise<boolean>;
    /**
     * Generic method to unmute a track by updating state
     * Note: Actual getUserMedia call should come from EselfSession to preserve constraints
     * @param trackKind - TrackKind.AUDIO or TrackKind.VIDEO
     * @returns Promise<boolean> - true if successful
     */
    unmuteTrack(trackKind: TrackKind): Promise<boolean>;
}
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import { AudioProcessing } from './media-processing/AudioProcessing';
|
|
2
|
+
import { DimensionsObj, GetUserMediaEveObj, GetUserMediaSuccessResponse, MediaSourceStreamObj, MediaSourceType, userMediaErrors } from './types';
|
|
3
|
+
/** Lifecycle state of a published track (used for mute tracking in MediaSources). */
export declare enum TrackState {
    ACTIVE = "ACTIVE",
    STOPPED = "STOPPED"
}
/** Track kind discriminator matching MediaStreamTrack.kind values. */
export declare enum TrackKind {
    AUDIO = "audio",
    VIDEO = "video"
}
|
|
11
|
+
/**
 * Central holder for all publisher media state: raw and processed audio/video
 * tracks, processing-pipeline status flags, fallback rendering elements, and
 * dummy (black video / silent audio) replacement tracks. Also provides the
 * track stop/replace helpers used by GetUserMediaManager and the peer
 * connection layer.
 *
 * Note: the g_ prefix mirrors the original global variables these fields
 * replaced in the legacy (kme-web-sdk) code.
 */
export declare class MediaSources {
    private logger;
    /** Current audio TrackState (ACTIVE/STOPPED). */
    private audioTrackState;
    /** Current video TrackState (ACTIVE/STOPPED). */
    private videoTrackState;
    // Counter/source for MediaSourceStreamObj.id values — presumably incremented per addGetMediaSource — TODO confirm
    private mediaSourceUniqueId;
    /** Raw microphone track as returned by getUserMedia (pre noise reduction). */
    g_gumAudioInputTrack: MediaStreamTrack | null;
    /** Whether the last audio getUserMedia attempt failed; null when not yet attempted. */
    g_gumAudioInFailed: boolean | null;
    /** Active AudioProcessing (noise reduction) instance, if any. */
    g_audioProcessingObj: AudioProcessing | null;
    /** Audio track actually published (processed output when NR is on, else raw). */
    g_pubAudioInputTrack: MediaStreamTrack | null;
    /** Cloned track used for speaking-indicator analysis. */
    g_audioAnalysisTrack: MediaStreamTrack | null;
    /** Raw webcam track as returned by getUserMedia. */
    g_gumWebcamTrack: MediaStreamTrack | null;
    g_gumWebcamFailed: boolean;
    /** Published video source — a track, or a ReadableStream when a track generator pipeline is used. */
    g_pubVideoTrack: MediaStreamTrack | ReadableStream | null;
    g_pubVideoTrackWidth: number | null;
    g_pubVideoTrackHeight: number | null;
    g_pubVideoTrackFps: number | null;
    /** Combined audio+video stream attached to the publisher peer connection. */
    g_pubAudioVideoStream: MediaStream | null;
    /** Screen-share (getDisplayMedia) stream, if active. */
    g_desktopStream: MediaStream | null;
    // --- processing pipeline status flags ---
    g_audioProcessingPipelineFailure: boolean;
    g_audioProcessingPipelineInitialized: boolean;
    g_videoProcessingPipelineFailure: boolean;
    g_videoProcessingPipelineInitialized: boolean;
    /** True when the video pipeline is running in its (canvas-based) fallback mode. */
    g_videoProcessingPipelineFallbackPipe: boolean;
    g_videoProcessingPipelineWebGlFailure: boolean;
    g_videoProcessingFallbackNumTries: number;
    // Insertable-streams processor/generator pair for the video device pipeline
    g_videoDeviceMsProcessor: ReadableStream | null;
    g_videoDeviceMsGenerator: WritableStream | null;
    // --- canvas/video-element fallback pipeline state ---
    g_fallbackWebcamVideoElement: HTMLVideoElement | null;
    g_fallbackOutputFrameCanvas: HTMLCanvasElement | null;
    g_fallbackVideoDeviceTrack: MediaStreamTrack | null;
    g_fallbackTimerId: number | null;
    /** Cached dummy black video track used when the camera is muted/released. */
    g_dummyBlackVideoInputTrack: MediaStreamTrack | null;
    /** Cached silent audio track used when the mic is muted/released. */
    g_silentAudioInputTrack: MediaStreamTrack | null;
    // Registry of per-kind MediaSourceStreamObj entries — keyed by MediaSourceType, presumably — TODO confirm
    private mediaSources;
    constructor();
    /** Combine per-kind GUM results into a single GetUserMediaEveObj event. */
    handleFinalGetUserMediaResults(gumSuccessResponse: GetUserMediaSuccessResponse, videoMediaSrc: MediaSourceStreamObj, audioMediaSrc: MediaSourceStreamObj): GetUserMediaEveObj;
    /** Build a failure event when the whole getUserMedia flow failed (no per-kind details). */
    returnGumGeneralFailureResult(webcamFailed: boolean, audioInFailed: boolean, failErr: userMediaErrors, failReason: string, failErrName: string): GetUserMediaEveObj;
    /** Build the event for a (possibly partial) successful getUserMedia result. */
    returnGumActiveResult(webcamFailed: boolean, audioInFailed: boolean): GetUserMediaEveObj;
    /** Create and register a new MediaSourceStreamObj for the given kind/constraints. */
    addGetMediaSource(type: MediaSourceType, constraints: any): MediaSourceStreamObj;
    /** Look up the registered media source entry for a kind, or null if none. */
    getMediaSource(type: MediaSourceType): MediaSourceStreamObj | null;
    /** Human-readable one-line summary of a media source, for logging. */
    mediaSrcLogStr(mediaSrc: MediaSourceStreamObj): string;
    /** True when the webcam "track" is actually an insertable-streams generator (ReadableStream). */
    webcamTrackIsGenerator(track: MediaStreamTrack | ReadableStream): boolean;
    /** True when the stream's video track originates from a track generator pipeline. */
    videoStreamIsTrackGenerator(stream: MediaStream): boolean;
    /** Maximum capture dimensions reported for the given video track. */
    getWebcamMaxSize(videoTrack: MediaStreamTrack): DimensionsObj;
    getAudioTrackState(): TrackState;
    getVideoTrackState(): TrackState;
    /** True when the audio track state is STOPPED (muted). */
    isAudioMuted(): boolean;
    /** True when the video track state is STOPPED (muted). */
    isVideoMuted(): boolean;
    setAudioTrackState(state: TrackState): void;
    setVideoTrackState(state: TrackState): void;
    /** Lazily create/return the dummy black video replacement track. */
    getDummyBlackVideoInputTrack(): MediaStreamTrack;
    /** Lazily create/return the silent audio replacement track; optionally stop the cached one first. */
    getSilentAudioInputTrack(stopSilentTrack?: boolean): MediaStreamTrack;
    /** True when the track is the output of the audio processing destination node (must not be stopped). */
    isTrackAudioDestNode(track: MediaStreamTrack): boolean;
    /**
     * Check if track is a dummy track
     */
    trackIsDummy(track: MediaStreamTrack | null): boolean;
    /** Whether the given track may safely be stopped (not a dummy / audio-dest track). */
    trackStopAllowed(track: MediaStreamTrack): boolean;
    /**
     * Stop a media track with proper cleanup
     * This respects track stop policies (won't stop dummy tracks or audio processing destination tracks)
     * @param track The track to stop
     */
    mdStopTrack(track: MediaStreamTrack | null): void;
    /**
     * Stop and remove all tracks from a stream
     * @param stream The stream to remove tracks from
     * @param trackKind Optional track kind filter ('audio' or 'video')
     * @returns true if any tracks were removed
     */
    mdStopAndRemoveStreamTracks(stream: MediaStream | null, trackKind: string | null): boolean;
    /**
     * Stop and replace a track in an existing stream
     * @param existingStream The stream to modify
     * @param newTrack The new track to add
     * @param addIfNotExist Whether to add the track if it doesn't exist
     * @param toStop Whether to stop the existing track
     */
    mdStopAndReplaceStreamTrack(existingStream: MediaStream, newTrack: MediaStreamTrack, addIfNotExist: boolean, toStop: boolean): void;
    /**
     * Replace and optionally stop a sender track in a peer connection
     * @param pc The RTCPeerConnection
     * @param newTrack The new track to use
     * @param shouldStopTrack Whether to stop the previous track
     * @param addIfNotExist Whether to add the track if no sender exists
     * @param addToPcStream The stream to add the track to if adding
     * @returns Promise<boolean> indicating if the track was replaced
     */
    mdReplaceStopSenderTrack(pc: RTCPeerConnection, newTrack: MediaStreamTrack, shouldStopTrack: boolean, addIfNotExist: boolean, addToPcStream: MediaStream | null): Promise<boolean>;
    /**
     * Clean up audio analysis track
     * Called when stopping audio analysis or destroying session
     */
    cleanupAudioAnalysisTrack(): void;
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { AudioProcessingModes } from './AudioProcessingModes';
|
|
2
|
+
import { AudioNrMode } from './types';
|
|
3
|
+
import { MediaSources } from '../MediaSources';
|
|
4
|
+
/** Events emitted by the audio processing layer. */
export declare enum AudioProcessingEvents {
    /** Fired when noise reduction has been turned off (e.g. after repeated context errors). */
    noiseReductionDisabled = "noiseReductionDisabled"
}
/**
 * Callback for audio processing context errors that need getUserMedia retry
 */
export type AudioProcessingErrorCallback = () => Promise<any>;
|
|
11
|
+
/**
 * Runs the noise-reduction pipeline for a microphone track: builds an
 * AudioContext + AudioWorklet graph that consumes the raw input track and
 * produces a processed output track, with automatic fallback loading of the
 * worklet module and full cleanup on stop. Configuration (mode, worklet
 * paths, browser support) comes from the injected AudioProcessingModes.
 */
declare class AudioProcessing {
    private logger;
    /** Mode/worklet-path configuration manager. */
    private modesManager;
    /** Shared media state (used e.g. to mark pipeline failure/init flags). */
    private mediaSources;
    /** Invoked on context errors that require the caller to retry getUserMedia. */
    private onErrorCallback;
    /** AudioContext hosting the processing graph; kept alive across device changes. */
    private audioContext;
    /** The noise-reduction AudioWorkletNode. */
    private workletNode;
    /** Stream wrapping the raw input track. */
    private audioInStream;
    // Source/destination ends of the processing graph
    private audioSrcStream;
    private audioDestStream;
    /** Processed track handed back to the publisher. */
    private outputAudioTrack;
    /** Currently active noise-reduction mode. */
    private nrMode;
    // Consecutive context-error count — presumably compared against AudioProcessingConstants.MAX_CONTEXT_ERRORS — TODO confirm
    private contextErrorsCount;
    constructor(modesManager: AudioProcessingModes, mediaSources: MediaSources, onErrorCallback?: AudioProcessingErrorCallback);
    /**
     * Load worklet module with automatic fallback to production CDN on failure
     * @param audioContext - The audio context to load the worklet into
     * @param workletPath - Primary worklet path to attempt loading from
     * @param nrMode - The noise reduction mode (used to get filename for fallback)
     */
    private loadWorkletWithFallback;
    /**
     * Thoroughly cleanup audio worklet and context to prevent memory leaks
     * Removes event handlers, closes message ports, disconnects nodes, and closes context
     */
    private cleanupAudioProcessing;
    // One-time model/worklet initialization for the selected NR mode
    private initializeModel;
    /**
     * Build (or rebuild) the processing graph for the given raw track and mode.
     * @param audioTrack - raw microphone track to process
     * @param modeNr - noise-reduction mode to apply
     * @param resetContext - if true, tear down and recreate the worklet/context; if false, only reconnect the graph
     * @returns the processed output track
     */
    init(audioTrack: MediaStreamTrack, modeNr: AudioNrMode, resetContext: boolean): Promise<MediaStreamTrack>;
    /** True when the given track is this pipeline's destination-node output (must not be stopped externally). */
    isTrackAudioDestNode(track: MediaStreamTrack | null): boolean;
    /** Stop processing and release all audio graph resources. */
    stop(): Promise<void>;
}
export { AudioProcessing };
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import { AudioNrMode } from './types';
|
|
2
|
+
/**
 * Manages audio processing modes, configuration, and browser compatibility
 * Based on original audioProcessingModes from kme-web-sdk
 */
export declare class AudioProcessingModes {
    private logger;
    /** Whether noise reduction is currently enabled (see getNrEnabled/setNrEnabled). */
    private currentNrEnableState;
    /** Currently selected AudioNrMode (see getNrMode/setMode). */
    private currentMode;
    /** Base URL/path worklet files are loaded from; environment-dependent (see determineWorkletBasePath). */
    private workletBasePath;
    /** Result of the AudioWorklet browser-support probe (see checkBrowserSupport). */
    private browserSupported;
    constructor();
    /**
     * Determine the worklet base path based on the current environment
     * - localhost: Load from local repo worklets
     * - QA (URL contains .qa.kaltura): Load from QA CDN
     * - Production (default): Load from production CDN
     * - Fallback: Always fallback to production on error
     */
    private determineWorkletBasePath;
    /**
     * Check if the browser supports AudioWorklet for noise reduction
     */
    private checkBrowserSupport;
    /**
     * Get current noise reduction mode
     * Replaces original: session.modeNr
     */
    getNrMode(): AudioNrMode;
    /**
     * Get whether noise reduction is enabled
     * Replaces original: session.enableNr
     */
    getNrEnabled(): boolean;
    /**
     * Set noise reduction mode and enabled state
     * MATCHES ORIGINAL SIGNATURE: setNrMode(nrEnabled: boolean, nrMode: AudioNrModes)
     * This is the primary method from production code
     */
    setNrMode(nrEnabled: boolean, nrMode: AudioNrMode): void;
    /**
     * Set noise reduction enabled state
     * Calls setNrMode with current mode (matches original behavior)
     */
    setNrEnabled(nrEnabled: boolean): void;
    /**
     * Set noise reduction mode, keeping the current enabled state
     * Calls setNrMode with current enabled state (matches original behavior)
     */
    setMode(nrMode: AudioNrMode): void;
    /**
     * Check if noise reduction can be enabled in this browser
     */
    canEnableNoiseReduction(): boolean;
    /**
     * Get the worklet file name for a specific mode
     * MATCHES ORIGINAL: getAudioWorkletFileName()
     */
    private getAudioWorkletFileName;
    /**
     * Get the worklet file path for a specific mode
     * The path is determined by the environment (localhost, QA, or production)
     * Note: If loading from the returned path fails, AudioProcessing will automatically
     * fallback to the production CDN
     * In original: getAudioProcessingWorkletUrlPath()
     */
    getWorkletPath(nrMode: AudioNrMode): string;
    /**
     * Get just the worklet file name (for external use)
     */
    getWorkletFileName(mode: AudioNrMode): string;
    /**
     * Set the worklet base path (for serving worklets from custom location)
     */
    setWorkletBasePath(path: string): void;
}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
/**
 * Audio Noise Reduction Mode Enum
 * Defines available RNN noise reduction modes
 */
export declare enum AudioNrMode {
    /** Noise reduction disabled. */
    Off = -1,
    Mode11 = 11,// NewFullTry - production mode
    Mode13 = 13,// NewFullTry_1 - with rn disabling code
    Mode14 = 14,// NewFull_12 - new model with improved quality (30/1/25)
    Mode15 = 15,// NewFull_12_small - smaller model for low bandwidth (30/1/25 small)
    /** Default mode — intentionally aliases Mode13. */
    Default = 13
}
|
|
13
|
+
/**
 * Configuration for Audio Processing
 */
export interface AudioProcessingConfig {
    /**
     * Base path for worklet files
     * If not provided, will use default path based on package structure
     */
    workletBasePath?: string;
    /**
     * Enable noise reduction by default
     */
    enabled?: boolean;
    /**
     * Default noise reduction mode
     */
    mode?: AudioNrMode;
}
/**
 * Audio Processing state snapshot
 */
export interface AudioProcessingState {
    /** Whether noise reduction is currently enabled. */
    enabled: boolean;
    /** Currently selected noise reduction mode. */
    mode: AudioNrMode;
    /** True once the processing pipeline has been initialized. */
    initialized: boolean;
    /** True when the pipeline has failed and processing is unavailable. */
    failed: boolean;
}
|
|
40
|
+
/**
 * Constants for audio processing
 */
export declare const AudioProcessingConstants: {
    /**
     * Message type sent from worklet when performance issues detected
     */
    readonly WORKLET_DELAY_MSG: "delay";
    /**
     * Message type to reset noise suppression state
     */
    readonly WORKLET_RESET_MSG: "resetNs";
    /**
     * Maximum number of audio context errors before permanent disable
     */
    readonly MAX_CONTEXT_ERRORS: 5;
    /**
     * PCM frequency for audio processing (48kHz)
     */
    readonly PCM_FREQUENCY: 48000;
    /**
     * Timeout for audio context resume operation (5 seconds)
     */
    readonly RESUME_TIMEOUT_MS: 5000;
    /**
     * Worklet base paths for different environments
     */
    readonly WORKLET_BASE_PATHS: {
        readonly PRODUCTION: "https://flow-assets.avatar.us.kaltura.ai/static";
        readonly QA: "https://flow-assets.avatar.qa.kaltura.ai/static";
        readonly LOCAL: "./public/worklets";
    };
    /**
     * localStorage keys used to persist noise-reduction settings
     */
    readonly STORAGE_KEYS: {
        readonly NR_ENABLED: "unisphere_nr_enabled";
        readonly NR_MODE: "unisphere_nr_mode";
    };
};
|
|
80
|
+
/**
 * Worklet file names by mode
 * Maps each AudioNrMode to the worklet script file name to load.
 */
export declare const WorkletFiles: Record<AudioNrMode, string>;
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/** Which stream property a lock applies to. Note: string values "1"/"2" are wire/legacy values. */
export declare enum StreamLockType {
    RESOLUTION = "1",
    FPS = "2"
}
/** Kind of media source being acquired/tracked. */
export declare enum MediaSourceType {
    Webcam = "Webcam",
    Desktop = "Desktop",
    AudioIn = "AudioIn",
    AudioOut = "AudioOut"
}
/** Acquisition status of a media source. */
export declare enum MediaSourceStatus {
    NotInitialized = 0,
    Active = 1,
    Failed = 2
}
/** Classification of getUserMedia failures. */
export declare enum userMediaErrors {
    /** User denied permission (e.g. NotAllowedError). */
    permissionsError = "permissionsError",
    /** Device-level failure (e.g. NotReadableError, device in use). */
    deviceError = "deviceError",
    streamingError = "streamingError",
    /** No matching device found (e.g. NotFoundError). */
    noMediaError = "noMediaError",
    /** Another getUserMedia request is already in progress. */
    otherGumActive = "otherGumActive",
    generalErr = "generalErr",
    /** No error. */
    none = ""
}
|
|
25
|
+
/** Per-source record tracking one getUserMedia acquisition attempt and its result. */
export type MediaSourceStreamObj = {
    /** Unique id assigned by MediaSources on creation. */
    id: number;
    type: MediaSourceType;
    /** Constraints used for the getUserMedia call. */
    constraints: any;
    mediaKind: getUserMediaEveMediaKindsType;
    /** Attempt counter for retries. */
    attempt: number;
    /** Acquired stream, or null while pending/failed. */
    stream: MediaStream | null;
    /** Acquired track, or null while pending/failed. */
    track: MediaStreamTrack | null;
    // NOTE(review): capitalized property name is inconsistent with the rest but is part of the public shape
    Status: MediaSourceStatus;
    /** Human-readable failure description, empty on success. */
    failureReason: string;
    failureError: userMediaErrors;
    /** Original DOMException name (e.g. "NotAllowedError"). */
    failureErrName: string;
};
/** Simple width/height pair. */
export type DimensionsObj = {
    width: number;
    height: number;
};
/** Currently selected input/output devices; null when not selected/available. */
export type SelectedDevicesObj = {
    audioDevice: MediaDeviceInfo | null;
    videoDevice: MediaDeviceInfo | null;
    audioOutputDevice: MediaDeviceInfo | null;
};
/** Alias for a device display name. */
export type MediaDeviceNameObj = string;
|
|
48
|
+
/** Overall outcome of a getUserMedia flow; 'partial' means one kind succeeded and the other failed. */
export declare enum userMediaStatus {
    success = "success",
    fail = "fail",
    partial = "partial"
}
/** Whether a getUserMedia event covers a single media kind or multiple. */
export declare enum getUserMediaEveRequestType {
    singular = "singular",
    multiple = "multiple"
}
/** Media kinds referenced in getUserMedia events. */
export declare enum getUserMediaEveMediaKindsType {
    video = "video",
    audio = "audio"
}
/** Aggregated getUserMedia result event: per-kind success/failure lists with parallel reason/error/name arrays. */
export type GetUserMediaEveObj = {
    status: userMediaStatus;
    requestType: getUserMediaEveRequestType;
    failedMediaTypes: getUserMediaEveMediaKindsType[];
    succeedMediaTypes: getUserMediaEveMediaKindsType[];
    /** Failure descriptions — presumably index-aligned with failedMediaTypes — TODO confirm. */
    failReasons: string[];
    failErrors: userMediaErrors[];
    /** Original DOMException names for the failures. */
    errNames: string[];
};
/** Minimal status/reason pair returned by the success-handling path. */
export type GetUserMediaSuccessResponse = {
    status: userMediaStatus;
    reason: string;
};
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import { UnisphereLoggerType } from '@unisphere/core/src/lib/logger/types';
|
|
2
|
+
import { IceCandidateDataObj } from './types';
|
|
3
|
+
/** Record of a single ICE disconnection episode, for statistics/reporting. */
export type IceDisconnectionObj = {
    /** Timestamp when the disconnection occurred. */
    disconnectionTime: number;
    /** Numeric disconnect reason code (see IceHandler.disconnectionReasonIsIceRelated). */
    disconnectReason: number;
    /** How long the disconnection lasted. */
    duration: number;
    /** Bytes transferred up to the disconnection — presumably cumulative on the connection — TODO confirm. */
    totalBytes: number;
    /** Whether the connection had ever been established before this disconnection. */
    wasConnected: boolean;
};
/** A gathered ICE candidate tagged with the gathering round it was collected in. */
export type IceCandidateInfoObj = {
    round: number;
    c: RTCIceCandidate;
};
/** Invoked when the connection fails; iceFailure distinguishes ICE-level failures from other causes. */
type OnConnectionFail = (iceFailure: boolean, reason: number, subReason: number, reasonStr: string) => void;
/** Invoked per local ICE candidate (trickle); null signals end-of-candidates. */
type OnIceCandidate = (iceCandidate: IceCandidateDataObj | null) => void;
|
|
16
|
+
/**
 * Tracks ICE negotiation for one peer connection: candidate gathering rounds,
 * connection/gathering state transitions, disconnection statistics, and
 * trickle-ICE candidate exchange. Reports failures and local candidates via
 * the callbacks supplied at construction.
 */
declare class IceHandler {
    private logger;
    /** Stream identifier used for logging/reporting; updatable via updateStreamId(). */
    private streamerId;
    /** Accumulated textual ICE stats log. */
    iceStatsAllStr: string;
    /** Timer id for the periodic ICE stats check (cleared by clearIceStatsTimer). */
    private iceStatsTimerCheckId;
    /** Timestamp when candidate gathering started. */
    iceStatsGatheringStartTime: number;
    /** Last observed RTCPeerConnection.connectionState. */
    iceStatsConnState: string;
    /** Last observed iceConnectionState. */
    iceStatsIceConnState: string;
    /** Last observed iceGatheringState. */
    iceStatsIceGatherState: string;
    private iceStatsSuggestDisconnectStr;
    /** Timestamp at which a disconnect was first suggested by the stats. */
    iceStatsSuggestDisconnectTime: number;
    iceStatsNewChangeTime: number;
    /** History of disconnection episodes (see numNetworkDisconn). */
    iceDisconnections: IceDisconnectionObj[];
    /** Total number of local candidates gathered. */
    iceStatsNumGatheredCands: number;
    /** Gathered candidates tagged by round (IceCandidateInfoObj[]). */
    private iceStatsCandidates;
    /** Current gathering round counter (incremented by startIceGatheringRound). */
    private iceStatsGatheringRound;
    private iceStatsNumCandsUntilLastRound;
    iceStatsGatheringDoneTime: number;
    /** Time from gathering start to the first candidate. */
    iceStatsGatheringToFirstTime: number;
    // Guard flag so the "no candidates gathered" condition is checked/reported only once — TODO confirm
    private iceNoCandidatesChecked;
    private kIceStatsSecStatesNew;
    private onConnectionFail;
    private onIceCandidate?;
    // NOTE(review): "Coon" is a typo for "Conn" but the name is public API and cannot be changed here
    kIceStatsLogEveryCoonState: boolean;
    constructor(streamerId: string, logger: UnisphereLoggerType, onConnectionFail: OnConnectionFail, onIceCandidate?: OnIceCandidate);
    /** True when the numeric disconnect reason code denotes an ICE-level failure. */
    disconnectionReasonIsIceRelated(disconnectReason: number): boolean;
    /** Update the stream identifier used in logs/reports. */
    updateStreamId(streamKey: string): void;
    /** Count network disconnections within the deltaMs window ending at currentTime. */
    numNetworkDisconn(deltaMs: number, currentTime: number): number;
    /** Map a connection-state string to its numeric code for reporting. */
    connectionStateToNumber(connectionStateStr: string): number;
    private getIceGatheringStateNumber;
    /** Reset all gathering statistics (start of a new negotiation). */
    resetIceGatheringStats(): void;
    /**
     * Collect/flush ICE statistics.
     * @param peerConnected - whether the peer connection is currently connected
     * @param callOnReset - true when invoked as part of a stats reset
     * @param callByTimer - true when invoked by the periodic timer
     * @param endReasonStr - textual reason when collection ends a session
     * @param logOnly - if true, only log; do not report/act
     */
    iceStatsCollect(peerConnected: boolean, callOnReset: boolean, callByTimer: boolean, endReasonStr: string, logOnly: boolean): void;
    /** Cancel the periodic ICE stats timer, if running. */
    clearIceStatsTimer(): void;
    /** Begin a new candidate gathering round (e.g. after an ICE restart). */
    startIceGatheringRound(): void;
    /** onicecandidate handler: records the candidate and forwards it via onIceCandidate (trickle). */
    handleICECandidateEvent(event: any): void;
    /** onicecandidateerror handler: logs/records the gathering error. */
    handleIceCandidateError(event: any): void;
    /**
     * Add a remote ICE candidate to the peer connection
     * Used for trickle ICE to add candidates as they arrive from the remote peer
     * @param peerConnection - The RTCPeerConnection instance
     * @param iceCandidate - The ICE candidate data, or null to signal end of candidates
     */
    addRemoteIceCandidate(peerConnection: RTCPeerConnection, iceCandidate: IceCandidateDataObj | null): Promise<void>;
}
export default IceHandler;
|