@hamsa-ai/voice-agents-sdk 0.4.0-beta.1 → 0.4.0-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +351 -123
- package/dist/index.cjs.js +2 -1
- package/dist/index.cjs.js.map +1 -0
- package/dist/index.esm.js +2 -1
- package/dist/index.esm.js.map +1 -0
- package/dist/index.umd.js +2 -1
- package/dist/index.umd.js.map +1 -0
- package/package.json +22 -13
- package/types/classes/livekit-analytics.d.ts +370 -0
- package/types/classes/livekit-audio-manager.d.ts +738 -0
- package/types/classes/livekit-connection.d.ts +318 -0
- package/types/classes/livekit-manager.d.ts +527 -0
- package/types/classes/livekit-tool-registry.d.ts +607 -0
- package/types/classes/{screen_wake_lock.d.ts → screen-wake-lock.d.ts} +4 -4
- package/types/classes/types.d.ts +325 -0
- package/types/main.d.ts +679 -56
- package/types/classes/livekit_manager.d.ts +0 -118
|
@@ -0,0 +1,738 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LiveKitAudioManager - Advanced audio stream management for voice agent communication
|
|
3
|
+
*
|
|
4
|
+
* This class provides comprehensive management of audio streams, tracks, and playback
|
|
5
|
+
* for voice agent conversations. It handles the complex WebRTC audio pipeline including
|
|
6
|
+
* track subscription/unsubscription, HTML audio element management, volume control,
|
|
7
|
+
* and real-time audio activity detection.
|
|
8
|
+
*
|
|
9
|
+
* Key Features:
|
|
10
|
+
* - **Smart Track Management**: Automatic handling of audio track lifecycle
|
|
11
|
+
* - **Dynamic Volume Control**: Real-time volume adjustment across all audio streams
|
|
12
|
+
* - **Audio Activity Detection**: Speaking/listening state detection with events
|
|
13
|
+
* - **Performance Monitoring**: Comprehensive track statistics and analytics
|
|
14
|
+
* - **Robust Error Handling**: Graceful handling of audio playback issues
|
|
15
|
+
* - **Memory Management**: Automatic cleanup of audio resources and DOM elements
|
|
16
|
+
* - **Cross-browser Compatibility**: Works across modern browsers with WebRTC support
|
|
17
|
+
*
|
|
18
|
+
* Audio Pipeline Flow:
|
|
19
|
+
* 1. **Track Subscription**: Incoming audio tracks from voice agents
|
|
20
|
+
* 2. **Element Creation**: HTML audio elements for each track
|
|
21
|
+
* 3. **Volume Application**: Consistent volume across all streams
|
|
22
|
+
* 4. **Activity Monitoring**: Real-time speaking/listening detection
|
|
23
|
+
* 5. **Statistics Collection**: Performance and quality metrics
|
|
24
|
+
* 6. **Cleanup Management**: Proper resource disposal
|
|
25
|
+
*
|
|
26
|
+
* @example Basic Audio Management
|
|
27
|
+
* ```typescript
|
|
28
|
+
* const audioManager = new LiveKitAudioManager();
|
|
29
|
+
*
|
|
30
|
+
* // Set up audio event listeners
|
|
31
|
+
* audioManager.on('trackSubscribed', ({ track, participant }) => {
|
|
32
|
+
* console.log(`Audio track from ${participant} is now playing`);
|
|
33
|
+
* showAudioIndicator(participant);
|
|
34
|
+
* });
|
|
35
|
+
*
|
|
36
|
+
* audioManager.on('trackUnsubscribed', ({ participant }) => {
|
|
37
|
+
* console.log(`Audio track from ${participant} stopped`);
|
|
38
|
+
* hideAudioIndicator(participant);
|
|
39
|
+
* });
|
|
40
|
+
*
|
|
41
|
+
* audioManager.on('speaking', () => {
|
|
42
|
+
* showAgentSpeakingIndicator();
|
|
43
|
+
* });
|
|
44
|
+
*
|
|
45
|
+
* audioManager.on('listening', () => {
|
|
46
|
+
* hideAgentSpeakingIndicator();
|
|
47
|
+
* });
|
|
48
|
+
*
|
|
49
|
+
* // Control volume
|
|
50
|
+
* audioManager.setVolume(0.8);
|
|
51
|
+
* ```
|
|
52
|
+
*
|
|
53
|
+
* @example Volume Control Integration
|
|
54
|
+
* ```typescript
|
|
55
|
+
* // Volume slider integration
|
|
56
|
+
* const volumeSlider = document.getElementById('volume');
|
|
57
|
+
* volumeSlider.addEventListener('input', (e) => {
|
|
58
|
+
* const volume = parseFloat(e.target.value);
|
|
59
|
+
* audioManager.setVolume(volume);
|
|
60
|
+
* });
|
|
61
|
+
*
|
|
62
|
+
* // Listen for volume changes
|
|
63
|
+
* audioManager.on('volumeChanged', (newVolume) => {
|
|
64
|
+
* volumeSlider.value = newVolume.toString();
|
|
65
|
+
* updateVolumeDisplay(newVolume);
|
|
66
|
+
* });
|
|
67
|
+
*
|
|
68
|
+
* // Mute/unmute functionality
|
|
69
|
+
* muteButton.addEventListener('click', () => {
|
|
70
|
+
* const currentVolume = audioManager.volume;
|
|
71
|
+
* audioManager.setVolume(currentVolume > 0 ? 0 : 0.8);
|
|
72
|
+
* });
|
|
73
|
+
* ```
|
|
74
|
+
*
|
|
75
|
+
* @example Audio Statistics Monitoring
|
|
76
|
+
* ```typescript
|
|
77
|
+
* // Monitor audio track statistics
|
|
78
|
+
* const stats = audioManager.getTrackStats();
|
|
79
|
+
* console.log(`Active tracks: ${stats.activeTracks}`);
|
|
80
|
+
* console.log(`Audio elements: ${stats.audioElements}`);
|
|
81
|
+
*
|
|
82
|
+
* // Inspect individual track details
|
|
83
|
+
* stats.trackDetails.forEach(([trackId, data]) => {
|
|
84
|
+
* console.log(`Track ${trackId}:`);
|
|
85
|
+
* console.log(` Participant: ${data.participant}`);
|
|
86
|
+
* console.log(` Source: ${data.source}`);
|
|
87
|
+
* console.log(` Muted: ${data.muted}`);
|
|
88
|
+
* console.log(` Subscription time: ${new Date(data.subscriptionTime)}`);
|
|
89
|
+
* });
|
|
90
|
+
*
|
|
91
|
+
* // Check for audio issues
|
|
92
|
+
* if (stats.activeTracks === 0 && expectedAgentPresent) {
|
|
93
|
+
* console.warn('No active audio tracks - agent may not be speaking');
|
|
94
|
+
* showAudioTroubleshootingHint();
|
|
95
|
+
* }
|
|
96
|
+
* ```
|
|
97
|
+
*
|
|
98
|
+
* @example Conversation Flow Control
|
|
99
|
+
* ```typescript
|
|
100
|
+
* // Pause audio during interruptions
|
|
101
|
+
* phoneRinging.addEventListener('ring', () => {
|
|
102
|
+
* audioManager.pauseAllAudio();
|
|
103
|
+
* showPausedIndicator();
|
|
104
|
+
* });
|
|
105
|
+
*
|
|
106
|
+
* // Resume when ready
|
|
107
|
+
* phoneRinging.addEventListener('end', () => {
|
|
108
|
+
* audioManager.resumeAllAudio();
|
|
109
|
+
* hidePausedIndicator();
|
|
110
|
+
* });
|
|
111
|
+
*
|
|
112
|
+
* // Handle page visibility changes
|
|
113
|
+
* document.addEventListener('visibilitychange', () => {
|
|
114
|
+
* if (document.hidden) {
|
|
115
|
+
* audioManager.pauseAllAudio();
|
|
116
|
+
* } else {
|
|
117
|
+
* audioManager.resumeAllAudio();
|
|
118
|
+
* }
|
|
119
|
+
* });
|
|
120
|
+
* ```
|
|
121
|
+
*
|
|
122
|
+
* @example Error Handling
|
|
123
|
+
* ```typescript
|
|
124
|
+
* audioManager.on('error', (error) => {
|
|
125
|
+
* console.error('Audio error:', error.message);
|
|
126
|
+
*
|
|
127
|
+
* if (error.message.includes('volume')) {
|
|
128
|
+
* showVolumeError();
|
|
129
|
+
* } else if (error.message.includes('track')) {
|
|
130
|
+
* handleTrackError();
|
|
131
|
+
* // Audio manager will automatically retry
|
|
132
|
+
* }
|
|
133
|
+
* });
|
|
134
|
+
*
|
|
135
|
+
* // Monitor for audio playback issues
|
|
136
|
+
* audioManager.on('trackSubscribed', ({ track }) => {
|
|
137
|
+
* // Set up timeout to detect audio playback issues
|
|
138
|
+
* setTimeout(() => {
|
|
139
|
+
* const stats = audioManager.getTrackStats();
|
|
140
|
+
* if (stats.audioElements === 0) {
|
|
141
|
+
* console.warn('Audio elements not created - possible browser restriction');
|
|
142
|
+
* promptUserInteraction();
|
|
143
|
+
* }
|
|
144
|
+
* }, 1000);
|
|
145
|
+
* });
|
|
146
|
+
* ```
|
|
147
|
+
*
|
|
148
|
+
* Technical Implementation:
|
|
149
|
+
* - Uses native HTML5 audio elements for cross-browser compatibility
|
|
150
|
+
* - Implements automatic volume normalization across all tracks
|
|
151
|
+
* - Provides real-time audio activity detection through DOM events
|
|
152
|
+
* - Maintains comprehensive track metadata for analytics
|
|
153
|
+
* - Includes robust error handling for browser audio restrictions
|
|
154
|
+
* - Manages DOM element lifecycle to prevent memory leaks
|
|
155
|
+
*/
|
|
156
|
+
import { EventEmitter } from 'events';
|
|
157
|
+
import { type RemoteParticipant, type RemoteTrack, type RemoteTrackPublication, type Room } from 'livekit-client';
|
|
158
|
+
import type { TrackStatsData, TrackStatsResult } from './types';
|
|
159
|
+
/**
 * LiveKitAudioManager — audio stream management for voice agent communication.
 *
 * Extends EventEmitter so consumers can react to audio lifecycle changes.
 * Manages remote audio track subscription/unsubscription, the HTML audio
 * elements used for playback, output volume, microphone mute state,
 * frequency-domain data for visualizations, per-track statistics, and
 * resource cleanup.
 *
 * Events emitted (see individual members): 'trackSubscribed',
 * 'trackUnsubscribed', 'speaking', 'listening', 'volumeChanged',
 * 'micMuted', 'micUnmuted', 'error'.
 */
export declare class LiveKitAudioManager extends EventEmitter {
    #private;
    /** Active HTML audio elements currently playing agent audio. */
    audioElements: Set<HTMLAudioElement>;
    /** Per-track statistics and metadata, keyed by track id, for analytics/monitoring. */
    trackStats: Map<string, TrackStatsData>;
    /** Current playback volume applied to all audio elements (0.0 = muted, 1.0 = full). */
    volume: number;
    /** LiveKit room reference used for device/microphone control; provided via setRoom(). */
    private room;
    /** Optional WebAudio context and analysers (reserved for future use, per upstream docs). */
    private audioContext;
    private inputAnalyser;
    private outputAnalyser;
    /**
     * Supplies the LiveKit Room used for microphone control.
     *
     * @param room - Active room instance, or null to clear the reference.
     */
    setRoom(room: Room | null): void;
    /**
     * Sets the playback volume for all active audio elements, and for any
     * tracks subscribed in the future. Input is normalized so it stays within
     * valid bounds; invalid input is handled gracefully.
     *
     * @param volume - Desired level (0.0 = muted, 1.0 = full volume).
     * @fires volumeChanged When the volume is successfully changed.
     * @fires error When applying the volume fails.
     */
    setVolume(volume: number): void;
    /**
     * Returns the current output (agent playback) volume — the same value
     * last set via setVolume().
     *
     * @returns Volume in the range 0.0 (muted) to 1.0 (full volume).
     */
    getOutputVolume(): number;
    /**
     * Returns the current microphone input level, useful for voice-activity
     * indicators or input meters. Requires an active audio context and
     * microphone stream; returns 0.0 when neither is available.
     *
     * @returns Input level from 0.0 (no input) to 1.0 (maximum input).
     */
    getInputVolume(): number;
    /**
     * Mutes or unmutes the user's microphone. While muted, the user's voice
     * is not transmitted to the agent.
     *
     * @param muted - True to mute the microphone, false to unmute.
     * @fires micMuted When the microphone is successfully muted.
     * @fires micUnmuted When the microphone is successfully unmuted.
     * @fires error When microphone control fails.
     */
    setMicMuted(muted: boolean): void;
    /**
     * Reports the current microphone mute state, e.g. for UI display.
     *
     * @returns True if the microphone is muted, false otherwise.
     */
    isMicMuted(): boolean;
    /**
     * Returns frequency-domain data for the microphone input, suitable for
     * audio visualizations or voice-activity analysis. Requires an active
     * audio context and microphone stream; returns an empty array otherwise.
     *
     * @returns Uint8Array of frequency bins (0–255 per bin).
     */
    getInputByteFrequencyData(): Uint8Array;
    /**
     * Returns frequency-domain data for the agent's audio output, suitable
     * for visualizations or audio analysis. Requires active agent playback;
     * returns an empty array otherwise.
     *
     * @returns Uint8Array of frequency bins (0–255 per bin).
     */
    getOutputByteFrequencyData(): Uint8Array;
    /**
     * Handles a new remote audio track subscription: creates the HTML audio
     * element, applies the current volume, sets up activity monitoring and
     * statistics tracking. Called automatically by LiveKit when a track
     * becomes available.
     *
     * @param track - The remote audio track to process.
     * @param publication - Track publication metadata from LiveKit.
     * @param participant - Participant who owns the track.
     * @fires trackSubscribed When the track is processed and ready for playback.
     * @fires speaking When audio playback begins (agent starts talking).
     * @fires listening When audio playback ends (agent stops talking).
     */
    handleTrackSubscribed(track: RemoteTrack, publication: RemoteTrackPublication, participant: RemoteParticipant): void;
    /**
     * Handles remote audio track unsubscription: removes statistics, cleans
     * up the associated DOM element, and notifies listeners. Called
     * automatically by LiveKit when a track is no longer available.
     *
     * @param track - The remote audio track being unsubscribed.
     * @param publication - Track publication metadata from LiveKit.
     * @param participant - Participant who owns the track.
     * @fires trackUnsubscribed When cleanup completes successfully.
     */
    handleTrackUnsubscribed(track: RemoteTrack, publication: RemoteTrackPublication, participant: RemoteParticipant): void;
    /**
     * Pauses playback of all managed audio elements without ending the
     * conversation; resume later with resumeAllAudio().
     */
    pauseAllAudio(): void;
    /**
     * Resumes playback of all previously paused audio elements. Play errors
     * caused by browser autoplay/user-interaction policies are caught and
     * ignored, per upstream docs.
     */
    resumeAllAudio(): void;
    /**
     * Returns comprehensive statistics about managed audio tracks: active
     * track count, audio element count, and per-track detail entries
     * (participant, source, muted/enabled state, subscription time) for
     * health monitoring, debugging, and analytics.
     *
     * @returns Track statistics, including counts and per-track details.
     */
    getTrackStats(): TrackStatsResult;
    /**
     * Removes all HTML audio elements from the DOM, clears internal state
     * (audioElements, trackStats), and resets the manager. Safe to call
     * multiple times and tolerant of already-removed DOM elements; use this
     * to prevent leaks when disconnecting or reinitializing.
     */
    cleanup(): void;
}
|