@hamsa-ai/voice-agents-sdk 0.4.0-beta.1 → 0.4.0-beta.2

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
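The substance of this release is a type-surface overhaul: `main.d.ts` grows from 92 to 715 lines, the `any`-typed analytics getters gain result types imported from `livekit-manager`, and `start()` accepts new options (`userId`, `preferHeadphonesForIosDevices`, `connectionDelay`, `disableWakeLock`). Below is a minimal consumer sketch against the new declarations; the option values are illustrative, and the property names read off the stats object (`latency`, `quality`) are taken from the JSDoc examples in the diff rather than from the (unshown) `livekit-manager` types.

```typescript
// Consumer sketch against the 0.4.0-beta.2 declarations below.
// Values are illustrative; option and field names come from the JSDoc in the diff.
import { HamsaVoiceAgent } from '@hamsa-ai/voice-agents-sdk';

const agent = new HamsaVoiceAgent('your_api_key');

await agent.start({
  agentId: 'your_agent_id',
  voiceEnablement: true,
  params: { userName: 'John' },
  // New StartOptions fields in this release:
  userId: 'user_123',                             // optional identifier for tracking/analytics
  preferHeadphonesForIosDevices: true,            // force headphones on iOS when available
  connectionDelay: { android: 3000, default: 0 }, // per-platform delay in ms (illustrative values)
  disableWakeLock: false,                         // keep the screen awake during the call
});

// Analytics getters are now typed and return null when not connected,
// so the old `any` escape hatch becomes an explicit null check.
const stats = agent.getConnectionStats();
if (stats) {
  console.log(`Latency: ${stats.latency}ms, quality: ${stats.quality}`);
}
```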
package/types/main.d.ts CHANGED
@@ -1,92 +1,715 @@
- export default HamsaVoiceAgent;
- export class HamsaVoiceAgent extends EventEmitter<[never]> {
+ import { EventEmitter } from 'events';
+ import LiveKitManager, { type AudioLevelsResult, type CallAnalyticsResult, type ConnectionStatsResult, type ParticipantData, type PerformanceMetricsResult, type TrackStatsResult } from './classes/livekit-manager';
+ import ScreenWakeLock from './classes/screen-wake-lock';
+ /**
+ * Configuration options for the HamsaVoiceAgent constructor
+ * Allows customization of API endpoints and other global settings
+ */
+ type HamsaVoiceAgentConfig = {
+ /** Base URL for the Hamsa API. Defaults to 'https://api.tryhamsa.com' */
+ API_URL?: string;
+ };
+ /**
+ * Configuration options for starting a voice agent conversation
+ *
+ * Defines the agent to use, conversation parameters, voice capabilities,
+ * and client-side tools that will be available during the conversation.
+ */
+ type ConnectionDelays = {
+ /** Delay in milliseconds for Android devices */
+ android?: number;
+ /** Delay in milliseconds for iOS devices */
+ ios?: number;
+ /** Default delay in milliseconds for other devices */
+ default?: number;
+ };
+ type StartOptions = {
+ /** Unique identifier of the voice agent to start (from Hamsa dashboard) */
+ agentId: string;
  /**
- * Creates a new HamsaVoiceAgent instance.
- *
- * @param {string} apiKey - API key.
- * @param {object} [config] - Optional config.
- * @param {string} [config.API_URL="https://api.tryhamsa.com"] - API URL.
+ * Optional parameters to pass to the agent for conversation customization
+ * These can be referenced in agent prompts using {{parameter_name}} syntax
+ * @example { userName: "John", orderNumber: "12345", userTier: "premium" }
  */
- constructor(apiKey: string, { API_URL, }?: {
- API_URL?: string;
- });
- liveKitManager: LiveKitManager;
+ params?: Record<string, unknown>;
+ /** Whether to enable voice interactions. If false, agent runs in text-only mode */
+ voiceEnablement?: boolean;
+ /** Array of client-side tools that the agent can call during conversations */
+ tools?: Tool[];
+ /** Optional user identifier for tracking and analytics */
+ userId?: string;
+ /** Force headphones usage on iOS devices when available */
+ preferHeadphonesForIosDevices?: boolean;
+ /** Platform-specific connection delays to prevent audio cutoff */
+ connectionDelay?: ConnectionDelays;
+ /** Disable wake lock to allow device sleep during conversation */
+ disableWakeLock?: boolean;
+ };
+ /**
+ * Definition of a client-side tool that can be called by the voice agent
+ *
+ * Tools allow agents to execute custom functions in the client environment,
+ * such as retrieving user data, making API calls, or performing calculations.
+ */
+ type Tool = {
+ /** Unique name for the function (used by agent to identify the tool) */
+ function_name: string;
+ /** Clear description of what the function does (helps agent decide when to use it) */
+ description: string;
+ /** Array of parameters the function accepts */
+ parameters?: ToolParameter[];
+ /** Array of parameter names that are required for the function */
+ required?: string[];
+ /** Internal function mapping (used for tool execution) */
+ func_map?: Record<string, unknown>;
+ };
+ /**
+ * Definition of a parameter for a client-side tool
+ * Describes the input that the function expects from the agent
+ */
+ type ToolParameter = {
+ /** Name of the parameter */
+ name: string;
+ /** Data type of the parameter (e.g., 'string', 'number', 'boolean') */
+ type: string;
+ /** Description of what the parameter represents */
+ description: string;
+ };
+ /**
+ * Response format for job details from the Hamsa API
+ *
+ * Returned by getJobDetails() method to check conversation completion
+ * status and retrieve additional job metadata.
+ */
+ type JobDetails = {
+ /** Current status of the job (e.g., 'COMPLETED', 'IN_PROGRESS', 'FAILED') */
+ status: string;
+ /** Additional job properties that may be returned by the API */
+ [key: string]: unknown;
+ };
+ /**
+ * HamsaVoiceAgent - Main SDK class for voice agent integration
+ *
+ * This class provides the primary interface for integrating Hamsa voice agents
+ * into web applications. It handles authentication, connection management,
+ * conversation lifecycle, analytics, and client-side tool execution.
+ *
+ * Key features:
+ * - Real-time voice communication with AI agents
+ * - Comprehensive analytics and quality monitoring
+ * - Client-side tool integration for extended functionality
+ * - Automatic screen wake lock management during calls
+ * - Event-driven architecture for reactive applications
+ * - Built-in error handling and reconnection logic
+ *
+ * @example Basic Usage
+ * ```typescript
+ * import { HamsaVoiceAgent } from '@hamsa-ai/voice-agents-sdk';
+ *
+ * const agent = new HamsaVoiceAgent('your_api_key');
+ *
+ * // Listen for events
+ * agent.on('callStarted', () => console.log('Call started'));
+ * agent.on('answerReceived', (text) => console.log('Agent said:', text));
+ * agent.on('transcriptionReceived', (text) => console.log('User said:', text));
+ *
+ * // Start conversation
+ * await agent.start({
+ * agentId: 'your_agent_id',
+ * voiceEnablement: true,
+ * params: { userName: 'John', context: 'support_inquiry' }
+ * });
+ * ```
+ *
+ * @example With Client-side Tools
+ * ```typescript
+ * const weatherTool = {
+ * function_name: 'getCurrentWeather',
+ * description: 'Gets current weather for a location',
+ * parameters: [
+ * { name: 'location', type: 'string', description: 'City name' }
+ * ],
+ * required: ['location'],
+ * fn: async (location) => {
+ * const response = await fetch(`/api/weather?city=${location}`);
+ * return response.json();
+ * }
+ * };
+ *
+ * await agent.start({
+ * agentId: 'weather_agent_id',
+ * tools: [weatherTool],
+ * voiceEnablement: true
+ * });
+ * ```
+ *
+ * @example Analytics Monitoring
+ * ```typescript
+ * // Real-time quality monitoring
+ * agent.on('connectionQualityChanged', ({ quality, metrics }) => {
+ * if (quality === 'poor') {
+ * showNetworkWarning();
+ * }
+ * });
+ *
+ * // Periodic analytics updates
+ * agent.on('analyticsUpdated', (analytics) => {
+ * updateDashboard({
+ * duration: analytics.performanceMetrics.callDuration,
+ * quality: analytics.connectionStats.quality,
+ * latency: analytics.connectionStats.latency
+ * });
+ * });
+ *
+ * // Get analytics snapshot anytime
+ * const analytics = agent.getCallAnalytics();
+ * ```
+ */
+ declare class HamsaVoiceAgent extends EventEmitter {
+ #private;
+ /** Default fallback output volume when not connected */
+ private static readonly DEFAULT_OUTPUT_VOLUME;
+ /** Default fallback input volume when not connected */
+ private static readonly DEFAULT_INPUT_VOLUME;
+ /** Internal LiveKit manager instance for WebRTC communication */
+ liveKitManager: LiveKitManager | null;
+ /** Hamsa API key for authentication */
  apiKey: string;
+ /** Base URL for Hamsa API endpoints */
  API_URL: string;
- jobId: any;
+ /** Job ID for tracking conversation completion status */
+ jobId: string | null;
+ /** Screen wake lock manager to prevent device sleep during calls */
  wakeLockManager: ScreenWakeLock;
  /**
- * Sets the volume for the audio playback.
- * @param {number} volume - Volume level between 0.0 and 1.0.
+ * Creates a new HamsaVoiceAgent instance
+ *
+ * @param apiKey - Your Hamsa API key (get from https://dashboard.tryhamsa.com)
+ * @param config - Optional configuration settings
+ * @param config.API_URL - Custom API endpoint URL (defaults to https://api.tryhamsa.com)
+ *
+ * @example
+ * ```typescript
+ * // Using default API endpoint
+ * const agent = new HamsaVoiceAgent('hamsa_api_key_here');
+ *
+ * // Using custom API endpoint
+ * const agent = new HamsaVoiceAgent('hamsa_api_key_here', {
+ * API_URL: 'https://custom-api.example.com'
+ * });
+ * ```
+ *
+ * @throws {Error} If apiKey is not provided or invalid
+ */
+ constructor(apiKey: string, { API_URL }?: HamsaVoiceAgentConfig);
+ /**
+ * Adjusts the volume level for voice agent audio playback
+ *
+ * Controls the volume of the voice agent's speech output. This affects
+ * all audio playback from the agent but does not change the user's
+ * microphone input level.
+ *
+ * @param volume - Volume level between 0.0 (muted) and 1.0 (full volume)
+ *
+ * @example
+ * ```typescript
+ * // Set to half volume
+ * agent.setVolume(0.5);
+ *
+ * // Mute agent completely
+ * agent.setVolume(0);
+ *
+ * // Full volume
+ * agent.setVolume(1.0);
+ *
+ * // Can be called during active conversation
+ * agent.on('callStarted', () => {
+ * agent.setVolume(0.8); // Slightly quieter
+ * });
+ * ```
  */
  setVolume(volume: number): void;
  /**
- * Starts a new voice agent call.
- * @param {object} options - Configuration options for the call.
- * @param {string} options.agentId - The ID of the voice agent to start.
- * @param {object} [options.params={}] - Optional parameters to pass to the agent.
- * @param {boolean} [options.voiceEnablement=false] - Whether to enable voice interaction.
- * @param {Array<object>} [options.tools=[]] - Array of client-side tools to register.
+ * Gets the current output volume level
+ *
+ * Returns the current volume setting for voice agent audio playback.
+ * This represents the playback volume for all voice agent audio streams.
+ *
+ * @returns Current output volume level (0.0 = muted, 1.0 = full volume)
+ *
+ * @example
+ * ```typescript
+ * const currentVolume = agent.getOutputVolume();
+ * console.log(`Volume: ${Math.round(currentVolume * 100)}%`);
+ * ```
+ */
+ getOutputVolume(): number;
+ /**
+ * Gets the current input volume level from the user's microphone
+ *
+ * Returns the current microphone input level for voice activity detection.
+ * Can be used to create visual feedback for user speaking indicators.
+ *
+ * @returns Current input volume level (0.0 = no input, 1.0 = maximum input)
+ *
+ * @example
+ * ```typescript
+ * // Create voice activity indicator
+ * setInterval(() => {
+ * const inputLevel = agent.getInputVolume();
+ * updateMicrophoneIndicator(inputLevel);
+ * }, 100);
+ * ```
+ */
+ getInputVolume(): number;
+ /**
+ * Mutes or unmutes the user's microphone
+ *
+ * Controls the user's microphone input to the voice agent conversation.
+ * When muted, the user's voice will not be transmitted to the agent.
+ *
+ * @param muted - True to mute microphone, false to unmute
+ *
+ * @example
+ * ```typescript
+ * // Mute microphone
+ * agent.setMicMuted(true);
+ *
+ * // Toggle microphone
+ * const isMuted = agent.isMicMuted();
+ * agent.setMicMuted(!isMuted);
+ * ```
+ */
+ setMicMuted(muted: boolean): void;
+ /**
+ * Checks if the user's microphone is currently muted
+ *
+ * @returns True if microphone is muted, false if unmuted
+ *
+ * @example
+ * ```typescript
+ * if (agent.isMicMuted()) {
+ * showUnmutePrompt();
+ * }
+ * ```
+ */
+ isMicMuted(): boolean;
+ /**
+ * @internal
+ * Notifies the agent about user activity
+ *
+ * Prevents the agent from interrupting when the user is actively interacting
+ * with the interface. The agent will not attempt to speak for at least 2 seconds
+ * after user activity is detected.
+ *
+ * @example
+ * ```typescript
+ * // Prevent interruptions while user is typing
+ * textInput.addEventListener('input', () => {
+ * agent.sendUserActivity();
+ * });
+ *
+ * // Prevent interruptions during UI interactions
+ * document.addEventListener('click', () => {
+ * agent.sendUserActivity();
+ * });
+ * ```
+ */
+ sendUserActivity(): void;
+ /**
+ * @internal
+ * Sends a contextual update to the agent
+ *
+ * Informs the agent about user actions or state changes that are not direct
+ * conversation messages but may influence the agent's responses. Unlike regular
+ * messages, contextual updates don't trigger the agent to take its turn in
+ * the conversation.
+ *
+ * @param context - Contextual information to send to the agent
+ *
+ * @example
+ * ```typescript
+ * // Inform agent about navigation
+ * agent.sendContextualUpdate("User navigated to checkout page");
+ *
+ * // Inform about app state changes
+ * agent.sendContextualUpdate("User's cart total: $127.50");
+ *
+ * // Inform about user preferences
+ * agent.sendContextualUpdate("User selected dark mode theme");
+ * ```
+ */
+ sendContextualUpdate(context: string): void;
+ /**
+ * Gets frequency data from the user's microphone input
+ *
+ * Returns frequency domain data for audio visualization and analysis.
+ * Can be used to create voice activity indicators, audio visualizers,
+ * or advanced voice processing features.
+ *
+ * @returns Uint8Array containing frequency data (0-255 per frequency bin)
+ *
+ * @example
+ * ```typescript
+ * // Create simple audio visualizer
+ * function updateVisualizer() {
+ * const frequencyData = agent.getInputByteFrequencyData();
+ * const average = frequencyData.reduce((a, b) => a + b) / frequencyData.length;
+ * const percentage = Math.round((average / 255) * 100);
+ * document.getElementById('micLevel').style.width = `${percentage}%`;
+ * }
+ * setInterval(updateVisualizer, 50);
+ * ```
+ */
+ getInputByteFrequencyData(): Uint8Array;
+ /**
+ * Gets frequency data from the agent's audio output
+ *
+ * Returns frequency domain data from the agent's voice for analysis
+ * and visualization. Useful for creating voice characteristic displays
+ * or audio processing features.
+ *
+ * @returns Uint8Array containing frequency data (0-255 per frequency bin)
+ *
+ * @example
+ * ```typescript
+ * // Analyze agent voice characteristics
+ * agent.on('speaking', () => {
+ * const interval = setInterval(() => {
+ * const frequencyData = agent.getOutputByteFrequencyData();
+ * const dominantFreq = findDominantFrequency(frequencyData);
+ * updateVoiceAnalysis(dominantFreq);
+ * }, 100);
+ *
+ * agent.once('listening', () => clearInterval(interval));
+ * });
+ * ```
+ */
+ getOutputByteFrequencyData(): Uint8Array;
+ /**
+ * Initiates a new voice agent conversation
+ *
+ * This is the primary method for starting interactions with a voice agent.
+ * It handles authentication, connection establishment, tool registration,
+ * and event forwarding. The method is asynchronous and will emit events
+ * to indicate connection status and conversation progress.
+ *
+ * @param options - Configuration options for the conversation
+ * @param options.agentId - Unique identifier of the voice agent (from Hamsa dashboard)
+ * @param options.params - Parameters to customize the conversation context
+ * @param options.voiceEnablement - Enable voice interactions (default: false for text-only)
+ * @param options.tools - Client-side tools available to the agent
+ *
+ * @throws {Error} Authentication failures, network errors, or invalid configuration
+ *
+ * @example Basic voice conversation
+ * ```typescript
+ * try {
+ * await agent.start({
+ * agentId: 'agent_12345',
+ * voiceEnablement: true,
+ * params: {
+ * userName: 'Alice',
+ * userTier: 'premium',
+ * sessionContext: 'product_support'
+ * }
+ * });
+ * console.log('Voice agent conversation started');
+ * } catch (error) {
+ * console.error('Failed to start conversation:', error);
+ * }
+ * ```
+ *
+ * @example With custom tools
+ * ```typescript
+ * const customerDataTool = {
+ * function_name: 'getCustomerData',
+ * description: 'Retrieves customer account information',
+ * parameters: [
+ * { name: 'customerId', type: 'string', description: 'Customer ID' }
+ * ],
+ * required: ['customerId'],
+ * fn: async (customerId) => {
+ * return await customerAPI.getProfile(customerId);
+ * }
+ * };
+ *
+ * await agent.start({
+ * agentId: 'support_agent',
+ * voiceEnablement: true,
+ * tools: [customerDataTool],
+ * params: { department: 'billing' }
+ * });
+ * ```
+ *
+ * @example Event handling
+ * ```typescript
+ * // Set up event listeners before starting
+ * agent.on('callStarted', () => {
+ * console.log('Conversation began');
+ * startRecordingMetrics();
+ * });
+ *
+ * agent.on('error', (error) => {
+ * console.error('Conversation error:', error);
+ * handleConversationError(error);
+ * });
+ *
+ * await agent.start({ agentId: 'my_agent', voiceEnablement: true });
+ * ```
  */
- start({ agentId, params, voiceEnablement, tools, }: {
- agentId: string;
- params?: object;
- voiceEnablement?: boolean;
- tools?: Array<object>;
- }): Promise<void>;
+ start({ agentId, params, voiceEnablement, tools, userId: _userId, preferHeadphonesForIosDevices: _preferHeadphonesForIosDevices, connectionDelay: _connectionDelay, disableWakeLock: _disableWakeLock, }: StartOptions): Promise<void>;
  /**
- * Ends the current voice agent call.
+ * Terminates the current voice agent conversation
+ *
+ * Safely ends the conversation, disconnects from the WebRTC session,
+ * releases system resources (including screen wake lock), and performs
+ * cleanup. This method should be called when the conversation is complete.
+ *
+ * @example
+ * ```typescript
+ * // End conversation when user clicks hang up
+ * hangupButton.addEventListener('click', () => {
+ * agent.end();
+ * });
+ *
+ * // End conversation after timeout
+ * setTimeout(() => {
+ * agent.end();
+ * console.log('Conversation ended due to timeout');
+ * }, 300000); // 5 minutes
+ *
+ * // Listen for end event
+ * agent.on('callEnded', () => {
+ * console.log('Conversation terminated');
+ * updateUI('disconnected');
+ * saveConversationSummary();
+ * });
+ * ```
  */
  end(): void;
  /**
- * Pauses the current voice agent call.
+ * Temporarily pauses the voice agent conversation
+ *
+ * Pauses audio transmission and reception while maintaining the underlying
+ * connection. The conversation can be resumed later using resume(). This
+ * is useful for temporary interruptions without ending the entire session.
+ *
+ * @example
+ * ```typescript
+ * // Pause when user needs to take another call
+ * pauseButton.addEventListener('click', () => {
+ * agent.pause();
+ * console.log('Conversation paused');
+ * });
+ *
+ * // Auto-pause after period of silence
+ * let silenceTimeout;
+ * agent.on('listening', () => {
+ * silenceTimeout = setTimeout(() => {
+ * agent.pause();
+ * showResumePrompt();
+ * }, 60000); // 1 minute of silence
+ * });
+ *
+ * agent.on('speaking', () => {
+ * clearTimeout(silenceTimeout);
+ * });
+ *
+ * // Listen for pause event
+ * agent.on('callPaused', () => {
+ * showPausedIndicator();
+ * disableMicrophone();
+ * });
+ * ```
  */
  pause(): void;
  /**
- * Resumes the paused voice agent call.
+ * Resumes a paused voice agent conversation
+ *
+ * Restores audio transmission and reception, continuing the conversation
+ * from where it was paused. Re-acquires screen wake lock to prevent
+ * device sleep during active conversation.
+ *
+ * @example
+ * ```typescript
+ * // Resume when user is ready to continue
+ * resumeButton.addEventListener('click', () => {
+ * agent.resume();
+ * console.log('Conversation resumed');
+ * });
+ *
+ * // Resume automatically after user interaction
+ * document.addEventListener('click', () => {
+ * if (agent.isPaused) {
+ * agent.resume();
+ * }
+ * });
+ *
+ * // Listen for resume event
+ * agent.on('callResumed', () => {
+ * hidePausedIndicator();
+ * enableMicrophone();
+ * showActiveIndicator();
+ * });
+ * ```
  */
  resume(): void;
  /**
  * Retrieves job details from the Hamsa API using the stored jobId.
  * Implements retry logic with exponential backoff.
- * @param {number} [maxRetries=5] - Maximum number of retry attempts.
- * @param {number} [initialRetryInterval=1000] - Initial delay between retries in milliseconds.
- * @param {number} [backoffFactor=2] - Factor by which the retry interval increases each attempt.
- * @returns {Promise<Object>} Job details object.
+ * @param maxRetries - Maximum number of retry attempts.
+ * @param initialRetryInterval - Initial delay between retries in milliseconds.
+ * @param backoffFactor - Factor by which the retry interval increases each attempt.
+ * @returns Job details object.
  */
- getJobDetails(maxRetries?: number, initialRetryInterval?: number, backoffFactor?: number): Promise<any>;
+ getJobDetails(maxRetries?: number, initialRetryInterval?: number, backoffFactor?: number): Promise<JobDetails>;
  /**
- * Gets current connection statistics.
- * @returns {Object} Connection statistics including latency, quality, attempts
+ * Retrieves current network connection statistics and quality metrics
+ *
+ * @returns Connection statistics object or null if not connected
+ *
+ * @example
+ * ```typescript
+ * const stats = agent.getConnectionStats();
+ * if (stats) {
+ * console.log(`Latency: ${stats.latency}ms`);
+ * console.log(`Quality: ${stats.quality}`);
+ * console.log(`Packet Loss: ${stats.packetLoss}%`);
+ *
+ * // Show network warning for poor quality
+ * if (stats.quality === 'poor') {
+ * showNetworkWarning(stats);
+ * }
+ * }
+ * ```
  */
- getConnectionStats(): any;
+ getConnectionStats(): ConnectionStatsResult | null;
  /**
- * Gets current audio levels and metrics.
- * @returns {Object} Audio metrics including levels, volume, speaking time
+ * Retrieves current audio levels and quality metrics for both user and agent
+ *
+ * @returns Audio metrics object or null if not connected
+ *
+ * @example
+ * ```typescript
+ * const audio = agent.getAudioLevels();
+ * if (audio) {
+ * // Update UI audio level meters
+ * updateAudioMeter('user', audio.userAudioLevel);
+ * updateAudioMeter('agent', audio.agentAudioLevel);
+ *
+ * // Display speaking time statistics
+ * const userMinutes = Math.floor(audio.userSpeakingTime / 60000);
+ * const agentMinutes = Math.floor(audio.agentSpeakingTime / 60000);
+ * console.log(`User spoke for ${userMinutes} minutes`);
+ * console.log(`Agent spoke for ${agentMinutes} minutes`);
+ * }
+ * ```
  */
- getAudioLevels(): any;
+ getAudioLevels(): AudioLevelsResult | null;
  /**
- * Gets current performance metrics.
- * @returns {Object} Performance metrics including response times, latency
+ * Retrieves current performance metrics including response times and call duration
+ *
+ * @returns Performance metrics object or null if not connected
+ *
+ * @example
+ * ```typescript
+ * const perf = agent.getPerformanceMetrics();
+ * if (perf) {
+ * // Monitor response time for quality assurance
+ * if (perf.responseTime > 3000) {
+ * console.warn('High response time:', perf.responseTime + 'ms');
+ * }
+ *
+ * // Display call duration
+ * const minutes = Math.floor(perf.callDuration / 60000);
+ * const seconds = Math.floor((perf.callDuration % 60000) / 1000);
+ * updateTimer(`${minutes}:${seconds.toString().padStart(2, '0')}`);
+ * }
+ * ```
  */
- getPerformanceMetrics(): any;
+ getPerformanceMetrics(): PerformanceMetricsResult | null;
  /**
- * Gets current participant information.
- * @returns {Array} Array of participant data
+ * Retrieves information about all participants in the conversation
+ *
+ * @returns Array of participant data objects (empty array if not connected)
+ *
+ * @example
+ * ```typescript
+ * const participants = agent.getParticipants();
+ *
+ * participants.forEach(participant => {
+ * console.log(`Participant: ${participant.identity}`);
+ * console.log(`Connected: ${new Date(participant.connectionTime)}`);
+ *
+ * // Display participant info in UI
+ * if (participant.identity.includes('agent')) {
+ * showAgentStatus('connected', participant.metadata);
+ * }
+ * });
+ *
+ * // Check if agent is present
+ * const hasAgent = participants.some(p => p.identity.includes('agent'));
+ * ```
  */
- getParticipants(): any[];
+ getParticipants(): ParticipantData[];
  /**
- * Gets current track statistics.
- * @returns {Object} Track statistics and details
+ * Retrieves current audio track statistics and stream information
+ *
+ * @returns Track statistics object or null if not connected
+ *
+ * @example
+ * ```typescript
+ * const trackStats = agent.getTrackStats();
+ * if (trackStats) {
+ * console.log(`Active tracks: ${trackStats.activeTracks}/${trackStats.totalTracks}`);
+ * console.log(`Audio elements: ${trackStats.audioElements}`);
+ *
+ * // Check track health
+ * if (trackStats.activeTracks === 0) {
+ * console.warn('No active audio tracks');
+ * showAudioWarning();
+ * }
+ * }
+ * ```
  */
- getTrackStats(): any;
+ getTrackStats(): TrackStatsResult | null;
  /**
- * Gets comprehensive call analytics.
- * @returns {Object} Complete analytics data including all metrics
+ * Retrieves comprehensive analytics combining all metrics into a single snapshot
+ *
+ * This is the primary method for accessing complete conversation analytics,
+ * combining connection statistics, audio metrics, performance data, participant
+ * information, and track statistics into a unified result.
+ *
+ * @returns Complete analytics object or null if not connected
+ *
+ * @example
+ * ```typescript
+ * const analytics = agent.getCallAnalytics();
+ * if (analytics) {
+ * // Log comprehensive conversation summary
+ * console.log('=== Conversation Analytics ===');
+ * console.log(`Duration: ${analytics.performanceMetrics.callDuration}ms`);
+ * console.log(`Quality: ${analytics.connectionStats.quality}`);
+ * console.log(`Latency: ${analytics.connectionStats.latency}ms`);
+ * console.log(`Participants: ${analytics.participants.length}`);
+ *
+ * // Send to analytics service
+ * analyticsService.recordConversation({
+ * sessionId: generateSessionId(),
+ * agentId: currentAgentId,
+ * timestamp: Date.now(),
+ * metrics: analytics
+ * });
+ *
+ * // Check for quality issues
+ * if (analytics.connectionStats.packetLoss > 5) {
+ * reportNetworkIssue(analytics);
+ * }
+ * }
+ * ```
  */
- getCallAnalytics(): any;
- #private;
+ getCallAnalytics(): CallAnalyticsResult | null;
  }
- import { EventEmitter } from "events";
- import LiveKitManager from "./classes/livekit_manager";
- import ScreenWakeLock from "./classes/screen_wake_lock";
+ export { HamsaVoiceAgent };
+ export default HamsaVoiceAgent;
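Read as a whole, the new surface composes into a call-lifecycle flow: mute toggling via `setMicMuted`/`isMicMuted`, level metering via `getInputVolume`, and a typed, null-guarded analytics snapshot at teardown. A sketch against these declarations follows; the event names (`callStarted`, `callEnded`) and the nested analytics fields come from the JSDoc examples above, and the UI hooks (`muteButton`, `updateMeter`) are hypothetical stand-ins.

```typescript
import { HamsaVoiceAgent } from '@hamsa-ai/voice-agents-sdk';

// Hypothetical UI hooks -- stand-ins for whatever the host app provides.
declare const muteButton: HTMLButtonElement;
declare function updateMeter(level: number): void;

const agent = new HamsaVoiceAgent('your_api_key');

// setMicMuted/isMicMuted are new in this release; together they make a toggle.
muteButton.addEventListener('click', () => {
  agent.setMicMuted(!agent.isMicMuted());
});

let meterTimer: ReturnType<typeof setInterval> | undefined;

agent.on('callStarted', () => {
  // getInputVolume() always returns a number; per the declarations it falls
  // back to a default when not connected, so no null check is needed here.
  meterTimer = setInterval(() => updateMeter(agent.getInputVolume()), 100);
});

agent.on('callEnded', () => {
  clearInterval(meterTimer);
  // getCallAnalytics() is typed `CallAnalyticsResult | null`: guard before use.
  const analytics = agent.getCallAnalytics();
  if (analytics) {
    console.log(`Call lasted ${analytics.performanceMetrics.callDuration}ms`);
  }
});
```

Note the trade-off the new signatures encode: connection-dependent getters return `null` rather than throwing when there is no active session, so consumers opt into handling the disconnected case at compile time instead of catching runtime errors.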