@volley/recognition-client-sdk 0.1.296 → 0.1.297

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2302 @@
1
+ import { z } from 'zod';
2
+
3
+ /**
4
+ * Provider types and enums for recognition services
5
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
6
+ */
7
+ /**
8
+ * Supported speech recognition providers
9
+ */
10
+ declare enum RecognitionProvider {
11
+ ASSEMBLYAI = "assemblyai",
12
+ DEEPGRAM = "deepgram",
13
+ GOOGLE = "google",
14
+ GEMINI_BATCH = "gemini-batch",
15
+ OPENAI_BATCH = "openai-batch"
16
+ }
17
+ /**
18
+ * ASR API type - distinguishes between streaming and file-based transcription APIs
19
+ * - STREAMING: Real-time streaming APIs (Deepgram, AssemblyAI, Google)
20
+ * - FILE_BASED: File upload/batch APIs (OpenAI Batch, Gemini Batch)
21
+ */
22
+ declare enum ASRApiType {
23
+ STREAMING = "streaming",
24
+ FILE_BASED = "file-based"
25
+ }
26
+ /**
27
+ * Deepgram model names
28
+ */
29
+ declare enum DeepgramModel {
30
+ NOVA_2 = "nova-2",
31
+ NOVA_3 = "nova-3",
32
+ FLUX_GENERAL_EN = "flux-general-en"
33
+ }
34
+ /**
35
+ * Google Cloud Speech models
36
+ * @see https://cloud.google.com/speech-to-text/docs/transcription-model
37
+ */
38
+ declare enum GoogleModel {
39
+ LATEST_LONG = "latest_long",
40
+ LATEST_SHORT = "latest_short",
41
+ TELEPHONY = "telephony",
42
+ TELEPHONY_SHORT = "telephony_short",
43
+ MEDICAL_DICTATION = "medical_dictation",
44
+ MEDICAL_CONVERSATION = "medical_conversation",
45
+ DEFAULT = "default",
46
+ COMMAND_AND_SEARCH = "command_and_search",
47
+ PHONE_CALL = "phone_call",
48
+ VIDEO = "video"
49
+ }
50
+ /**
51
+ * Type alias for any model from any provider
52
+ */
53
+ type RecognitionModel = DeepgramModel | GoogleModel | string;
54
+
55
+ /**
56
+ * Audio encoding types
57
+ */
58
+ declare enum AudioEncoding {
59
+ ENCODING_UNSPECIFIED = 0,
60
+ LINEAR16 = 1,
61
+ OGG_OPUS = 2,
62
+ FLAC = 3,
63
+ MULAW = 4,
64
+ ALAW = 5
65
+ }
66
+ declare namespace AudioEncoding {
67
+ /**
68
+ * Convert numeric ID to AudioEncoding enum
69
+ * @param id - Numeric encoding identifier (0-5)
70
+ * @returns AudioEncoding enum value or undefined if invalid
71
+ */
72
+ function fromId(id: number): AudioEncoding | undefined;
73
+ /**
74
+ * Convert string name to AudioEncoding enum
75
+ * @param nameStr - String name like "linear16", "LINEAR16", "ogg_opus", "OGG_OPUS", etc. (case insensitive)
76
+ * @returns AudioEncoding enum value or undefined if invalid
77
+ */
78
+ function fromName(nameStr: string): AudioEncoding | undefined;
79
+ /**
80
+ * Convert AudioEncoding enum to numeric ID
81
+ * @param encoding - AudioEncoding enum value
82
+ * @returns Numeric ID (0-5)
83
+ */
84
+ function toId(encoding: AudioEncoding): number;
85
+ /**
86
+ * Convert AudioEncoding enum to string name
87
+ * @param encoding - AudioEncoding enum value
88
+ * @returns String name like "LINEAR16", "MULAW", etc.
89
+ */
90
+ function toName(encoding: AudioEncoding): string;
91
+ /**
92
+ * Check if a numeric ID is a valid encoding
93
+ * @param id - Numeric identifier to validate
94
+ * @returns true if valid encoding ID
95
+ */
96
+ function isIdValid(id: number): boolean;
97
+ /**
98
+ * Check if a string name is a valid encoding
99
+ * @param nameStr - String name to validate
100
+ * @returns true if valid encoding name
101
+ */
102
+ function isNameValid(nameStr: string): boolean;
103
+ }
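For illustration, a minimal sketch of how a consumer might use these converters to normalize an encoding value that arrives as either a numeric ID or a string name (assuming the SDK's exports are in scope; `normalizeEncoding` is a hypothetical helper, not part of the SDK):

```typescript
// Hypothetical helper: accepts a numeric ID or a string name and
// returns a validated AudioEncoding, falling back to LINEAR16.
function normalizeEncoding(value: number | string): AudioEncoding {
  const enc = typeof value === 'number'
    ? AudioEncoding.fromId(value)
    : AudioEncoding.fromName(value);
  return enc ?? AudioEncoding.LINEAR16;
}

normalizeEncoding('ogg_opus'); // AudioEncoding.OGG_OPUS
normalizeEncoding(4);          // AudioEncoding.MULAW
normalizeEncoding('mp3');      // not enumerated: falls back to LINEAR16
```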
104
+ /**
105
+ * Common sample rates (in Hz)
106
+ */
107
+ declare enum SampleRate {
108
+ RATE_8000 = 8000,
109
+ RATE_16000 = 16000,
110
+ RATE_22050 = 22050,
111
+ RATE_24000 = 24000,
112
+ RATE_32000 = 32000,
113
+ RATE_44100 = 44100,
114
+ RATE_48000 = 48000
115
+ }
116
+ declare namespace SampleRate {
117
+ /**
118
+ * Convert Hz value to SampleRate enum
119
+ * @param hz - Sample rate in Hz (8000, 16000, etc.)
120
+ * @returns SampleRate enum value or undefined if invalid
121
+ */
122
+ function fromHz(hz: number): SampleRate | undefined;
123
+ /**
124
+ * Convert string name to SampleRate enum
125
+ * @param nameStr - String name like "rate_8000", "RATE_16000", etc. (case insensitive)
126
+ * @returns SampleRate enum value or undefined if invalid
127
+ */
128
+ function fromName(nameStr: string): SampleRate | undefined;
129
+ /**
130
+ * Convert SampleRate enum to Hz value
131
+ * @param rate - SampleRate enum value
132
+ * @returns Hz value (8000, 16000, etc.)
133
+ */
134
+ function toHz(rate: SampleRate): number;
135
+ /**
136
+ * Convert SampleRate enum to string name
137
+ * @param rate - SampleRate enum value
138
+ * @returns String name like "RATE_8000", "RATE_16000", etc.
139
+ */
140
+ function toName(rate: SampleRate): string;
141
+ /**
142
+ * Check if a numeric Hz value is a valid sample rate
143
+ * @param hz - Hz value to validate
144
+ * @returns true if valid sample rate
145
+ */
146
+ function isHzValid(hz: number): boolean;
147
+ /**
148
+ * Check if a string name is a valid sample rate
149
+ * @param nameStr - String name to validate
150
+ * @returns true if valid sample rate name
151
+ */
152
+ function isNameValid(nameStr: string): boolean;
153
+ }
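Similarly, a sketch of validating a device's reported rate before building a request (`assertSupportedRate` is illustrative only):

```typescript
// Illustrative guard: reject rates the SDK does not enumerate.
function assertSupportedRate(hz: number): SampleRate {
  const rate = SampleRate.fromHz(hz);
  if (rate === undefined) {
    throw new Error(`Unsupported sample rate: ${hz} Hz`);
  }
  return rate; // e.g. SampleRate.RATE_16000 for 16000
}
```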
154
+ /**
155
+ * Supported languages for recognition
156
+ * Using BCP-47 language tags
157
+ */
158
+ declare enum Language {
159
+ ENGLISH_US = "en-US",
160
+ ENGLISH_GB = "en-GB",
161
+ SPANISH_ES = "es-ES",
162
+ SPANISH_MX = "es-MX",
163
+ FRENCH_FR = "fr-FR",
164
+ GERMAN_DE = "de-DE",
165
+ ITALIAN_IT = "it-IT",
166
+ PORTUGUESE_BR = "pt-BR",
167
+ JAPANESE_JP = "ja-JP",
168
+ KOREAN_KR = "ko-KR",
169
+ CHINESE_CN = "zh-CN",
170
+ CHINESE_TW = "zh-TW"
171
+ }
172
+
173
+ /**
174
+ * Recognition Result Types V1
175
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
176
+ * Types and schemas for recognition results sent to SDK clients
177
+ */
178
+
179
+ /**
180
+ * Message type discriminator for recognition results V1
181
+ */
182
+ declare enum RecognitionResultTypeV1 {
183
+ TRANSCRIPTION = "Transcription",// Transcript message contains all in the history. result of STT(Speech to text)
184
+ FUNCTION_CALL = "FunctionCall",// Not supported in P1.result of STF(Speedch to function call) Function call schema
185
+ METADATA = "Metadata",// Metadata message contains all the timestamps, provider info, and ASR config
186
+ ERROR = "Error",// Error message contains the error details
187
+ CLIENT_CONTROL_MESSAGE = "ClientControlMessage"
188
+ }
189
+ /**
190
+ * Transcription result V1 - contains transcript message
191
+ * In the long run the game side should not need to know about it. In the short run it is sent back to the client.
192
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
193
+ */
194
+ declare const TranscriptionResultSchemaV1: z.ZodObject<{
195
+ type: z.ZodLiteral<RecognitionResultTypeV1.TRANSCRIPTION>;
196
+ audioUtteranceId: z.ZodString;
197
+ finalTranscript: z.ZodString;
198
+ finalTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
199
+ pendingTranscript: z.ZodOptional<z.ZodString>;
200
+ pendingTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
201
+ is_finished: z.ZodBoolean;
202
+ voiceStart: z.ZodOptional<z.ZodNumber>;
203
+ voiceDuration: z.ZodOptional<z.ZodNumber>;
204
+ voiceEnd: z.ZodOptional<z.ZodNumber>;
205
+ startTimestamp: z.ZodOptional<z.ZodNumber>;
206
+ endTimestamp: z.ZodOptional<z.ZodNumber>;
207
+ receivedAtMs: z.ZodOptional<z.ZodNumber>;
208
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
209
+ }, "strip", z.ZodTypeAny, {
210
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
211
+ audioUtteranceId: string;
212
+ finalTranscript: string;
213
+ is_finished: boolean;
214
+ finalTranscriptConfidence?: number | undefined;
215
+ pendingTranscript?: string | undefined;
216
+ pendingTranscriptConfidence?: number | undefined;
217
+ voiceStart?: number | undefined;
218
+ voiceDuration?: number | undefined;
219
+ voiceEnd?: number | undefined;
220
+ startTimestamp?: number | undefined;
221
+ endTimestamp?: number | undefined;
222
+ receivedAtMs?: number | undefined;
223
+ accumulatedAudioTimeMs?: number | undefined;
224
+ }, {
225
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
226
+ audioUtteranceId: string;
227
+ finalTranscript: string;
228
+ is_finished: boolean;
229
+ finalTranscriptConfidence?: number | undefined;
230
+ pendingTranscript?: string | undefined;
231
+ pendingTranscriptConfidence?: number | undefined;
232
+ voiceStart?: number | undefined;
233
+ voiceDuration?: number | undefined;
234
+ voiceEnd?: number | undefined;
235
+ startTimestamp?: number | undefined;
236
+ endTimestamp?: number | undefined;
237
+ receivedAtMs?: number | undefined;
238
+ accumulatedAudioTimeMs?: number | undefined;
239
+ }>;
240
+ type TranscriptionResultV1 = z.infer<typeof TranscriptionResultSchemaV1>;
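As a sketch of how an incoming message might be validated against this schema (assuming the raw payload arrives as JSON text; the handler is illustrative, not part of the SDK):

```typescript
// Assumption for illustration: `raw` is a JSON string received from the server.
function handleTranscription(raw: string): TranscriptionResultV1 | undefined {
  const parsed = TranscriptionResultSchemaV1.safeParse(JSON.parse(raw));
  if (!parsed.success) return undefined; // not a transcription message
  const result = parsed.data;
  if (result.is_finished) {
    console.log('Final:', result.finalTranscript);
  } else if (result.pendingTranscript !== undefined) {
    console.log('Interim:', result.pendingTranscript);
  }
  return result;
}
```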
241
+ /**
242
+ * Function call result V1 - similar to LLM function call
243
+ * In the long run the game server should know about it, rather than the TV or client.
244
+ */
245
+ declare const FunctionCallResultSchemaV1: z.ZodObject<{
246
+ type: z.ZodLiteral<RecognitionResultTypeV1.FUNCTION_CALL>;
247
+ audioUtteranceId: z.ZodString;
248
+ functionName: z.ZodString;
249
+ functionArgJson: z.ZodString;
250
+ }, "strip", z.ZodTypeAny, {
251
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
252
+ audioUtteranceId: string;
253
+ functionName: string;
254
+ functionArgJson: string;
255
+ }, {
256
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
257
+ audioUtteranceId: string;
258
+ functionName: string;
259
+ functionArgJson: string;
260
+ }>;
261
+ type FunctionCallResultV1 = z.infer<typeof FunctionCallResultSchemaV1>;
262
+ /**
263
+ * Metadata result V1 - contains metadata, timing information, and ASR config
264
+ * Sent when the provider connection closes to provide final timing metrics and config
265
+ * In the long run the game server should know about it, rather than the TV or client.
266
+ */
267
+ declare const MetadataResultSchemaV1: z.ZodObject<{
268
+ type: z.ZodLiteral<RecognitionResultTypeV1.METADATA>;
269
+ audioUtteranceId: z.ZodString;
270
+ recordingStartMs: z.ZodOptional<z.ZodNumber>;
271
+ recordingEndMs: z.ZodOptional<z.ZodNumber>;
272
+ transcriptEndMs: z.ZodOptional<z.ZodNumber>;
273
+ socketCloseAtMs: z.ZodOptional<z.ZodNumber>;
274
+ duration: z.ZodOptional<z.ZodNumber>;
275
+ volume: z.ZodOptional<z.ZodNumber>;
276
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
277
+ costInUSD: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
278
+ apiType: z.ZodOptional<z.ZodNativeEnum<typeof ASRApiType>>;
279
+ asrConfig: z.ZodOptional<z.ZodString>;
280
+ rawAsrMetadata: z.ZodOptional<z.ZodString>;
281
+ }, "strip", z.ZodTypeAny, {
282
+ type: RecognitionResultTypeV1.METADATA;
283
+ audioUtteranceId: string;
284
+ recordingStartMs?: number | undefined;
285
+ recordingEndMs?: number | undefined;
286
+ transcriptEndMs?: number | undefined;
287
+ socketCloseAtMs?: number | undefined;
288
+ duration?: number | undefined;
289
+ volume?: number | undefined;
290
+ accumulatedAudioTimeMs?: number | undefined;
291
+ costInUSD?: number | undefined;
292
+ apiType?: ASRApiType | undefined;
293
+ asrConfig?: string | undefined;
294
+ rawAsrMetadata?: string | undefined;
295
+ }, {
296
+ type: RecognitionResultTypeV1.METADATA;
297
+ audioUtteranceId: string;
298
+ recordingStartMs?: number | undefined;
299
+ recordingEndMs?: number | undefined;
300
+ transcriptEndMs?: number | undefined;
301
+ socketCloseAtMs?: number | undefined;
302
+ duration?: number | undefined;
303
+ volume?: number | undefined;
304
+ accumulatedAudioTimeMs?: number | undefined;
305
+ costInUSD?: number | undefined;
306
+ apiType?: ASRApiType | undefined;
307
+ asrConfig?: string | undefined;
308
+ rawAsrMetadata?: string | undefined;
309
+ }>;
310
+ type MetadataResultV1 = z.infer<typeof MetadataResultSchemaV1>;
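Since the metadata message carries the session's timing fields, one plausible use (sketched here, not prescribed by the SDK) is deriving a simple end-of-utterance latency metric:

```typescript
// Illustrative only: transcript latency as the gap between the end of
// recording and the final transcript, when both timestamps are present.
function transcriptLatencyMs(meta: MetadataResultV1): number | undefined {
  if (meta.recordingEndMs === undefined || meta.transcriptEndMs === undefined) {
    return undefined;
  }
  return meta.transcriptEndMs - meta.recordingEndMs;
}
```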
311
+ /**
312
+ * Error type enum V1 - categorizes different types of errors
313
+ */
314
+ declare enum ErrorTypeV1 {
315
+ AUTHENTICATION_ERROR = "authentication_error",// Authentication/authorization failures
316
+ VALIDATION_ERROR = "validation_error",// Invalid input or configuration
317
+ PROVIDER_ERROR = "provider_error",// Error from ASR provider (Deepgram, Google, etc.) Unlikely to happen with fall
318
+ TIMEOUT_ERROR = "timeout_error",// Request or operation timeout. Likely business logic did not handle timeout.
319
+ QUOTA_EXCEEDED = "quota_exceeded",// Quota or rate limit exceeded. Unlikely to happen with fallbakcs
320
+ CONNECTION_ERROR = "connection_error",// Connection establishment or network error
321
+ UNKNOWN_ERROR = "unknown_error"
322
+ }
323
+ /**
324
+ * Error result V1 - contains error message
325
+ * In the long run the game server should know about it, rather than the TV or client.
326
+ */
327
+ declare const ErrorResultSchemaV1: z.ZodObject<{
328
+ type: z.ZodLiteral<RecognitionResultTypeV1.ERROR>;
329
+ audioUtteranceId: z.ZodString;
330
+ errorType: z.ZodOptional<z.ZodNativeEnum<typeof ErrorTypeV1>>;
331
+ message: z.ZodOptional<z.ZodString>;
332
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
333
+ description: z.ZodOptional<z.ZodString>;
334
+ }, "strip", z.ZodTypeAny, {
335
+ type: RecognitionResultTypeV1.ERROR;
336
+ audioUtteranceId: string;
337
+ errorType?: ErrorTypeV1 | undefined;
338
+ message?: string | undefined;
339
+ code?: string | number | undefined;
340
+ description?: string | undefined;
341
+ }, {
342
+ type: RecognitionResultTypeV1.ERROR;
343
+ audioUtteranceId: string;
344
+ errorType?: ErrorTypeV1 | undefined;
345
+ message?: string | undefined;
346
+ code?: string | number | undefined;
347
+ description?: string | undefined;
348
+ }>;
349
+ type ErrorResultV1 = z.infer<typeof ErrorResultSchemaV1>;
350
+ /**
351
+ * Client control actions enum V1
352
+ * Actions that can be sent from server to client to control the recognition stream
353
+ * In the long run the audio client (mic) should know about it, rather than the servers.
354
+ */
355
+ declare enum ClientControlActionV1 {
356
+ READY_FOR_UPLOADING_RECORDING = "ready_for_uploading_recording",// Server is ready to receive audio; the client should start uploading the recording (includes audioUtteranceId)
357
+ STOP_RECORDING = "stop_recording"
358
+ }
359
+
360
+ /**
361
+ * Error Exception Types
362
+ *
363
+ * Defines structured exception types for each ErrorTypeV1 category.
364
+ * Each exception type has metadata about whether it's immediately available
365
+ * (can be shown to user right away vs needs investigation/retry).
366
+ */
367
+
368
+ /**
369
+ * Authentication/Authorization Error
370
+ * isImmediatelyAvailable: false
371
+ * These are system configuration issues, not user-facing
372
+ */
373
+ declare const AuthenticationExceptionSchema: z.ZodObject<{
374
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
375
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
376
+ message: z.ZodString;
377
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
378
+ description: z.ZodOptional<z.ZodString>;
379
+ timestamp: z.ZodOptional<z.ZodNumber>;
380
+ errorType: z.ZodLiteral<ErrorTypeV1.AUTHENTICATION_ERROR>;
381
+ isImmediatelyAvailable: z.ZodLiteral<false>;
382
+ service: z.ZodOptional<z.ZodString>;
383
+ authMethod: z.ZodOptional<z.ZodString>;
384
+ }, "strip", z.ZodTypeAny, {
385
+ message: string;
386
+ errorType: ErrorTypeV1.AUTHENTICATION_ERROR;
387
+ isImmediatelyAvailable: false;
388
+ provider?: RecognitionProvider | undefined;
389
+ code?: string | number | undefined;
390
+ audioUtteranceId?: string | undefined;
391
+ description?: string | undefined;
392
+ timestamp?: number | undefined;
393
+ service?: string | undefined;
394
+ authMethod?: string | undefined;
395
+ }, {
396
+ message: string;
397
+ errorType: ErrorTypeV1.AUTHENTICATION_ERROR;
398
+ isImmediatelyAvailable: false;
399
+ provider?: RecognitionProvider | undefined;
400
+ code?: string | number | undefined;
401
+ audioUtteranceId?: string | undefined;
402
+ description?: string | undefined;
403
+ timestamp?: number | undefined;
404
+ service?: string | undefined;
405
+ authMethod?: string | undefined;
406
+ }>;
407
+ type AuthenticationException = z.infer<typeof AuthenticationExceptionSchema>;
408
+ /**
409
+ * Validation Error
410
+ * isImmediatelyAvailable: true
411
+ * User provided invalid input - can show them what's wrong
412
+ */
413
+ declare const ValidationExceptionSchema: z.ZodObject<{
414
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
415
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
416
+ message: z.ZodString;
417
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
418
+ description: z.ZodOptional<z.ZodString>;
419
+ timestamp: z.ZodOptional<z.ZodNumber>;
420
+ errorType: z.ZodLiteral<ErrorTypeV1.VALIDATION_ERROR>;
421
+ isImmediatelyAvailable: z.ZodLiteral<true>;
422
+ field: z.ZodOptional<z.ZodString>;
423
+ expected: z.ZodOptional<z.ZodString>;
424
+ received: z.ZodOptional<z.ZodString>;
425
+ }, "strip", z.ZodTypeAny, {
426
+ message: string;
427
+ errorType: ErrorTypeV1.VALIDATION_ERROR;
428
+ isImmediatelyAvailable: true;
429
+ provider?: RecognitionProvider | undefined;
430
+ code?: string | number | undefined;
431
+ audioUtteranceId?: string | undefined;
432
+ description?: string | undefined;
433
+ timestamp?: number | undefined;
434
+ field?: string | undefined;
435
+ expected?: string | undefined;
436
+ received?: string | undefined;
437
+ }, {
438
+ message: string;
439
+ errorType: ErrorTypeV1.VALIDATION_ERROR;
440
+ isImmediatelyAvailable: true;
441
+ provider?: RecognitionProvider | undefined;
442
+ code?: string | number | undefined;
443
+ audioUtteranceId?: string | undefined;
444
+ description?: string | undefined;
445
+ timestamp?: number | undefined;
446
+ field?: string | undefined;
447
+ expected?: string | undefined;
448
+ received?: string | undefined;
449
+ }>;
450
+ type ValidationException = z.infer<typeof ValidationExceptionSchema>;
451
+ /**
452
+ * Provider Error
453
+ * isImmediatelyAvailable: false
454
+ * Error from ASR provider - usually transient or needs investigation
455
+ */
456
+ declare const ProviderExceptionSchema: z.ZodObject<{
457
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
458
+ message: z.ZodString;
459
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
460
+ description: z.ZodOptional<z.ZodString>;
461
+ timestamp: z.ZodOptional<z.ZodNumber>;
462
+ errorType: z.ZodLiteral<ErrorTypeV1.PROVIDER_ERROR>;
463
+ isImmediatelyAvailable: z.ZodLiteral<false>;
464
+ provider: z.ZodOptional<z.ZodString>;
465
+ providerErrorCode: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
466
+ isTransient: z.ZodOptional<z.ZodBoolean>;
467
+ }, "strip", z.ZodTypeAny, {
468
+ message: string;
469
+ errorType: ErrorTypeV1.PROVIDER_ERROR;
470
+ isImmediatelyAvailable: false;
471
+ code?: string | number | undefined;
472
+ audioUtteranceId?: string | undefined;
473
+ description?: string | undefined;
474
+ timestamp?: number | undefined;
475
+ provider?: string | undefined;
476
+ providerErrorCode?: string | number | undefined;
477
+ isTransient?: boolean | undefined;
478
+ }, {
479
+ message: string;
480
+ errorType: ErrorTypeV1.PROVIDER_ERROR;
481
+ isImmediatelyAvailable: false;
482
+ code?: string | number | undefined;
483
+ audioUtteranceId?: string | undefined;
484
+ description?: string | undefined;
485
+ timestamp?: number | undefined;
486
+ provider?: string | undefined;
487
+ providerErrorCode?: string | number | undefined;
488
+ isTransient?: boolean | undefined;
489
+ }>;
490
+ type ProviderException = z.infer<typeof ProviderExceptionSchema>;
491
+ /**
492
+ * Timeout Error
493
+ * isImmediatelyAvailable: true
494
+ * Request took too long - user should try again
495
+ */
496
+ declare const TimeoutExceptionSchema: z.ZodObject<{
497
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
498
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
499
+ message: z.ZodString;
500
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
501
+ description: z.ZodOptional<z.ZodString>;
502
+ timestamp: z.ZodOptional<z.ZodNumber>;
503
+ errorType: z.ZodLiteral<ErrorTypeV1.TIMEOUT_ERROR>;
504
+ isImmediatelyAvailable: z.ZodLiteral<true>;
505
+ timeoutMs: z.ZodOptional<z.ZodNumber>;
506
+ operation: z.ZodOptional<z.ZodString>;
507
+ }, "strip", z.ZodTypeAny, {
508
+ message: string;
509
+ errorType: ErrorTypeV1.TIMEOUT_ERROR;
510
+ isImmediatelyAvailable: true;
511
+ provider?: RecognitionProvider | undefined;
512
+ code?: string | number | undefined;
513
+ audioUtteranceId?: string | undefined;
514
+ description?: string | undefined;
515
+ timestamp?: number | undefined;
516
+ timeoutMs?: number | undefined;
517
+ operation?: string | undefined;
518
+ }, {
519
+ message: string;
520
+ errorType: ErrorTypeV1.TIMEOUT_ERROR;
521
+ isImmediatelyAvailable: true;
522
+ provider?: RecognitionProvider | undefined;
523
+ code?: string | number | undefined;
524
+ audioUtteranceId?: string | undefined;
525
+ description?: string | undefined;
526
+ timestamp?: number | undefined;
527
+ timeoutMs?: number | undefined;
528
+ operation?: string | undefined;
529
+ }>;
530
+ type TimeoutException = z.infer<typeof TimeoutExceptionSchema>;
531
+ /**
532
+ * Quota Exceeded Error
533
+ * isImmediatelyAvailable: true
534
+ * Rate limit or quota exceeded - user should wait
535
+ */
536
+ declare const QuotaExceededExceptionSchema: z.ZodObject<{
537
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
538
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
539
+ message: z.ZodString;
540
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
541
+ description: z.ZodOptional<z.ZodString>;
542
+ timestamp: z.ZodOptional<z.ZodNumber>;
543
+ errorType: z.ZodLiteral<ErrorTypeV1.QUOTA_EXCEEDED>;
544
+ isImmediatelyAvailable: z.ZodLiteral<true>;
545
+ quotaType: z.ZodOptional<z.ZodString>;
546
+ resetAt: z.ZodOptional<z.ZodNumber>;
547
+ retryAfterSeconds: z.ZodOptional<z.ZodNumber>;
548
+ }, "strip", z.ZodTypeAny, {
549
+ message: string;
550
+ errorType: ErrorTypeV1.QUOTA_EXCEEDED;
551
+ isImmediatelyAvailable: true;
552
+ provider?: RecognitionProvider | undefined;
553
+ code?: string | number | undefined;
554
+ audioUtteranceId?: string | undefined;
555
+ description?: string | undefined;
556
+ timestamp?: number | undefined;
557
+ quotaType?: string | undefined;
558
+ resetAt?: number | undefined;
559
+ retryAfterSeconds?: number | undefined;
560
+ }, {
561
+ message: string;
562
+ errorType: ErrorTypeV1.QUOTA_EXCEEDED;
563
+ isImmediatelyAvailable: true;
564
+ provider?: RecognitionProvider | undefined;
565
+ code?: string | number | undefined;
566
+ audioUtteranceId?: string | undefined;
567
+ description?: string | undefined;
568
+ timestamp?: number | undefined;
569
+ quotaType?: string | undefined;
570
+ resetAt?: number | undefined;
571
+ retryAfterSeconds?: number | undefined;
572
+ }>;
573
+ type QuotaExceededException = z.infer<typeof QuotaExceededExceptionSchema>;
574
+ /**
575
+ * Connection Error
576
+ * isImmediatelyAvailable: true
577
+ * Connection establishment or network failure - user should check network or retry
578
+ */
579
+ declare const ConnectionExceptionSchema: z.ZodObject<{
580
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
581
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
582
+ message: z.ZodString;
583
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
584
+ description: z.ZodOptional<z.ZodString>;
585
+ timestamp: z.ZodOptional<z.ZodNumber>;
586
+ errorType: z.ZodLiteral<ErrorTypeV1.CONNECTION_ERROR>;
587
+ isImmediatelyAvailable: z.ZodLiteral<true>;
588
+ attempts: z.ZodOptional<z.ZodNumber>;
589
+ url: z.ZodOptional<z.ZodString>;
590
+ underlyingError: z.ZodOptional<z.ZodString>;
591
+ }, "strip", z.ZodTypeAny, {
592
+ message: string;
593
+ errorType: ErrorTypeV1.CONNECTION_ERROR;
594
+ isImmediatelyAvailable: true;
595
+ provider?: RecognitionProvider | undefined;
596
+ code?: string | number | undefined;
597
+ audioUtteranceId?: string | undefined;
598
+ description?: string | undefined;
599
+ timestamp?: number | undefined;
600
+ attempts?: number | undefined;
601
+ url?: string | undefined;
602
+ underlyingError?: string | undefined;
603
+ }, {
604
+ message: string;
605
+ errorType: ErrorTypeV1.CONNECTION_ERROR;
606
+ isImmediatelyAvailable: true;
607
+ provider?: RecognitionProvider | undefined;
608
+ code?: string | number | undefined;
609
+ audioUtteranceId?: string | undefined;
610
+ description?: string | undefined;
611
+ timestamp?: number | undefined;
612
+ attempts?: number | undefined;
613
+ url?: string | undefined;
614
+ underlyingError?: string | undefined;
615
+ }>;
616
+ type ConnectionException = z.infer<typeof ConnectionExceptionSchema>;
617
+ /**
618
+ * Unknown Error
619
+ * isImmediatelyAvailable: false
620
+ * Unexpected error - needs investigation
621
+ */
622
+ declare const UnknownExceptionSchema: z.ZodObject<{
623
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
624
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
625
+ message: z.ZodString;
626
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
627
+ description: z.ZodOptional<z.ZodString>;
628
+ timestamp: z.ZodOptional<z.ZodNumber>;
629
+ errorType: z.ZodLiteral<ErrorTypeV1.UNKNOWN_ERROR>;
630
+ isImmediatelyAvailable: z.ZodLiteral<false>;
631
+ stack: z.ZodOptional<z.ZodString>;
632
+ context: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
633
+ }, "strip", z.ZodTypeAny, {
634
+ message: string;
635
+ errorType: ErrorTypeV1.UNKNOWN_ERROR;
636
+ isImmediatelyAvailable: false;
637
+ provider?: RecognitionProvider | undefined;
638
+ code?: string | number | undefined;
639
+ audioUtteranceId?: string | undefined;
640
+ description?: string | undefined;
641
+ timestamp?: number | undefined;
642
+ stack?: string | undefined;
643
+ context?: Record<string, unknown> | undefined;
644
+ }, {
645
+ message: string;
646
+ errorType: ErrorTypeV1.UNKNOWN_ERROR;
647
+ isImmediatelyAvailable: false;
648
+ provider?: RecognitionProvider | undefined;
649
+ code?: string | number | undefined;
650
+ audioUtteranceId?: string | undefined;
651
+ description?: string | undefined;
652
+ timestamp?: number | undefined;
653
+ stack?: string | undefined;
654
+ context?: Record<string, unknown> | undefined;
655
+ }>;
656
+ type UnknownException = z.infer<typeof UnknownExceptionSchema>;
657
+ /**
658
+ * Discriminated union of all exception types
659
+ * Use this for type-safe error handling
660
+ */
661
+ declare const RecognitionExceptionSchema: z.ZodDiscriminatedUnion<"errorType", [z.ZodObject<{
662
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
663
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
664
+ message: z.ZodString;
665
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
666
+ description: z.ZodOptional<z.ZodString>;
667
+ timestamp: z.ZodOptional<z.ZodNumber>;
668
+ errorType: z.ZodLiteral<ErrorTypeV1.AUTHENTICATION_ERROR>;
669
+ isImmediatelyAvailable: z.ZodLiteral<false>;
670
+ service: z.ZodOptional<z.ZodString>;
671
+ authMethod: z.ZodOptional<z.ZodString>;
672
+ }, "strip", z.ZodTypeAny, {
673
+ message: string;
674
+ errorType: ErrorTypeV1.AUTHENTICATION_ERROR;
675
+ isImmediatelyAvailable: false;
676
+ provider?: RecognitionProvider | undefined;
677
+ code?: string | number | undefined;
678
+ audioUtteranceId?: string | undefined;
679
+ description?: string | undefined;
680
+ timestamp?: number | undefined;
681
+ service?: string | undefined;
682
+ authMethod?: string | undefined;
683
+ }, {
684
+ message: string;
685
+ errorType: ErrorTypeV1.AUTHENTICATION_ERROR;
686
+ isImmediatelyAvailable: false;
687
+ provider?: RecognitionProvider | undefined;
688
+ code?: string | number | undefined;
689
+ audioUtteranceId?: string | undefined;
690
+ description?: string | undefined;
691
+ timestamp?: number | undefined;
692
+ service?: string | undefined;
693
+ authMethod?: string | undefined;
694
+ }>, z.ZodObject<{
695
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
696
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
697
+ message: z.ZodString;
698
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
699
+ description: z.ZodOptional<z.ZodString>;
700
+ timestamp: z.ZodOptional<z.ZodNumber>;
701
+ errorType: z.ZodLiteral<ErrorTypeV1.VALIDATION_ERROR>;
702
+ isImmediatelyAvailable: z.ZodLiteral<true>;
703
+ field: z.ZodOptional<z.ZodString>;
704
+ expected: z.ZodOptional<z.ZodString>;
705
+ received: z.ZodOptional<z.ZodString>;
706
+ }, "strip", z.ZodTypeAny, {
707
+ message: string;
708
+ errorType: ErrorTypeV1.VALIDATION_ERROR;
709
+ isImmediatelyAvailable: true;
710
+ provider?: RecognitionProvider | undefined;
711
+ code?: string | number | undefined;
712
+ audioUtteranceId?: string | undefined;
713
+ description?: string | undefined;
714
+ timestamp?: number | undefined;
715
+ field?: string | undefined;
716
+ expected?: string | undefined;
717
+ received?: string | undefined;
718
+ }, {
719
+ message: string;
720
+ errorType: ErrorTypeV1.VALIDATION_ERROR;
721
+ isImmediatelyAvailable: true;
722
+ provider?: RecognitionProvider | undefined;
723
+ code?: string | number | undefined;
724
+ audioUtteranceId?: string | undefined;
725
+ description?: string | undefined;
726
+ timestamp?: number | undefined;
727
+ field?: string | undefined;
728
+ expected?: string | undefined;
729
+ received?: string | undefined;
730
+ }>, z.ZodObject<{
731
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
732
+ message: z.ZodString;
733
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
734
+ description: z.ZodOptional<z.ZodString>;
735
+ timestamp: z.ZodOptional<z.ZodNumber>;
736
+ errorType: z.ZodLiteral<ErrorTypeV1.PROVIDER_ERROR>;
737
+ isImmediatelyAvailable: z.ZodLiteral<false>;
738
+ provider: z.ZodOptional<z.ZodString>;
739
+ providerErrorCode: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
740
+ isTransient: z.ZodOptional<z.ZodBoolean>;
741
+ }, "strip", z.ZodTypeAny, {
742
+ message: string;
743
+ errorType: ErrorTypeV1.PROVIDER_ERROR;
744
+ isImmediatelyAvailable: false;
745
+ code?: string | number | undefined;
746
+ audioUtteranceId?: string | undefined;
747
+ description?: string | undefined;
748
+ timestamp?: number | undefined;
749
+ provider?: string | undefined;
750
+ providerErrorCode?: string | number | undefined;
751
+ isTransient?: boolean | undefined;
752
+ }, {
753
+ message: string;
754
+ errorType: ErrorTypeV1.PROVIDER_ERROR;
755
+ isImmediatelyAvailable: false;
756
+ code?: string | number | undefined;
757
+ audioUtteranceId?: string | undefined;
758
+ description?: string | undefined;
759
+ timestamp?: number | undefined;
760
+ provider?: string | undefined;
761
+ providerErrorCode?: string | number | undefined;
762
+ isTransient?: boolean | undefined;
763
+ }>, z.ZodObject<{
764
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
765
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
766
+ message: z.ZodString;
767
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
768
+ description: z.ZodOptional<z.ZodString>;
769
+ timestamp: z.ZodOptional<z.ZodNumber>;
770
+ errorType: z.ZodLiteral<ErrorTypeV1.TIMEOUT_ERROR>;
771
+ isImmediatelyAvailable: z.ZodLiteral<true>;
772
+ timeoutMs: z.ZodOptional<z.ZodNumber>;
773
+ operation: z.ZodOptional<z.ZodString>;
774
+ }, "strip", z.ZodTypeAny, {
775
+ message: string;
776
+ errorType: ErrorTypeV1.TIMEOUT_ERROR;
777
+ isImmediatelyAvailable: true;
778
+ provider?: RecognitionProvider | undefined;
779
+ code?: string | number | undefined;
780
+ audioUtteranceId?: string | undefined;
781
+ description?: string | undefined;
782
+ timestamp?: number | undefined;
783
+ timeoutMs?: number | undefined;
784
+ operation?: string | undefined;
785
+ }, {
786
+ message: string;
787
+ errorType: ErrorTypeV1.TIMEOUT_ERROR;
788
+ isImmediatelyAvailable: true;
789
+ provider?: RecognitionProvider | undefined;
790
+ code?: string | number | undefined;
791
+ audioUtteranceId?: string | undefined;
792
+ description?: string | undefined;
793
+ timestamp?: number | undefined;
794
+ timeoutMs?: number | undefined;
795
+ operation?: string | undefined;
796
+ }>, z.ZodObject<{
797
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
798
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
799
+ message: z.ZodString;
800
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
801
+ description: z.ZodOptional<z.ZodString>;
802
+ timestamp: z.ZodOptional<z.ZodNumber>;
803
+ errorType: z.ZodLiteral<ErrorTypeV1.QUOTA_EXCEEDED>;
804
+ isImmediatelyAvailable: z.ZodLiteral<true>;
805
+ quotaType: z.ZodOptional<z.ZodString>;
806
+ resetAt: z.ZodOptional<z.ZodNumber>;
807
+ retryAfterSeconds: z.ZodOptional<z.ZodNumber>;
808
+ }, "strip", z.ZodTypeAny, {
809
+ message: string;
810
+ errorType: ErrorTypeV1.QUOTA_EXCEEDED;
811
+ isImmediatelyAvailable: true;
812
+ provider?: RecognitionProvider | undefined;
813
+ code?: string | number | undefined;
814
+ audioUtteranceId?: string | undefined;
815
+ description?: string | undefined;
816
+ timestamp?: number | undefined;
817
+ quotaType?: string | undefined;
818
+ resetAt?: number | undefined;
819
+ retryAfterSeconds?: number | undefined;
820
+ }, {
821
+ message: string;
822
+ errorType: ErrorTypeV1.QUOTA_EXCEEDED;
823
+ isImmediatelyAvailable: true;
824
+ provider?: RecognitionProvider | undefined;
825
+ code?: string | number | undefined;
826
+ audioUtteranceId?: string | undefined;
827
+ description?: string | undefined;
828
+ timestamp?: number | undefined;
829
+ quotaType?: string | undefined;
830
+ resetAt?: number | undefined;
831
+ retryAfterSeconds?: number | undefined;
832
+ }>, z.ZodObject<{
833
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
834
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
835
+ message: z.ZodString;
836
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
837
+ description: z.ZodOptional<z.ZodString>;
838
+ timestamp: z.ZodOptional<z.ZodNumber>;
839
+ errorType: z.ZodLiteral<ErrorTypeV1.CONNECTION_ERROR>;
840
+ isImmediatelyAvailable: z.ZodLiteral<true>;
841
+ attempts: z.ZodOptional<z.ZodNumber>;
842
+ url: z.ZodOptional<z.ZodString>;
843
+ underlyingError: z.ZodOptional<z.ZodString>;
844
+ }, "strip", z.ZodTypeAny, {
845
+ message: string;
846
+ errorType: ErrorTypeV1.CONNECTION_ERROR;
847
+ isImmediatelyAvailable: true;
848
+ provider?: RecognitionProvider | undefined;
849
+ code?: string | number | undefined;
850
+ audioUtteranceId?: string | undefined;
851
+ description?: string | undefined;
852
+ timestamp?: number | undefined;
853
+ attempts?: number | undefined;
854
+ url?: string | undefined;
855
+ underlyingError?: string | undefined;
856
+ }, {
857
+ message: string;
858
+ errorType: ErrorTypeV1.CONNECTION_ERROR;
859
+ isImmediatelyAvailable: true;
860
+ provider?: RecognitionProvider | undefined;
861
+ code?: string | number | undefined;
862
+ audioUtteranceId?: string | undefined;
863
+ description?: string | undefined;
864
+ timestamp?: number | undefined;
865
+ attempts?: number | undefined;
866
+ url?: string | undefined;
867
+ underlyingError?: string | undefined;
868
+ }>, z.ZodObject<{
869
+ provider: z.ZodOptional<z.ZodNativeEnum<typeof RecognitionProvider>>;
870
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
871
+ message: z.ZodString;
872
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
873
+ description: z.ZodOptional<z.ZodString>;
874
+ timestamp: z.ZodOptional<z.ZodNumber>;
875
+ errorType: z.ZodLiteral<ErrorTypeV1.UNKNOWN_ERROR>;
876
+ isImmediatelyAvailable: z.ZodLiteral<false>;
877
+ stack: z.ZodOptional<z.ZodString>;
878
+ context: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
879
+ }, "strip", z.ZodTypeAny, {
880
+ message: string;
881
+ errorType: ErrorTypeV1.UNKNOWN_ERROR;
882
+ isImmediatelyAvailable: false;
883
+ provider?: RecognitionProvider | undefined;
884
+ code?: string | number | undefined;
885
+ audioUtteranceId?: string | undefined;
886
+ description?: string | undefined;
887
+ timestamp?: number | undefined;
888
+ stack?: string | undefined;
889
+ context?: Record<string, unknown> | undefined;
890
+ }, {
891
+ message: string;
892
+ errorType: ErrorTypeV1.UNKNOWN_ERROR;
893
+ isImmediatelyAvailable: false;
894
+ provider?: RecognitionProvider | undefined;
895
+ code?: string | number | undefined;
896
+ audioUtteranceId?: string | undefined;
897
+ description?: string | undefined;
898
+ timestamp?: number | undefined;
899
+ stack?: string | undefined;
900
+ context?: Record<string, unknown> | undefined;
901
+ }>]>;
902
+ type RecognitionException = z.infer<typeof RecognitionExceptionSchema>;
903
+ /**
904
+ * Check if an exception should be shown to the user immediately
905
+ */
906
+ declare function isExceptionImmediatelyAvailable(exception: RecognitionException): boolean;
907
+ /**
908
+ * Get user-friendly error message for exceptions
909
+ */
910
+ declare function getUserFriendlyMessage(exception: RecognitionException): string;
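Because `RecognitionExceptionSchema` is discriminated on `errorType`, narrowing in a `switch` is type-safe. A minimal sketch of a handler built on the exported helpers (the routing itself is illustrative):

```typescript
// Sketch: the discriminant narrows each case, so per-type fields are type-safe.
function reportException(ex: RecognitionException): void {
  // Schema metadata decides whether this is user-facing right away.
  if (!isExceptionImmediatelyAvailable(ex)) {
    console.error('Needs investigation:', ex.errorType, ex.message);
    return;
  }
  switch (ex.errorType) {
    case ErrorTypeV1.QUOTA_EXCEEDED:
      console.warn(`Rate limited; retry in ${ex.retryAfterSeconds ?? 0}s`);
      break;
    case ErrorTypeV1.TIMEOUT_ERROR:
      console.warn(`Timed out after ${ex.timeoutMs ?? 0}ms`);
      break;
    default:
      console.warn(getUserFriendlyMessage(ex));
  }
}
```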
911
+
912
+ /**
913
+ * Recognition Context Types V1
914
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
915
+ * Types and schemas for recognition context data
916
+ */
917
+
918
+ /**
919
+ * Message type discriminator for recognition context V1
920
+ */
921
+ declare enum RecognitionContextTypeV1 {
922
+ GAME_CONTEXT = "GameContext",
923
+ CONTROL_SIGNAL = "ControlSignal",
924
+ ASR_REQUEST = "ASRRequest"
925
+ }
926
+ /**
927
+ * Control signal types for recognition V1
928
+ */
929
+ declare enum ControlSignalTypeV1 {
930
+ START_RECORDING = "start_recording",
931
+ STOP_RECORDING = "stop_recording"
932
+ }
933
+ /**
934
+ * SlotMap - A strongly typed map from slot names to lists of values
935
+ * Used for entity extraction and slot filling in voice interactions
936
+ */
937
+ declare const SlotMapSchema: z.ZodRecord<z.ZodString, z.ZodArray<z.ZodString, "many">>;
938
+ type SlotMap = z.infer<typeof SlotMapSchema>;
939
+ /**
940
+ * Game context V1 - contains game state information
941
+ */
942
+ declare const GameContextSchemaV1: z.ZodObject<{
943
+ type: z.ZodLiteral<RecognitionContextTypeV1.GAME_CONTEXT>;
944
+ gameId: z.ZodString;
945
+ gamePhase: z.ZodString;
946
+ promptSTT: z.ZodOptional<z.ZodString>;
947
+ promptSTF: z.ZodOptional<z.ZodString>;
948
+ promptTTF: z.ZodOptional<z.ZodString>;
949
+ slotMap: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodArray<z.ZodString, "many">>>;
950
+ }, "strip", z.ZodTypeAny, {
951
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
952
+ gameId: string;
953
+ gamePhase: string;
954
+ promptSTT?: string | undefined;
955
+ promptSTF?: string | undefined;
956
+ promptTTF?: string | undefined;
957
+ slotMap?: Record<string, string[]> | undefined;
958
+ }, {
959
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
960
+ gameId: string;
961
+ gamePhase: string;
962
+ promptSTT?: string | undefined;
963
+ promptSTF?: string | undefined;
964
+ promptTTF?: string | undefined;
965
+ slotMap?: Record<string, string[]> | undefined;
966
+ }>;
967
+ type GameContextV1 = z.infer<typeof GameContextSchemaV1>;
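A sketch of a context payload for a song-guessing phase; the game ID, phase name, and slot values below are invented for illustration:

```typescript
// All concrete values here are hypothetical examples.
const context: GameContextV1 = {
  type: RecognitionContextTypeV1.GAME_CONTEXT,
  gameId: 'song-quiz',
  gamePhase: 'guessing',
  slotMap: {
    // Slot name -> candidate values to bias recognition toward.
    songTitle: ['Bohemian Rhapsody', 'Hotel California'],
    artist: ['Queen', 'Eagles'],
  },
};
GameContextSchemaV1.parse(context); // throws if the shape drifts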
968
+ /**
969
+ * ASR Request V1 - contains complete ASR setup information
970
+ * Sent once at connection start to configure the session
971
+ */
972
+ declare const ASRRequestSchemaV1: z.ZodObject<{
973
+ type: z.ZodLiteral<RecognitionContextTypeV1.ASR_REQUEST>;
974
+ audioUtteranceId: z.ZodOptional<z.ZodString>;
975
+ provider: z.ZodString;
976
+ model: z.ZodOptional<z.ZodString>;
977
+ language: z.ZodString;
978
+ sampleRate: z.ZodNumber;
979
+ encoding: z.ZodNumber;
980
+ interimResults: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
981
+ useContext: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
982
+ finalTranscriptStability: z.ZodOptional<z.ZodString>;
983
+ debugCommand: z.ZodOptional<z.ZodObject<{
984
+ enableDebugLog: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
985
+ enableAudioStorage: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
986
+ enableSongQuizSessionIdCheck: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
987
+ enablePilotModels: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
988
+ }, "strip", z.ZodTypeAny, {
989
+ enableDebugLog: boolean;
990
+ enableAudioStorage: boolean;
991
+ enableSongQuizSessionIdCheck: boolean;
992
+ enablePilotModels: boolean;
993
+ }, {
994
+ enableDebugLog?: boolean | undefined;
995
+ enableAudioStorage?: boolean | undefined;
996
+ enableSongQuizSessionIdCheck?: boolean | undefined;
997
+ enablePilotModels?: boolean | undefined;
998
+ }>>;
999
+ }, "strip", z.ZodTypeAny, {
1000
+ provider: string;
1001
+ language: string;
1002
+ sampleRate: number;
1003
+ encoding: number;
1004
+ interimResults: boolean;
1005
+ useContext: boolean;
1006
+ type: RecognitionContextTypeV1.ASR_REQUEST;
1007
+ audioUtteranceId?: string | undefined;
1008
+ model?: string | undefined;
1009
+ finalTranscriptStability?: string | undefined;
1010
+ debugCommand?: {
1011
+ enableDebugLog: boolean;
1012
+ enableAudioStorage: boolean;
1013
+ enableSongQuizSessionIdCheck: boolean;
1014
+ enablePilotModels: boolean;
1015
+ } | undefined;
1016
+ }, {
1017
+ provider: string;
1018
+ language: string;
1019
+ sampleRate: number;
1020
+ encoding: number;
1021
+ type: RecognitionContextTypeV1.ASR_REQUEST;
1022
+ audioUtteranceId?: string | undefined;
1023
+ model?: string | undefined;
1024
+ interimResults?: boolean | undefined;
1025
+ useContext?: boolean | undefined;
1026
+ finalTranscriptStability?: string | undefined;
1027
+ debugCommand?: {
1028
+ enableDebugLog?: boolean | undefined;
1029
+ enableAudioStorage?: boolean | undefined;
1030
+ enableSongQuizSessionIdCheck?: boolean | undefined;
1031
+ enablePilotModels?: boolean | undefined;
1032
+ } | undefined;
1033
+ }>;
1034
+ type ASRRequestV1 = z.infer<typeof ASRRequestSchemaV1>;
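The wire-level request uses a numeric `sampleRate` and `encoding` and a string `provider`, so a session-start message might be built and validated like this (a sketch; all values illustrative):

```typescript
const request: ASRRequestV1 = ASRRequestSchemaV1.parse({
  type: RecognitionContextTypeV1.ASR_REQUEST,
  provider: RecognitionProvider.DEEPGRAM,
  model: DeepgramModel.NOVA_3,
  language: Language.ENGLISH_US,
  sampleRate: SampleRate.toHz(SampleRate.RATE_16000), // 16000
  encoding: AudioEncoding.toId(AudioEncoding.LINEAR16), // 1
});
// Per the config docs, the schema defaults interimResults and useContext to false.
```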
1035
+
1036
+ /**
1037
+ * Unified ASR Request Configuration
1038
+ *
1039
+ * Provider-agnostic configuration for ASR (Automatic Speech Recognition) requests.
1040
+ * This interface provides a consistent API for clients regardless of the underlying provider.
1041
+ *
1042
+ * All fields use library-defined enums for type safety and consistency.
1043
+ * Provider-specific mappers will convert these to provider-native formats.
1044
+ */
1045
+
1046
+ /**
1047
+ * Final transcript stability modes
1048
+ *
1049
+ * Controls timeout duration for fallback final transcript after stopRecording().
1050
+ * Similar to AssemblyAI's turn detection confidence modes but applied to our
1051
+ * internal timeout mechanism when vendors don't respond with is_final=true.
1052
+ *
1053
+ * @see https://www.assemblyai.com/docs/speech-to-text/universal-streaming/turn-detection
1054
+ */
1055
+ declare enum FinalTranscriptStability {
1056
+ /**
1057
+ * Aggressive mode: 100ms timeout
1058
+ * Fast response, optimized for short utterances and quick back-and-forth
1059
+ * Use cases: IVR, quick commands, retail confirmations
1060
+ */
1061
+ AGGRESSIVE = "aggressive",
1062
+ /**
1063
+ * Balanced mode: 200ms timeout (default)
1064
+ * Natural middle ground for most conversational scenarios
1065
+ * Use cases: General customer support, tech support, typical voice interactions
1066
+ */
1067
+ BALANCED = "balanced",
1068
+ /**
1069
+ * Conservative mode: 400ms timeout
1070
+ * Wait longer for providers, optimized for complex/reflective speech
1071
+ * Use cases: Healthcare, complex queries, careful thought processes
1072
+ */
1073
+ CONSERVATIVE = "conservative",
1074
+ /**
1075
+ * Experimental mode: 10000ms (10 seconds) timeout
1076
+ * Very long wait for batch/async providers that need significant processing time
1077
+ * Use cases: Batch processing (Gemini, OpenAI Whisper), complex audio analysis
1078
+ * Note: Should be cancelled immediately when transcript is received
1079
+ */
1080
+ EXPERIMENTAL = "experimental"
1081
+ }
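The enum's documented timeouts could be mapped to milliseconds like so (a sketch; the SDK's actual internal mapping is not exported):

```typescript
// Values mirror the documented timeouts above; illustrative only.
const STABILITY_TIMEOUT_MS: Record<FinalTranscriptStability, number> = {
  [FinalTranscriptStability.AGGRESSIVE]: 100,
  [FinalTranscriptStability.BALANCED]: 200,
  [FinalTranscriptStability.CONSERVATIVE]: 400,
  [FinalTranscriptStability.EXPERIMENTAL]: 10_000,
};
```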
1082
+ /**
1083
+ * Unified ASR request configuration
1084
+ *
1085
+ * This configuration is used by:
1086
+ * - Client SDKs to specify recognition parameters
1087
+ * - Demo applications for user input
1088
+ * - Service layer to configure provider sessions
1089
+ *
1090
+ * Core fields only - all provider-specific options go in providerOptions
1091
+ *
1092
+ * @example
1093
+ * ```typescript
1094
+ * const config: ASRRequestConfig = {
1095
+ * provider: RecognitionProvider.GOOGLE,
1096
+ * model: GoogleModel.LATEST_LONG,
1097
+ * language: Language.ENGLISH_US,
1098
+ * sampleRate: SampleRate.RATE_16000, // or just 16000
1099
+ * encoding: AudioEncoding.LINEAR16,
1100
+ * providerOptions: {
1101
+ * google: {
1102
+ * enableAutomaticPunctuation: true,
1103
+ * interimResults: true,
1104
+ * singleUtterance: false
1105
+ * }
1106
+ * }
1107
+ * };
1108
+ * ```
1109
+ */
1110
+ interface ASRRequestConfig {
1111
+ /**
1112
+ * The ASR provider to use
1113
+ * Must be one of the supported providers in RecognitionProvider enum
1114
+ */
1115
+ provider: RecognitionProvider | string;
1116
+ /**
1117
+ * Optional model specification for the provider
1118
+ * Can be provider-specific model enum or string
1119
+ * If not specified, provider's default model will be used
1120
+ */
1121
+ model?: RecognitionModel;
1122
+ /**
1123
+ * Language/locale for recognition
1124
+ * Use Language enum for common languages
1125
+ * Can also accept BCP-47 language tags as strings
1126
+ */
1127
+ language: Language | string;
1128
+ /**
1129
+ * Audio sample rate in Hz
1130
+ * Prefer using SampleRate enum values for standard rates
1131
+ * Can also accept numeric Hz values (e.g., 16000)
1132
+ */
1133
+ sampleRate: SampleRate | number;
1134
+ /**
1135
+ * Audio encoding format
1136
+ * Must match the actual audio data being sent
1137
+ * Use AudioEncoding enum for standard formats
1138
+ */
1139
+ encoding: AudioEncoding | string;
1140
+ /**
1141
+ * Enable interim (partial) results during recognition
1142
+ * When true, receive real-time updates before finalization
1143
+ * When false, only receive final results
1144
+ * Default: false
1145
+ */
1146
+ interimResults?: boolean;
1147
+ /**
1148
+ * Require GameContext (e.g., song titles) before starting recognition
1149
+ * When true, server waits for GameContext message before processing audio
1150
+ * When false, recognition starts immediately
1151
+ * Default: false
1152
+ */
1153
+ useContext?: boolean;
1154
+ /**
1155
+ * Final transcript stability mode
1156
+ *
1157
+ * Controls timeout duration for fallback final transcript when provider
1158
+ * doesn't respond with is_final=true after stopRecording().
1159
+ *
1160
+ * - aggressive: 100ms - fast response, may cut off slow providers
1161
+ * - balanced: 200ms - current default, good for most cases
1162
+ * - conservative: 400ms - wait longer for complex utterances
1163
+ *
1164
+ * @default 'balanced'
1165
+ * @see FinalTranscriptStability enum for detailed descriptions
1166
+ */
1167
+ finalTranscriptStability?: FinalTranscriptStability | string;
1168
+ /**
1169
+ * Additional provider-specific options
1170
+ *
1171
+ * Common options per provider:
1172
+ * - Deepgram: punctuate, smart_format, diarize, utterances
1173
+ * - Google: enableAutomaticPunctuation, singleUtterance, enableWordTimeOffsets
1174
+ * - AssemblyAI: formatTurns, filter_profanity, word_boost
1175
+ *
1176
+ * Note: interimResults is now a top-level field, but can still be overridden per provider
1177
+ *
1178
+ * @example
1179
+ * ```typescript
1180
+ * providerOptions: {
1181
+ * google: {
1182
+ * enableAutomaticPunctuation: true,
1183
+ * singleUtterance: false,
1184
+ * enableWordTimeOffsets: false
1185
+ * }
1186
+ * }
1187
+ * ```
1188
+ */
1189
+ providerOptions?: Record<string, any>;
1190
+ /**
1191
+ * Optional fallback ASR configurations
1192
+ *
1193
+ * List of alternative ASR configurations to use if the primary fails.
1194
+ * Each fallback config is a complete ASRRequestConfig that will be tried
1195
+ * in order until one succeeds.
1196
+ *
1197
+ * @example
1198
+ * ```typescript
1199
+ * fallbackModels: [
1200
+ * {
1201
+ * provider: RecognitionProvider.DEEPGRAM,
1202
+ * model: DeepgramModel.NOVA_2,
1203
+ * language: Language.ENGLISH_US,
1204
+ * sampleRate: 16000,
1205
+ * encoding: AudioEncoding.LINEAR16
1206
+ * },
1207
+ * {
1208
+ * provider: RecognitionProvider.GOOGLE,
1209
+ * model: GoogleModel.LATEST_SHORT,
1210
+ * language: Language.ENGLISH_US,
1211
+ * sampleRate: 16000,
1212
+ * encoding: AudioEncoding.LINEAR16
1213
+ * }
1214
+ * ]
1215
+ * ```
1216
+ */
1217
+ fallbackModels?: ASRRequestConfig[];
1218
+ }
1219
+ /**
1220
+ * Partial ASR config for updates
1221
+ * All fields are optional for partial updates
1222
+ */
1223
+ type PartialASRRequestConfig = Partial<ASRRequestConfig>;
1224
+ /**
1225
+ * Helper function to create a default ASR config
1226
+ */
1227
+ declare function createDefaultASRConfig(overrides?: PartialASRRequestConfig): ASRRequestConfig;
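A sketch of combining the helper with an explicit fallback chain (primary Deepgram, falling back to Google), assuming `createDefaultASRConfig` merges the overrides over the SDK's defaults:

```typescript
const config = createDefaultASRConfig({
  provider: RecognitionProvider.DEEPGRAM,
  model: DeepgramModel.NOVA_3,
  finalTranscriptStability: FinalTranscriptStability.BALANCED,
  fallbackModels: [
    {
      provider: RecognitionProvider.GOOGLE,
      model: GoogleModel.LATEST_SHORT,
      language: Language.ENGLISH_US,
      sampleRate: SampleRate.RATE_16000,
      encoding: AudioEncoding.LINEAR16,
    },
  ],
});
```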
1228
+
1229
+ /**
1230
+ * Gemini Model Types
1231
+ * Based on available models as of January 2025
1232
+ *
1233
+ * API Version Notes:
1234
+ * - Gemini 2.5+ models: Use v1beta API (early access features)
1235
+ * - Gemini 2.0 models: Use v1beta API (early access features)
1236
+ * - Gemini 1.5 models: Use v1 API (stable, production-ready)
1237
+ *
1238
+ * @see https://ai.google.dev/gemini-api/docs/models
1239
+ * @see https://ai.google.dev/gemini-api/docs/api-versions
1240
+ */
1241
+ declare enum GeminiModel {
1242
+ GEMINI_2_5_PRO = "gemini-2.5-pro",// State-of-the-art thinking model
1243
+ GEMINI_2_5_FLASH = "gemini-2.5-flash",// Best price-performance balance
1244
+ GEMINI_2_5_FLASH_LITE = "gemini-2.5-flash-lite",// Fastest, most cost-efficient
1245
+ GEMINI_2_0_FLASH_LATEST = "gemini-2.0-flash-latest",// Auto-updated to latest 2.0 flash
1246
+ GEMINI_2_0_FLASH = "gemini-2.0-flash-002",// Specific stable version
1247
+ GEMINI_2_0_FLASH_EXP = "gemini-2.0-flash-exp",// Experimental version
1248
+ GEMINI_1_5_FLASH = "gemini-1.5-flash",
1249
+ GEMINI_1_5_PRO = "gemini-1.5-pro"
1250
+ }
1251
+
1252
+ /**
1253
+ * OpenAI Model Types
1254
+ */
1255
+ declare enum OpenAIModel {
1256
+ WHISPER_1 = "whisper-1"
1257
+ }
1258
+
1259
+ /**
1260
+ * Standard stage/environment constants used across all services
1261
+ */
1262
+ declare const STAGES: {
1263
+ readonly LOCAL: "local";
1264
+ readonly DEV: "dev";
1265
+ readonly STAGING: "staging";
1266
+ readonly PRODUCTION: "production";
1267
+ };
1268
+ type Stage = typeof STAGES[keyof typeof STAGES];
1269
+
1270
+ /**
1271
+ * Generic WebSocket protocol types and utilities
1272
+ * Supports flexible versioning and message types
1273
+ * Used by both client and server implementations
1274
+ */
1275
+
1276
+ /**
1277
+ * Base message structure - completely flexible
1278
+ * @template V - Version type (number, string, etc.)
1279
+ */
1280
+ interface Message<V = number> {
1281
+ v: V;
1282
+ type: string;
1283
+ data?: unknown;
1284
+ }
1285
+ /**
1286
+ * Version serializer interface
1287
+ * Converts between version type V and byte representation
1288
+ */
1289
+ interface VersionSerializer<V> {
1290
+ serialize: (v: V) => number;
1291
+ deserialize: (byte: number) => V;
1292
+ }
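Since the interface converts versions to and from a byte, a plain numeric serializer is the obvious identity-style implementation (a sketch, assuming versions fit in 0-255):

```typescript
// Sketch of a numeric serializer; masks to one byte on the way out.
const numericVersionSerializer: VersionSerializer<number> = {
  serialize: (v) => v & 0xff,
  deserialize: (byte) => byte,
};
```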
1293
+
1294
+ /**
1295
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
1296
+ * Sends audio and control messages, receives responses from server
1297
+ *
1298
+ * Features:
1299
+ * - Generic version type support (number, string, etc.)
1300
+ * - Type-safe upward/downward message data
1301
+ * - Client-side backpressure monitoring
1302
+ * - Abstract hooks for application-specific logic
1303
+ * - Format-agnostic audio protocol (supports any encoding)
1304
+ */
1305
+
1306
+ type ClientConfig = {
1307
+ url: string;
1308
+ highWM?: number;
1309
+ lowWM?: number;
1310
+ };
1311
+ /**
1312
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
1313
+ * that send audio frames and JSON messages
1314
+ *
1315
+ * @template V - Version type (number, string, object, etc.)
1316
+ * @template TUpward - Type of upward message data (Client -> Server)
1317
+ * @template TDownward - Type of downward message data (Server -> Client)
1318
+ *
1319
+ * @example
1320
+ * ```typescript
1321
+ * class MyClient extends WebSocketAudioClient<number, MyUpMsg, MyDownMsg> {
1322
+ * protected onConnected() {
1323
+ * console.log('Connected!');
1324
+ * }
1325
+ *
1326
+ * protected onMessage(msg) {
1327
+ * console.log('Received:', msg.type, msg.data);
1328
+ * }
1329
+ *
1330
+ * protected onDisconnected(code, reason) {
1331
+ * console.log('Disconnected:', code, reason);
1332
+ * }
1333
+ *
1334
+ * protected onError(error) {
1335
+ * console.error('Error:', error);
1336
+ * }
1337
+ * }
1338
+ *
1339
+ * const client = new MyClient({ url: 'ws://localhost:8080' });
1340
+ * client.connect();
1341
+ * client.sendMessage(1, 'configure', { language: 'en' });
1342
+ * client.sendAudio(audioData);
1343
+ * ```
1344
+ */
1345
+ declare abstract class WebSocketAudioClient<V = number, // Version type (default: number)
1346
+ TUpward = unknown, // Upward message data type
1347
+ TDownward = unknown> {
1348
+ private cfg;
1349
+ protected versionSerializer: VersionSerializer<V>;
1350
+ private ws;
1351
+ private seq;
1352
+ private HWM;
1353
+ private LWM;
1354
+ constructor(cfg: ClientConfig, versionSerializer?: VersionSerializer<V>);
1355
+ /**
1356
+ * Hook: Called when WebSocket connection is established
1357
+ */
1358
+ protected abstract onConnected(): void;
1359
+ /**
1360
+ * Hook: Called when WebSocket connection closes
1361
+ * @param code - Close code (see WebSocketCloseCode enum)
1362
+ * @param reason - Human-readable close reason
1363
+ */
1364
+ protected abstract onDisconnected(code: number, reason: string): void;
1365
+ /**
1366
+ * Hook: Called when WebSocket error occurs
1367
+ */
1368
+ protected abstract onError(error: Event): void;
1369
+ /**
1370
+ * Hook: Called when downward message arrives from server
1371
+ * Override this to handle messages (optional - default does nothing)
1372
+ */
1373
+ protected onMessage(_msg: Message<V> & {
1374
+ data: TDownward;
1375
+ }): void;
1376
+ connect(): void;
1377
+ /**
1378
+ * Send JSON message to server
1379
+ * @param version - Message version
1380
+ * @param type - Message type (developer defined)
1381
+ * @param data - Message payload (typed)
1382
+ */
1383
+ sendMessage(version: V, type: string, data: TUpward): void;
1384
+ /**
1385
+ * Send audio frame with specified encoding and sample rate
1386
+ * @param audioData - Audio data (any format: Int16Array, Uint8Array, ArrayBuffer, etc.)
1387
+ * @param version - Audio frame version
1388
+ * @param encodingId - Audio encoding ID (0-5, e.g., AudioEncoding.LINEAR16)
1389
+ * @param sampleRate - Sample rate in Hz (e.g., 16000)
1390
+ */
1391
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView, version: V, encodingId: number, sampleRate: number): void;
1392
+ /**
1393
+ * Get current WebSocket buffer size
1394
+ */
1395
+ getBufferedAmount(): number;
1396
+ /**
1397
+ * Check if local buffer is backpressured
1398
+ */
1399
+ isLocalBackpressured(): boolean;
1400
+ /**
1401
+ * Check if ready to send audio
1402
+ * Verifies: connection open, no local buffer pressure
1403
+ */
1404
+ canSend(): boolean;
1405
+ /**
1406
+ * Check if connection is open
1407
+ */
1408
+ isOpen(): boolean;
1409
+ /**
1410
+ * Get current connection state
1411
+ */
1412
+ getReadyState(): number;
1413
+ /**
1414
+ * Close the WebSocket connection
1415
+ * Protected method for subclasses to implement disconnect logic
1416
+ * @param code - WebSocket close code (default: 1000 = normal closure)
1417
+ * @param reason - Human-readable close reason
1418
+ */
1419
+ protected closeConnection(code?: number, reason?: string): void;
1420
+ }
1421
+
1422
+ /**
1423
+ * Recognition Client Types
1424
+ *
1425
+ * Type definitions and interfaces for the recognition client SDK.
1426
+ * These interfaces enable dependency injection, testing, and alternative implementations.
1427
+ */
1428
+
1429
+ /**
1430
+ * Client connection state enum
1431
+ * Represents the various states a recognition client can be in during its lifecycle
1432
+ */
1433
+ declare enum ClientState {
1434
+ /** Initial state, no connection established */
1435
+ INITIAL = "initial",
1436
+ /** Actively establishing WebSocket connection */
1437
+ CONNECTING = "connecting",
1438
+ /** WebSocket connected but waiting for server ready signal */
1439
+ CONNECTED = "connected",
1440
+ /** Server ready, can send audio */
1441
+ READY = "ready",
1442
+ /** Sent stop signal, waiting for final transcript */
1443
+ STOPPING = "stopping",
1444
+ /** Connection closed normally after stop */
1445
+ STOPPED = "stopped",
1446
+ /** Connection failed or lost unexpectedly */
1447
+ FAILED = "failed"
1448
+ }
1449
+ /**
1450
+ * Callback URL configuration with message type filtering
1451
+ */
1452
+ interface RecognitionCallbackUrl {
1453
+ /** The callback URL endpoint */
1454
+ url: string;
1455
+ /** Array of message types to send to this URL. If empty/undefined, all types are sent */
1456
+ messageTypes?: Array<string | number>;
1457
+ }
1458
+ interface IRecognitionClientConfig {
1459
+ /**
1460
+ * WebSocket endpoint URL (optional)
1461
+ * Either `url` or `stage` should be provided.
1462
+ * If both are provided, `url` takes precedence.
1463
+ *
1464
+ * Example with explicit URL:
1465
+ * ```typescript
1466
+ * { url: 'wss://custom-endpoint.example.com/ws/v1/recognize' }
1467
+ * ```
1468
+ */
1469
+ url?: string;
1470
+ /**
1471
+ * Stage for recognition service (recommended)
1472
+ * Either `url` or `stage` should be provided.
1473
+ * If both are provided, `url` takes precedence.
1474
+ * Defaults to production if neither is provided.
1475
+ *
1476
+ * Example with STAGES enum (recommended):
1477
+ * ```typescript
1478
+ * import { STAGES } from '@recog/shared-types';
1479
+ * { stage: STAGES.STAGING }
1480
+ * ```
1481
+ *
1482
+ * String values also accepted:
1483
+ * ```typescript
1484
+ * { stage: 'staging' } // STAGES.LOCAL | STAGES.DEV | STAGES.STAGING | STAGES.PRODUCTION
1485
+ * ```
1486
+ */
1487
+ stage?: Stage | string;
1488
+ /** ASR configuration (provider, model, language, etc.) - optional */
1489
+ asrRequestConfig?: ASRRequestConfig;
1490
+ /** Game context for improved recognition accuracy */
1491
+ gameContext?: GameContextV1;
1492
+ /** Audio utterance ID (optional) - if not provided, a UUID v4 will be generated */
1493
+ audioUtteranceId?: string;
1494
+ /** Callback URLs for server-side notifications with optional message type filtering (optional)
1495
+ * The game side only needs to use this if another service must be notified of the transcription results.
1496
+ */
1497
+ callbackUrls?: RecognitionCallbackUrl[];
1498
+ /** User identification (optional) */
1499
+ userId?: string;
1500
+ /** Game session identification (optional). Called 'sessionId' in Platform and most games. */
1501
+ gameSessionId?: string;
1502
+ /** Device identification (optional) */
1503
+ deviceId?: string;
1504
+ /** Account identification (optional) */
1505
+ accountId?: string;
1506
+ /** Question answer identifier for tracking Q&A sessions (optional; for tracking purposes only) */
1507
+ questionAnswerId?: string;
1508
+ /** Platform for audio recording device (optional, e.g., 'ios', 'android', 'web', 'unity') */
1509
+ platform?: string;
1510
+ /** Callback when transcript is received */
1511
+ onTranscript?: (result: TranscriptionResultV1) => void;
1512
+ /**
1513
+ * Callback when function call is received
1514
+ * Note: Not supported in 2025. P2 feature for future speech-to-function-call capability.
1515
+ */
1516
+ onFunctionCall?: (result: FunctionCallResultV1) => void;
1517
+ /** Callback when metadata is received. Fires only once, after transcription is complete. */
1518
+ onMetadata?: (metadata: MetadataResultV1) => void;
1519
+ /** Callback when error occurs */
1520
+ onError?: (error: ErrorResultV1) => void;
1521
+ /** Callback when connected to WebSocket */
1522
+ onConnected?: () => void;
1523
+ /**
1524
+ * Callback when WebSocket disconnects
1525
+ * @param code - WebSocket close code (1000 = normal, 1006 = abnormal, etc.)
1526
+ * @param reason - Close reason string
1527
+ */
1528
+ onDisconnected?: (code: number, reason: string) => void;
1529
+ /** High water mark for backpressure control (bytes) */
1530
+ highWaterMark?: number;
1531
+ /** Low water mark for backpressure control (bytes) */
1532
+ lowWaterMark?: number;
1533
+ /** Maximum buffer duration in seconds (default: 60s) */
1534
+ maxBufferDurationSec?: number;
1535
+ /** Expected chunks per second for ring buffer sizing (default: 100) */
1536
+ chunksPerSecond?: number;
1537
+ /**
1538
+ * Connection retry configuration (optional)
1539
+ * Only applies to initial connection establishment, not mid-stream interruptions.
1540
+ *
1541
+ * Default: { maxAttempts: 4, delayMs: 200 } (try once, retry 3 times = 4 total attempts)
1542
+ *
1543
+ * Timing: Attempt 1 → FAIL → wait 200ms → Attempt 2 → FAIL → wait 200ms → Attempt 3 → FAIL → wait 200ms → Attempt 4
1544
+ *
1545
+ * Example:
1546
+ * ```typescript
1547
+ * {
1548
+ * connectionRetry: {
1549
+ * maxAttempts: 2, // Try connecting up to 2 times (1 retry)
1550
+ * delayMs: 500 // Wait 500ms between attempts
1551
+ * }
1552
+ * }
1553
+ * ```
1554
+ */
1555
+ connectionRetry?: {
1556
+ /** Maximum number of connection attempts (default: 4, min: 1, max: 5) */
1557
+ maxAttempts?: number;
1558
+ /** Delay in milliseconds between retry attempts (default: 200ms) */
1559
+ delayMs?: number;
1560
+ };
1561
+ /**
1562
+ * Optional logger function for debugging
1563
+ * If not provided, no logging will occur
1564
+ * @param level - Log level: 'debug', 'info', 'warn', 'error'
1565
+ * @param message - Log message
1566
+ * @param data - Optional additional data
1567
+ */
1568
+ logger?: (level: 'debug' | 'info' | 'warn' | 'error', message: string, data?: any) => void;
1569
+ }
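For reference, a plausible configuration built against the interface above, using the stage-based form the comments recommend. The IDs and the Deepgram model choice are placeholders, and only the `provider`/`model` fields of `ASRRequestConfig` are confirmed by the examples in this file.

```typescript
import {
  createClient,
  RecognitionProvider,
  DeepgramModel,
  STAGES,
} from '@volley/recognition-client-sdk';

const client = createClient({
  stage: STAGES.STAGING, // recommended over a hard-coded url
  asrRequestConfig: {
    provider: RecognitionProvider.DEEPGRAM,
    model: DeepgramModel.NOVA_2,
  },
  gameSessionId: 'session-123', // placeholder ID
  connectionRetry: { maxAttempts: 2, delayMs: 500 },
  onTranscript: (result) => console.log('final:', result.finalTranscript),
  onError: (error) => console.error('recognition error:', error),
  onDisconnected: (code, reason) => console.log('closed:', code, reason),
  logger: (level, message, data) => console.log(`[${level}] ${message}`, data ?? ''),
});
```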
1570
+ /**
1571
+ * Recognition Client Interface
1572
+ *
1573
+ * Main interface for real-time speech recognition clients.
1574
+ * Provides methods for connection management, audio streaming, and session control.
1575
+ */
1576
+ interface IRecognitionClient {
1577
+ /**
1578
+ * Connect to the WebSocket endpoint
1579
+ * @returns Promise that resolves when connected
1580
+ * @throws Error if connection fails or times out
1581
+ */
1582
+ connect(): Promise<void>;
1583
+ /**
1584
+ * Send audio data to the recognition service
1585
+ * Audio is buffered locally and sent when connection is ready.
1586
+ * @param audioData - PCM audio data as ArrayBuffer, typed array view, or Blob
1587
+ */
1588
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
1589
+ /**
1590
+ * Stop recording and wait for final transcript
1591
+ * The server will close the connection after sending the final transcript.
1592
+ * @returns Promise that resolves when final transcript is received
1593
+ */
1594
+ stopRecording(): Promise<void>;
1595
+ /**
1596
+ * Force stop and immediately close connection without waiting for server
1597
+ *
1598
+ * WARNING: This is an abnormal shutdown that bypasses the graceful stop flow:
1599
+ * - Does NOT wait for server to process remaining audio
1600
+ * - Does NOT receive final transcript from server
1601
+ * - Immediately closes WebSocket connection
1602
+ * - Cleans up resources (buffers, listeners)
1603
+ *
1604
+ * Use Cases:
1605
+ * - User explicitly cancels/abandons session
1606
+ * - Timeout scenarios where waiting is not acceptable
1607
+ * - Need immediate cleanup and can't wait for server
1608
+ *
1609
+ * RECOMMENDED: Use stopRecording() for normal shutdown.
1610
+ * Only use this when immediate disconnection is required.
1611
+ */
1612
+ stopAbnormally(): void;
1613
+ /**
1614
+ * Get the audio utterance ID for this session
1615
+ * Available immediately after client construction.
1616
+ * @returns UUID v4 string identifying this recognition session
1617
+ */
1618
+ getAudioUtteranceId(): string;
1619
+ /**
1620
+ * Get the current state of the client
1621
+ * @returns Current ClientState value
1622
+ */
1623
+ getState(): ClientState;
1624
+ /**
1625
+ * Check if WebSocket connection is open
1626
+ * @returns true if connected and ready to communicate
1627
+ */
1628
+ isConnected(): boolean;
1629
+ /**
1630
+ * Check if client is currently connecting
1631
+ * @returns true if connection is in progress
1632
+ */
1633
+ isConnecting(): boolean;
1634
+ /**
1635
+ * Check if client is currently stopping
1636
+ * @returns true if stopRecording() is in progress
1637
+ */
1638
+ isStopping(): boolean;
1639
+ /**
1640
+ * Check if transcription has finished
1641
+ * @returns true if the transcription is complete
1642
+ */
1643
+ isTranscriptionFinished(): boolean;
1644
+ /**
1645
+ * Check if the audio buffer has overflowed
1646
+ * @returns true if the ring buffer has wrapped around
1647
+ */
1648
+ isBufferOverflowing(): boolean;
1649
+ /**
1650
+ * Get client statistics
1651
+ * @returns Statistics about audio transmission and buffering
1652
+ */
1653
+ getStats(): IRecognitionClientStats;
1654
+ /**
1655
+ * Get the WebSocket URL being used by this client
1656
+ * Available immediately after client construction.
1657
+ * @returns WebSocket URL string
1658
+ */
1659
+ getUrl(): string;
1660
+ }
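The `stopRecording()`/`stopAbnormally()` split suggests a deadline pattern: attempt the graceful stop, and fall back to the abnormal one only if the final transcript does not arrive in time. A sketch, with a 3-second budget chosen purely for illustration:

```typescript
import type { IRecognitionClient } from '@volley/recognition-client-sdk';

async function finishUtterance(client: IRecognitionClient): Promise<boolean> {
  const timeout = new Promise<'timeout'>((resolve) =>
    setTimeout(() => resolve('timeout'), 3_000),
  );
  const outcome = await Promise.race([
    client.stopRecording().then(() => 'final' as const),
    timeout,
  ]);
  if (outcome === 'timeout') {
    // Waiting is no longer acceptable: give up on the final transcript.
    client.stopAbnormally();
    return false;
  }
  return true; // final transcript was received
}
```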
1661
+ /**
1662
+ * Client statistics interface
1663
+ */
1664
+ interface IRecognitionClientStats {
1665
+ /** Total audio bytes sent to server */
1666
+ audioBytesSent: number;
1667
+ /** Total number of audio chunks sent */
1668
+ audioChunksSent: number;
1669
+ /** Total number of audio chunks buffered */
1670
+ audioChunksBuffered: number;
1671
+ /** Number of times the ring buffer overflowed */
1672
+ bufferOverflowCount: number;
1673
+ /** Current number of chunks in buffer */
1674
+ currentBufferedChunks: number;
1675
+ /** Whether the ring buffer has wrapped (overwritten old data) */
1676
+ hasWrapped: boolean;
1677
+ }
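These counters make a simple overflow watchdog possible; the 500 ms polling interval below is an arbitrary choice for the sketch.

```typescript
import type { IRecognitionClient } from '@volley/recognition-client-sdk';

function watchRingBuffer(client: IRecognitionClient): () => void {
  const timer = setInterval(() => {
    const stats = client.getStats();
    if (stats.hasWrapped) {
      console.warn(
        `ring buffer wrapped ${stats.bufferOverflowCount} time(s); ` +
          `${stats.currentBufferedChunks} chunk(s) currently queued`,
      );
    }
  }, 500);
  return () => clearInterval(timer); // call to stop watching
}
```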
1678
+ /**
1679
+ * Configuration for RealTimeTwoWayWebSocketRecognitionClient
1680
+ * This extends IRecognitionClientConfig and is the main configuration interface
1681
+ * for creating a new RealTimeTwoWayWebSocketRecognitionClient instance.
1682
+ */
1683
+ interface RealTimeTwoWayWebSocketRecognitionClientConfig extends IRecognitionClientConfig {
1684
+ }
1685
+
1686
+ /**
1687
+ * RealTimeTwoWayWebSocketRecognitionClient - Clean, compact SDK for real-time speech recognition
1688
+ *
1689
+ * Features:
1690
+ * - Ring buffer-based audio storage with fixed memory footprint
1691
+ * - Automatic buffering when disconnected, immediate send when connected
1692
+ * - Buffer persists after flush (for future retry/reconnection scenarios)
1693
+ * - Built on WebSocketAudioClient for robust protocol handling
1694
+ * - Simple API: connect() → sendAudio() → stopRecording()
1695
+ * - Type-safe message handling with callbacks
1696
+ * - Automatic backpressure management
1697
+ * - Overflow detection with buffer state tracking
1698
+ *
1699
+ * Example:
1700
+ * ```typescript
1701
+ * const client = new RealTimeTwoWayWebSocketRecognitionClient({
1702
+ * url: 'ws://localhost:3101/ws/v1/recognize',
1703
+ * onTranscript: (result) => console.log(result.finalTranscript),
1704
+ * onError: (error) => console.error(error),
1705
+ * maxBufferDurationSec: 60 // Ring buffer for 60 seconds
1706
+ * });
1707
+ *
1708
+ * await client.connect();
1709
+ *
1710
+ * // Send audio chunks - always stored in ring buffer, sent if connected
1711
+ * micStream.on('data', (chunk) => client.sendAudio(chunk));
1712
+ *
1713
+ * // Signal end of audio and wait for final results
1714
+ * await client.stopRecording();
1715
+ *
1716
+ * // Server will close connection after sending finals
1717
+ * // No manual cleanup needed - browser handles it
1718
+ * ```
1719
+ */
1720
+
1721
+ /**
1722
+ * Check if a WebSocket close code indicates normal closure
1723
+ * @param code - WebSocket close code
1724
+ * @returns true if the disconnection was normal/expected, false if it was an error
1725
+ */
1726
+ declare function isNormalDisconnection(code: number): boolean;
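A typical use of this helper is inside the `onDisconnected` callback, to separate expected closes from failures:

```typescript
import {
  isNormalDisconnection,
  type IRecognitionClientConfig,
} from '@volley/recognition-client-sdk';

const config: IRecognitionClientConfig = {
  stage: 'staging',
  onDisconnected: (code, reason) => {
    if (isNormalDisconnection(code)) {
      console.log('session ended cleanly:', reason);
    } else {
      console.error(`unexpected disconnect (code ${code}):`, reason);
    }
  },
};
```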
1727
+ /**
1728
+ * Re-export TranscriptionResultV1 as TranscriptionResult for backward compatibility
1729
+ */
1730
+ type TranscriptionResult = TranscriptionResultV1;
1731
+
1732
+ /**
1733
+ * RealTimeTwoWayWebSocketRecognitionClient - SDK-level client for real-time speech recognition
1734
+ *
1735
+ * Implements IRecognitionClient interface for dependency injection and testing.
1736
+ * Extends WebSocketAudioClient with local audio buffering and simple callback-based API.
1737
+ */
1738
+ declare class RealTimeTwoWayWebSocketRecognitionClient extends WebSocketAudioClient<number, any, any> implements IRecognitionClient {
1739
+ private static readonly PROTOCOL_VERSION;
1740
+ private config;
1741
+ private audioBuffer;
1742
+ private messageHandler;
1743
+ private state;
1744
+ private connectionPromise;
1745
+ private isDebugLogEnabled;
1746
+ private audioBytesSent;
1747
+ private audioChunksSent;
1748
+ private audioStatsLogInterval;
1749
+ private lastAudioStatsLog;
1750
+ constructor(config: RealTimeTwoWayWebSocketRecognitionClientConfig);
1751
+ /**
1752
+ * Internal logging helper - only logs if a logger was provided in config
1753
+ * Debug logs are additionally gated by isDebugLogEnabled flag
1754
+ * @param level - Log level: debug, info, warn, or error
1755
+ * @param message - Message to log
1756
+ * @param data - Optional additional data to log
1757
+ */
1758
+ private log;
1759
+ /**
1760
+ * Clean up internal resources to free memory
1761
+ * Called when connection closes (normally or abnormally)
1762
+ */
1763
+ private cleanup;
1764
+ connect(): Promise<void>;
1765
+ /**
1766
+ * Attempt to connect with retry logic
1767
+ * Only retries on initial connection establishment, not mid-stream interruptions
1768
+ */
1769
+ private connectWithRetry;
1770
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
1771
+ private sendAudioInternal;
1772
+ stopRecording(): Promise<void>;
1773
+ stopAbnormally(): void;
1774
+ getAudioUtteranceId(): string;
1775
+ getUrl(): string;
1776
+ getState(): ClientState;
1777
+ isConnected(): boolean;
1778
+ isConnecting(): boolean;
1779
+ isStopping(): boolean;
1780
+ isTranscriptionFinished(): boolean;
1781
+ isBufferOverflowing(): boolean;
1782
+ getStats(): IRecognitionClientStats;
1783
+ protected onConnected(): void;
1784
+ protected onDisconnected(code: number, reason: string): void;
1785
+ protected onError(error: Event): void;
1786
+ protected onMessage(msg: {
1787
+ v: number;
1788
+ type: string;
1789
+ data: any;
1790
+ }): void;
1791
+ /**
1792
+ * Handle control messages from server
1793
+ * @param msg - Control message containing server actions
1794
+ */
1795
+ private handleControlMessage;
1796
+ /**
1797
+ * Send audio immediately to the server (without buffering)
1798
+ * @param audioData - Audio data to send
1799
+ */
1800
+ private sendAudioNow;
1801
+ }
1802
+
1803
+ /**
1804
+ * Configuration Builder for Recognition Client
1805
+ *
1806
+ * Simple builder pattern for RealTimeTwoWayWebSocketRecognitionClientConfig
1807
+ */
1808
+
1809
+ /**
1810
+ * Builder for RealTimeTwoWayWebSocketRecognitionClientConfig
1811
+ *
1812
+ * Provides a fluent API for building client configurations.
1813
+ *
1814
+ * Example:
1815
+ * ```typescript
1816
+ * import { STAGES } from '@recog/shared-types';
1817
+ *
1818
+ * const config = new ConfigBuilder()
1819
+ * .stage(STAGES.STAGING) // Recommended: automatic environment selection
1820
+ * .asrRequestConfig({
1821
+ * provider: RecognitionProvider.DEEPGRAM,
1822
+ * model: 'nova-2-general'
1823
+ * })
1824
+ * .onTranscript((result) => console.log(result))
1825
+ * .build();
1826
+ * ```
1827
+ */
1828
+ declare class ConfigBuilder {
1829
+ private config;
1830
+ /**
1831
+ * Set the WebSocket URL (advanced usage)
1832
+ * For standard environments, use stage() instead
1833
+ */
1834
+ url(url: string): this;
1835
+ /**
1836
+ * Set the stage for automatic environment selection (recommended)
1837
+ * @param stage - STAGES.LOCAL | STAGES.DEV | STAGES.STAGING | STAGES.PRODUCTION
1838
+ * @example
1839
+ * ```typescript
1840
+ * import { STAGES } from '@recog/shared-types';
1841
+ * builder.stage(STAGES.STAGING)
1842
+ * ```
1843
+ */
1844
+ stage(stage: Stage | string): this;
1845
+ /**
1846
+ * Set ASR request configuration
1847
+ */
1848
+ asrRequestConfig(config: ASRRequestConfig): this;
1849
+ /**
1850
+ * Set game context
1851
+ */
1852
+ gameContext(context: GameContextV1): this;
1853
+ /**
1854
+ * Set audio utterance ID
1855
+ */
1856
+ audioUtteranceId(id: string): this;
1857
+ /**
1858
+ * Set callback URLs
1859
+ */
1860
+ callbackUrls(urls: RecognitionCallbackUrl[]): this;
1861
+ /**
1862
+ * Set user ID
1863
+ */
1864
+ userId(id: string): this;
1865
+ /**
1866
+ * Set game session ID
1867
+ */
1868
+ gameSessionId(id: string): this;
1869
+ /**
1870
+ * Set device ID
1871
+ */
1872
+ deviceId(id: string): this;
1873
+ /**
1874
+ * Set account ID
1875
+ */
1876
+ accountId(id: string): this;
1877
+ /**
1878
+ * Set question answer ID
1879
+ */
1880
+ questionAnswerId(id: string): this;
1881
+ /**
1882
+ * Set platform
1883
+ */
1884
+ platform(platform: string): this;
1885
+ /**
1886
+ * Set transcript callback
1887
+ */
1888
+ onTranscript(callback: (result: TranscriptionResultV1) => void): this;
1889
+ /**
1890
+ * Set metadata callback
1891
+ */
1892
+ onMetadata(callback: (metadata: MetadataResultV1) => void): this;
1893
+ /**
1894
+ * Set error callback
1895
+ */
1896
+ onError(callback: (error: ErrorResultV1) => void): this;
1897
+ /**
1898
+ * Set connected callback
1899
+ */
1900
+ onConnected(callback: () => void): this;
1901
+ /**
1902
+ * Set disconnected callback
1903
+ */
1904
+ onDisconnected(callback: (code: number, reason: string) => void): this;
1905
+ /**
1906
+ * Set high water mark
1907
+ */
1908
+ highWaterMark(bytes: number): this;
1909
+ /**
1910
+ * Set low water mark
1911
+ */
1912
+ lowWaterMark(bytes: number): this;
1913
+ /**
1914
+ * Set max buffer duration in seconds
1915
+ */
1916
+ maxBufferDurationSec(seconds: number): this;
1917
+ /**
1918
+ * Set chunks per second
1919
+ */
1920
+ chunksPerSecond(chunks: number): this;
1921
+ /**
1922
+ * Set logger function
1923
+ */
1924
+ logger(logger: (level: 'debug' | 'info' | 'warn' | 'error', message: string, data?: any) => void): this;
1925
+ /**
1926
+ * Build the configuration
1927
+ */
1928
+ build(): RealTimeTwoWayWebSocketRecognitionClientConfig;
1929
+ }
1930
+
1931
+ /**
1932
+ * Factory function for creating Recognition Client instances
1933
+ */
1934
+
1935
+ /**
1936
+ * Create a recognition client from a configuration object
1937
+ *
1938
+ * Example:
1939
+ * ```typescript
1940
+ * const client = createClient({
1941
+ * url: 'ws://localhost:3101/ws/v1/recognize',
1942
+ * audioUtteranceId: 'unique-id',
1943
+ * onTranscript: (result) => console.log(result)
1944
+ * });
1945
+ * ```
1946
+ *
1947
+ * @param config - Client configuration
1948
+ * @returns Configured recognition client instance
1949
+ */
1950
+ declare function createClient(config: RealTimeTwoWayWebSocketRecognitionClientConfig): IRecognitionClient;
1951
+ /**
1952
+ * Create a recognition client using the builder pattern
1953
+ *
1954
+ * Example:
1955
+ * ```typescript
1956
+ * const client = createClientWithBuilder((builder) =>
1957
+ * builder
1958
+ * .url('ws://localhost:3101/ws/v1/recognize')
1959
+ * .onTranscript((result) => console.log(result))
1960
+ * .onError((error) => console.error(error))
1961
+ * );
1962
+ * ```
1963
+ */
1964
+ declare function createClientWithBuilder(configure: (builder: ConfigBuilder) => ConfigBuilder): IRecognitionClient;
1965
+
1966
+ /**
1967
+ * SDK Error Classes
1968
+ *
1969
+ * Typed error classes that extend native Error with recognition-specific metadata
1970
+ */
1971
+
1972
+ /**
1973
+ * Base class for all recognition SDK errors
1974
+ */
1975
+ declare class RecognitionError extends Error {
1976
+ readonly errorType: ErrorTypeV1;
1977
+ readonly timestamp: number;
1978
+ constructor(errorType: ErrorTypeV1, message: string);
1979
+ }
1980
+ /**
1981
+ * Connection error - thrown when WebSocket connection fails after all retry attempts
1982
+ */
1983
+ declare class ConnectionError extends RecognitionError {
1984
+ readonly attempts: number;
1985
+ readonly url: string;
1986
+ readonly underlyingError?: Error;
1987
+ constructor(message: string, attempts: number, url: string, underlyingError?: Error);
1988
+ }
1989
+ /**
1990
+ * Timeout error - thrown when operations exceed timeout limits
1991
+ */
1992
+ declare class TimeoutError extends RecognitionError {
1993
+ readonly timeoutMs: number;
1994
+ readonly operation: string;
1995
+ constructor(message: string, timeoutMs: number, operation: string);
1996
+ }
1997
+ /**
1998
+ * Validation error - thrown when invalid configuration or input is provided
1999
+ */
2000
+ declare class ValidationError extends RecognitionError {
2001
+ readonly field?: string;
2002
+ readonly expected?: string;
2003
+ readonly received?: string;
2004
+ constructor(message: string, field?: string, expected?: string, received?: string);
2005
+ }
2006
+
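Since all three classes extend `RecognitionError`, `instanceof` checks can discriminate them when `connect()` rejects. A sketch; note that which operations actually throw `TimeoutError` is not spelled out in this excerpt.

```typescript
import {
  ConnectionError,
  TimeoutError,
  ValidationError,
  type IRecognitionClient,
} from '@volley/recognition-client-sdk';

async function connectSafely(client: IRecognitionClient): Promise<void> {
  try {
    await client.connect();
  } catch (err) {
    if (err instanceof ConnectionError) {
      console.error(`connection failed after ${err.attempts} attempt(s) to ${err.url}`);
    } else if (err instanceof TimeoutError) {
      console.error(`${err.operation} timed out after ${err.timeoutMs} ms`);
    } else if (err instanceof ValidationError) {
      console.error(`invalid config (field: ${err.field ?? 'unknown'}): ${err.message}`);
    } else {
      throw err; // not an SDK error; re-raise
    }
  }
}
```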
2007
+ /**
2008
+ * VGF-style state schema for game-side recognition state/results management.
2009
+ *
2010
+ * This schema provides a standardized way for game developers to manage
2011
+ * voice recognition state and results in their applications. It supports:
2012
+ *
2013
+ * STEP 1: Basic transcription flow
2014
+ * STEP 2: Mic auto-stop upon correct answer (using partial transcripts)
2015
+ * STEP 3: Semantic/function-call outcomes for game actions
2016
+ *
2017
+ * Ideally this should live in a more centralized shared type library that frees
2018
+ * game developers from redefining it and provides helper functions (VGF? Platform SDK?).
2019
+ */
2020
+ declare const RecognitionVGFStateSchema: z.ZodObject<{
2021
+ audioUtteranceId: z.ZodString;
2022
+ startRecordingStatus: z.ZodOptional<z.ZodString>;
2023
+ transcriptionStatus: z.ZodOptional<z.ZodString>;
2024
+ finalTranscript: z.ZodOptional<z.ZodString>;
2025
+ finalConfidence: z.ZodOptional<z.ZodNumber>;
2026
+ asrConfig: z.ZodOptional<z.ZodString>;
2027
+ startRecordingTimestamp: z.ZodOptional<z.ZodString>;
2028
+ finalRecordingTimestamp: z.ZodOptional<z.ZodString>;
2029
+ finalTranscriptionTimestamp: z.ZodOptional<z.ZodString>;
2030
+ pendingTranscript: z.ZodDefault<z.ZodOptional<z.ZodString>>;
2031
+ pendingConfidence: z.ZodOptional<z.ZodNumber>;
2032
+ functionCallMetadata: z.ZodOptional<z.ZodString>;
2033
+ functionCallConfidence: z.ZodOptional<z.ZodNumber>;
2034
+ finalFunctionCallTimestamp: z.ZodOptional<z.ZodString>;
2035
+ promptSlotMap: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodArray<z.ZodString, "many">>>;
2036
+ }, "strip", z.ZodTypeAny, {
2037
+ audioUtteranceId: string;
2038
+ pendingTranscript: string;
2039
+ startRecordingStatus?: string | undefined;
2040
+ transcriptionStatus?: string | undefined;
2041
+ finalTranscript?: string | undefined;
2042
+ finalConfidence?: number | undefined;
2043
+ asrConfig?: string | undefined;
2044
+ startRecordingTimestamp?: string | undefined;
2045
+ finalRecordingTimestamp?: string | undefined;
2046
+ finalTranscriptionTimestamp?: string | undefined;
2047
+ pendingConfidence?: number | undefined;
2048
+ functionCallMetadata?: string | undefined;
2049
+ functionCallConfidence?: number | undefined;
2050
+ finalFunctionCallTimestamp?: string | undefined;
2051
+ promptSlotMap?: Record<string, string[]> | undefined;
2052
+ }, {
2053
+ audioUtteranceId: string;
2054
+ startRecordingStatus?: string | undefined;
2055
+ transcriptionStatus?: string | undefined;
2056
+ finalTranscript?: string | undefined;
2057
+ finalConfidence?: number | undefined;
2058
+ asrConfig?: string | undefined;
2059
+ startRecordingTimestamp?: string | undefined;
2060
+ finalRecordingTimestamp?: string | undefined;
2061
+ finalTranscriptionTimestamp?: string | undefined;
2062
+ pendingTranscript?: string | undefined;
2063
+ pendingConfidence?: number | undefined;
2064
+ functionCallMetadata?: string | undefined;
2065
+ functionCallConfidence?: number | undefined;
2066
+ finalFunctionCallTimestamp?: string | undefined;
2067
+ promptSlotMap?: Record<string, string[]> | undefined;
2068
+ }>;
2069
+ type RecognitionState = z.infer<typeof RecognitionVGFStateSchema>;
2070
+ declare const RecordingStatus: {
2071
+ readonly NOT_READY: "NOT_READY";
2072
+ readonly READY: "READY";
2073
+ readonly RECORDING: "RECORDING";
2074
+ readonly FINISHED: "FINISHED";
2075
+ };
2076
+ type RecordingStatusType = typeof RecordingStatus[keyof typeof RecordingStatus];
2077
+ declare const TranscriptionStatus: {
2078
+ readonly NOT_STARTED: "NOT_STARTED";
2079
+ readonly IN_PROGRESS: "IN_PROGRESS";
2080
+ readonly FINALIZED: "FINALIZED";
2081
+ readonly ABORTED: "ABORTED";
2082
+ readonly ERROR: "ERROR";
2083
+ };
2084
+ type TranscriptionStatusType = typeof TranscriptionStatus[keyof typeof TranscriptionStatus];
2085
+ declare function createInitialRecognitionState(audioUtteranceId: string): RecognitionState;
2086
+ declare function isValidRecordingStatusTransition(from: string | undefined, to: string): boolean;
2087
+
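Taken together, the schema and helpers support an initialize/guard/round-trip flow; `'utt-123'` below is a placeholder utterance ID.

```typescript
import {
  createInitialRecognitionState,
  isValidRecordingStatusTransition,
  RecognitionVGFStateSchema,
  RecordingStatus,
  type RecognitionState,
} from '@volley/recognition-client-sdk';

const state = createInitialRecognitionState('utt-123');

// Guard status changes with the transition validator before applying them.
if (isValidRecordingStatusTransition(state.startRecordingStatus, RecordingStatus.RECORDING)) {
  state.startRecordingStatus = RecordingStatus.RECORDING;
}

// State restored from storage can be re-validated against the schema.
const restored: RecognitionState = RecognitionVGFStateSchema.parse(
  JSON.parse(JSON.stringify(state)),
);
```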
2088
+ /**
2089
+ * Simplified VGF Recognition Client
2090
+ *
2091
+ * A thin wrapper around RealTimeTwoWayWebSocketRecognitionClient that maintains
2092
+ * a VGF RecognitionState as a pure sink/output of recognition events.
2093
+ *
2094
+ * The VGF state is updated based on events but never influences client behavior.
2095
+ * All functionality is delegated to the underlying client.
2096
+ */
2097
+
2098
+ /**
2099
+ * Configuration for SimplifiedVGFRecognitionClient
2100
+ */
2101
+ interface SimplifiedVGFClientConfig extends IRecognitionClientConfig {
2102
+ /**
2103
+ * Callback invoked whenever the VGF state changes
2104
+ * Use this to update your UI or React state
2105
+ */
2106
+ onStateChange?: (state: RecognitionState) => void;
2107
+ /**
2108
+ * Optional initial state to restore from a previous session
2109
+ * If provided, audioUtteranceId will be extracted and used
2110
+ */
2111
+ initialState?: RecognitionState;
2112
+ }
2113
+ /**
2114
+ * Interface for SimplifiedVGFRecognitionClient
2115
+ *
2116
+ * A simplified client that maintains VGF state for game developers.
2117
+ * All methods from the underlying client are available, plus VGF state management.
2118
+ */
2119
+ interface ISimplifiedVGFRecognitionClient {
2120
+ /**
2121
+ * Connect to the recognition service WebSocket
2122
+ * @returns Promise that resolves when connected and ready
2123
+ */
2124
+ connect(): Promise<void>;
2125
+ /**
2126
+ * Send audio data for transcription
2127
+ * @param audioData - PCM audio data as ArrayBuffer, typed array, or Blob
2128
+ */
2129
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
2130
+ /**
2131
+ * Stop recording and wait for final transcription
2132
+ * @returns Promise that resolves when transcription is complete
2133
+ */
2134
+ stopRecording(): Promise<void>;
2135
+ /**
2136
+ * Force stop and immediately close connection without waiting for server
2137
+ *
2138
+ * WARNING: This is an abnormal shutdown that bypasses the graceful stop flow:
2139
+ * - Does NOT wait for server to process remaining audio
2140
+ * - Does NOT receive final transcript from server (VGF state set to empty)
2141
+ * - Immediately closes WebSocket connection
2142
+ * - Cleans up resources (buffers, listeners)
2143
+ *
2144
+ * Use Cases:
2145
+ * - User explicitly cancels/abandons the session
2146
+ * - Timeout scenarios where waiting is not acceptable
2147
+ * - Need immediate cleanup and can't wait for server
2148
+ *
2149
+ * RECOMMENDED: Use stopRecording() for normal shutdown.
2150
+ * Only use this when immediate disconnection is required.
2151
+ */
2152
+ stopAbnormally(): void;
2153
+ /**
2154
+ * Get the current VGF recognition state
2155
+ * @returns Current RecognitionState with all transcription data
2156
+ */
2157
+ getVGFState(): RecognitionState;
2158
+ /**
2159
+ * Check if connected to the WebSocket
2160
+ */
2161
+ isConnected(): boolean;
2162
+ /**
2163
+ * Check if currently connecting
2164
+ */
2165
+ isConnecting(): boolean;
2166
+ /**
2167
+ * Check if currently stopping
2168
+ */
2169
+ isStopping(): boolean;
2170
+ /**
2171
+ * Check if transcription has finished
2172
+ */
2173
+ isTranscriptionFinished(): boolean;
2174
+ /**
2175
+ * Check if the audio buffer has overflowed
2176
+ */
2177
+ isBufferOverflowing(): boolean;
2178
+ /**
2179
+ * Get the audio utterance ID for this session
2180
+ */
2181
+ getAudioUtteranceId(): string;
2182
+ /**
2183
+ * Get the WebSocket URL being used
2184
+ */
2185
+ getUrl(): string;
2186
+ /**
2187
+ * Get the underlying client state (for advanced usage)
2188
+ */
2189
+ getState(): ClientState;
2190
+ }
2191
+ /**
2192
+ * This wrapper ONLY maintains VGF state as a sink.
2193
+ * All actual functionality is delegated to the underlying client.
2194
+ */
2195
+ declare class SimplifiedVGFRecognitionClient implements ISimplifiedVGFRecognitionClient {
2196
+ private client;
2197
+ private state;
2198
+ private isRecordingAudio;
2199
+ private stateChangeCallback;
2200
+ constructor(config: SimplifiedVGFClientConfig);
2201
+ connect(): Promise<void>;
2202
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
2203
+ stopRecording(): Promise<void>;
2204
+ stopAbnormally(): void;
2205
+ getAudioUtteranceId(): string;
2206
+ getUrl(): string;
2207
+ getState(): ClientState;
2208
+ isConnected(): boolean;
2209
+ isConnecting(): boolean;
2210
+ isStopping(): boolean;
2211
+ isTranscriptionFinished(): boolean;
2212
+ isBufferOverflowing(): boolean;
2213
+ getVGFState(): RecognitionState;
2214
+ private notifyStateChange;
2215
+ }
2216
+ /**
2217
+ * Factory function for creating a simplified client.
2218
+ * Usage examples:
2219
+ *
2220
+ * // Basic usage
2221
+ * const client = createSimplifiedVGFClient({
2222
+ * asrRequestConfig: { provider: 'deepgram', language: 'en' },
2223
+ * onStateChange: (state) => {
2224
+ * console.log('VGF State updated:', state);
2225
+ * // Update React state, game UI, etc.
2226
+ * }
2227
+ * });
2228
+ *
2229
+ * // With initial state (e.g., restoring from previous session)
2230
+ * const client = createSimplifiedVGFClient({
2231
+ * asrRequestConfig: { provider: 'deepgram', language: 'en' },
2232
+ * initialState: previousState, // Will use audioUtteranceId from state
2233
+ * onStateChange: (state) => setVGFState(state)
2234
+ * });
2235
+ *
2236
+ * // With initial state containing promptSlotMap for enhanced recognition
2237
+ * const stateWithSlots: RecognitionState = {
2238
+ * audioUtteranceId: 'session-123',
2239
+ * promptSlotMap: {
2240
+ * 'song_title': ['one time', 'baby'],
2241
+ * 'artists': ['justin bieber']
2242
+ * }
2243
+ * };
2244
+ * const client = createSimplifiedVGFClient({
2245
+ * asrRequestConfig: { provider: 'deepgram', language: 'en' },
2246
+ * gameContext: {
2247
+ * type: RecognitionContextTypeV1.GAME_CONTEXT,
2248
+ * gameId: 'music-quiz', // Your game's ID
2249
+ * gamePhase: 'song-guessing' // Current game phase
2250
+ * },
2251
+ * initialState: stateWithSlots, // promptSlotMap will be added to gameContext
2252
+ * onStateChange: (state) => setVGFState(state)
2253
+ * });
2254
+ *
2255
+ * await client.connect();
2256
+ * client.sendAudio(audioData);
2257
+ * // VGF state automatically updates based on transcription results
2258
+ */
2259
+ declare function createSimplifiedVGFClient(config: SimplifiedVGFClientConfig): ISimplifiedVGFRecognitionClient;
2260
+
2261
+ /**
2262
+ * Base URL schema shared across service endpoint helpers.
2263
+ */
2264
+ type ServiceBaseUrls = {
2265
+ httpBase: string;
2266
+ wsBase: string;
2267
+ };
2268
+ /**
2269
+ * Base URL mappings keyed by stage.
2270
+ */
2271
+ declare const RECOGNITION_SERVICE_BASES: Record<Stage, ServiceBaseUrls>;
2272
+ declare const RECOGNITION_CONDUCTOR_BASES: Record<Stage, ServiceBaseUrls>;
2273
+ /**
2274
+ * Normalize arbitrary stage input into a known `Stage`, defaulting to `local`.
2275
+ */
2276
+ declare function normalizeStage(input?: Stage | string | null | undefined): Stage;
2277
+ /**
2278
+ * Resolve the recognition-service base URLs for a given stage.
2279
+ */
2280
+ declare function getRecognitionServiceBase(stage?: Stage | string | null | undefined): ServiceBaseUrls;
2281
+ /**
2282
+ * Convenience helper for retrieving the HTTP base URL.
2283
+ */
2284
+ declare function getRecognitionServiceHttpBase(stage?: Stage | string | null | undefined): string;
2285
+ /**
2286
+ * Convenience helper for retrieving the WebSocket base URL.
2287
+ */
2288
+ declare function getRecognitionServiceWsBase(stage?: Stage | string | null | undefined): string;
2289
+ /**
2290
+ * Expose hostname lookup separately for callers that need raw host strings.
2291
+ */
2292
+ declare function getRecognitionServiceHost(stage?: Stage | string | null | undefined): string;
2293
+ /**
2294
+ * Resolve the recognition-conductor base URLs for a given stage.
2295
+ */
2296
+ declare function getRecognitionConductorBase(stage?: Stage | string | null | undefined): ServiceBaseUrls;
2297
+ declare function getRecognitionConductorHttpBase(stage?: Stage | string | null | undefined): string;
2298
+ declare function getRecognitionConductorWsBase(stage?: Stage | string | null | undefined): string;
2299
+ declare function getRecognitionConductorHost(stage?: Stage | string | null | undefined): string;
2300
+
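A short sketch of the stage-resolution helpers. The `RECOG_STAGE` environment variable is hypothetical, and unknown input degrades to `local` per `normalizeStage`'s contract.

```typescript
import {
  STAGES,
  normalizeStage,
  getRecognitionServiceWsBase,
  getRecognitionConductorHttpBase,
} from '@volley/recognition-client-sdk';

// RECOG_STAGE is a hypothetical env var; process.env requires Node typings.
const stage = normalizeStage(process.env.RECOG_STAGE);

const wsBase = getRecognitionServiceWsBase(stage);
const conductorHttp = getRecognitionConductorHttpBase(STAGES.PRODUCTION);
console.log({ stage, wsBase, conductorHttp });
```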
2301
+ export { AudioEncoding, ClientControlActionV1, ClientState, ConfigBuilder, ConnectionError, ControlSignalTypeV1 as ControlSignal, ControlSignalTypeV1, DeepgramModel, ErrorTypeV1, FinalTranscriptStability, GeminiModel, GoogleModel, Language, OpenAIModel, RECOGNITION_CONDUCTOR_BASES, RECOGNITION_SERVICE_BASES, RealTimeTwoWayWebSocketRecognitionClient, RecognitionContextTypeV1, RecognitionError, RecognitionProvider, RecognitionResultTypeV1, RecognitionVGFStateSchema, RecordingStatus, STAGES, SampleRate, SimplifiedVGFRecognitionClient, TimeoutError, TranscriptionStatus, ValidationError, createClient, createClientWithBuilder, createDefaultASRConfig, createInitialRecognitionState, createSimplifiedVGFClient, getRecognitionConductorBase, getRecognitionConductorHost, getRecognitionConductorHttpBase, getRecognitionConductorWsBase, getRecognitionServiceBase, getRecognitionServiceHost, getRecognitionServiceHttpBase, getRecognitionServiceWsBase, getUserFriendlyMessage, isExceptionImmediatelyAvailable, isNormalDisconnection, isValidRecordingStatusTransition, normalizeStage };
2302
+ export type { ASRRequestConfig, ASRRequestV1, AuthenticationException, ConnectionException, ErrorResultV1, FunctionCallResultV1, GameContextV1, IRecognitionClient, IRecognitionClientConfig, IRecognitionClientStats, ISimplifiedVGFRecognitionClient, MetadataResultV1, ProviderException, QuotaExceededException, RealTimeTwoWayWebSocketRecognitionClientConfig, RecognitionCallbackUrl, RecognitionException, RecognitionState, RecordingStatusType, SimplifiedVGFClientConfig, SlotMap, Stage, TimeoutException, TranscriptionResult, TranscriptionResultV1, TranscriptionStatusType, UnknownException, ValidationException };