@volley/recognition-client-sdk 0.1.296 → 0.1.297

@@ -0,0 +1,1125 @@
+ import { z } from 'zod';
+
+ /**
+ * Provider types and enums for recognition services
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ */
+ /**
+ * Supported speech recognition providers
+ */
+ declare enum RecognitionProvider {
+ ASSEMBLYAI = "assemblyai",
+ DEEPGRAM = "deepgram",
+ GOOGLE = "google",
+ GEMINI_BATCH = "gemini-batch",
+ OPENAI_BATCH = "openai-batch"
+ }
+ /**
+ * ASR API type - distinguishes between streaming and file-based transcription APIs
+ * - STREAMING: Real-time streaming APIs (Deepgram, AssemblyAI, Google)
+ * - FILE_BASED: File upload/batch APIs (OpenAI Batch, Gemini Batch)
+ */
+ declare enum ASRApiType {
+ STREAMING = "streaming",
+ FILE_BASED = "file-based"
+ }
+ /**
+ * Deepgram model names
+ */
+ declare enum DeepgramModel {
+ NOVA_2 = "nova-2",
+ NOVA_3 = "nova-3",
+ FLUX_GENERAL_EN = "flux-general-en"
+ }
+ /**
+ * Google Cloud Speech models
+ * @see https://cloud.google.com/speech-to-text/docs/transcription-model
+ */
+ declare enum GoogleModel {
+ LATEST_LONG = "latest_long",
+ LATEST_SHORT = "latest_short",
+ TELEPHONY = "telephony",
+ TELEPHONY_SHORT = "telephony_short",
+ MEDICAL_DICTATION = "medical_dictation",
+ MEDICAL_CONVERSATION = "medical_conversation",
+ DEFAULT = "default",
+ COMMAND_AND_SEARCH = "command_and_search",
+ PHONE_CALL = "phone_call",
+ VIDEO = "video"
+ }
+ /**
+ * Type alias for any model from any provider
+ */
+ type RecognitionModel = DeepgramModel | GoogleModel | string;
+
+ /**
+ * Audio encoding types
+ */
+ declare enum AudioEncoding {
+ ENCODING_UNSPECIFIED = 0,
+ LINEAR16 = 1,
+ OGG_OPUS = 2,
+ FLAC = 3,
+ MULAW = 4,
+ ALAW = 5
+ }
+ declare namespace AudioEncoding {
+ /**
+ * Convert numeric ID to AudioEncoding enum
+ * @param id - Numeric encoding identifier (0-5)
+ * @returns AudioEncoding enum value or undefined if invalid
+ */
+ function fromId(id: number): AudioEncoding | undefined;
+ /**
+ * Convert string name to AudioEncoding enum
+ * @param nameStr - String name like "linear16", "LINEAR16", "ogg_opus", "OGG_OPUS", etc. (case insensitive)
+ * @returns AudioEncoding enum value or undefined if invalid
+ */
+ function fromName(nameStr: string): AudioEncoding | undefined;
+ /**
+ * Convert AudioEncoding enum to numeric ID
+ * @param encoding - AudioEncoding enum value
+ * @returns Numeric ID (0-5)
+ */
+ function toId(encoding: AudioEncoding): number;
+ /**
+ * Convert AudioEncoding enum to string name
+ * @param encoding - AudioEncoding enum value
+ * @returns String name like "LINEAR16", "MULAW", etc.
+ */
+ function toName(encoding: AudioEncoding): string;
+ /**
+ * Check if a numeric ID is a valid encoding
+ * @param id - Numeric identifier to validate
+ * @returns true if valid encoding ID
+ */
+ function isIdValid(id: number): boolean;
+ /**
+ * Check if a string name is a valid encoding
+ * @param nameStr - String name to validate
+ * @returns true if valid encoding name
+ */
+ function isNameValid(nameStr: string): boolean;
+ }
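`AudioEncoding` is one of the few values this package exports (see the export list at the bottom), so the conversion helpers above can be used directly. A small round-trip sketch based on the documented signatures:

```typescript
import { AudioEncoding } from '@volley/recognition-client-sdk';

// fromName is documented as case-insensitive, so both spellings resolve.
const enc = AudioEncoding.fromName('linear16');     // AudioEncoding.LINEAR16
if (enc !== undefined) {
  console.log(AudioEncoding.toId(enc));             // 1
  console.log(AudioEncoding.toName(enc));           // "LINEAR16"
}

// The validation helpers return booleans rather than throwing.
console.log(AudioEncoding.isIdValid(9));            // false (valid IDs are 0-5)
console.log(AudioEncoding.isNameValid('OGG_OPUS')); // true
```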
+ /**
+ * Common sample rates (in Hz)
+ */
+ declare enum SampleRate {
+ RATE_8000 = 8000,
+ RATE_16000 = 16000,
+ RATE_22050 = 22050,
+ RATE_24000 = 24000,
+ RATE_32000 = 32000,
+ RATE_44100 = 44100,
+ RATE_48000 = 48000
+ }
+ declare namespace SampleRate {
+ /**
+ * Convert Hz value to SampleRate enum
+ * @param hz - Sample rate in Hz (8000, 16000, etc.)
+ * @returns SampleRate enum value or undefined if invalid
+ */
+ function fromHz(hz: number): SampleRate | undefined;
+ /**
+ * Convert string name to SampleRate enum
+ * @param nameStr - String name like "rate_8000", "RATE_16000", etc. (case insensitive)
+ * @returns SampleRate enum value or undefined if invalid
+ */
+ function fromName(nameStr: string): SampleRate | undefined;
+ /**
+ * Convert SampleRate enum to Hz value
+ * @param rate - SampleRate enum value
+ * @returns Hz value (8000, 16000, etc.)
+ */
+ function toHz(rate: SampleRate): number;
+ /**
+ * Convert SampleRate enum to string name
+ * @param rate - SampleRate enum value
+ * @returns String name like "RATE_8000", "RATE_16000", etc.
+ */
+ function toName(rate: SampleRate): string;
+ /**
+ * Check if a numeric Hz value is a valid sample rate
+ * @param hz - Hz value to validate
+ * @returns true if valid sample rate
+ */
+ function isHzValid(hz: number): boolean;
+ /**
+ * Check if a string name is a valid sample rate
+ * @param nameStr - String name to validate
+ * @returns true if valid sample rate name
+ */
+ function isNameValid(nameStr: string): boolean;
+ }
+ /**
+ * Supported languages for recognition
+ * Using BCP-47 language tags
+ */
+ declare enum Language {
+ ENGLISH_US = "en-US",
+ ENGLISH_GB = "en-GB",
+ SPANISH_ES = "es-ES",
+ SPANISH_MX = "es-MX",
+ FRENCH_FR = "fr-FR",
+ GERMAN_DE = "de-DE",
+ ITALIAN_IT = "it-IT",
+ PORTUGUESE_BR = "pt-BR",
+ JAPANESE_JP = "ja-JP",
+ KOREAN_KR = "ko-KR",
+ CHINESE_CN = "zh-CN",
+ CHINESE_TW = "zh-TW"
+ }
+
+ /**
+ * Recognition Result Types V1
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ * Types and schemas for recognition results sent to SDK clients
+ */
+
+ /**
+ * Message type discriminator for recognition results V1
+ */
+ declare enum RecognitionResultTypeV1 {
+ TRANSCRIPTION = "Transcription",// Transcript message containing the full transcript history; the result of STT (speech-to-text)
+ FUNCTION_CALL = "FunctionCall",// Not supported in P1; the result of STF (speech-to-function-call), using the function call schema
+ METADATA = "Metadata",// Metadata message contains all the timestamps, provider info, and ASR config
+ ERROR = "Error",// Error message contains the error details
+ CLIENT_CONTROL_MESSAGE = "ClientControlMessage"
+ }
+ /**
+ * Transcription result V1 - contains transcript message
+ * In the long run the game side should not need to know about this; in the short run it is sent back to the client.
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ */
+ declare const TranscriptionResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.TRANSCRIPTION>;
+ audioUtteranceId: z.ZodString;
+ finalTranscript: z.ZodString;
+ finalTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
+ pendingTranscript: z.ZodOptional<z.ZodString>;
+ pendingTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
+ is_finished: z.ZodBoolean;
+ voiceStart: z.ZodOptional<z.ZodNumber>;
+ voiceDuration: z.ZodOptional<z.ZodNumber>;
+ voiceEnd: z.ZodOptional<z.ZodNumber>;
+ startTimestamp: z.ZodOptional<z.ZodNumber>;
+ endTimestamp: z.ZodOptional<z.ZodNumber>;
+ receivedAtMs: z.ZodOptional<z.ZodNumber>;
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
+ audioUtteranceId: string;
+ finalTranscript: string;
+ is_finished: boolean;
+ finalTranscriptConfidence?: number | undefined;
+ pendingTranscript?: string | undefined;
+ pendingTranscriptConfidence?: number | undefined;
+ voiceStart?: number | undefined;
+ voiceDuration?: number | undefined;
+ voiceEnd?: number | undefined;
+ startTimestamp?: number | undefined;
+ endTimestamp?: number | undefined;
+ receivedAtMs?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ }, {
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
+ audioUtteranceId: string;
+ finalTranscript: string;
+ is_finished: boolean;
+ finalTranscriptConfidence?: number | undefined;
+ pendingTranscript?: string | undefined;
+ pendingTranscriptConfidence?: number | undefined;
+ voiceStart?: number | undefined;
+ voiceDuration?: number | undefined;
+ voiceEnd?: number | undefined;
+ startTimestamp?: number | undefined;
+ endTimestamp?: number | undefined;
+ receivedAtMs?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ }>;
+ type TranscriptionResultV1 = z.infer<typeof TranscriptionResultSchemaV1>;
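The schema above is declared but not exported, so downstream code cannot call `TranscriptionResultSchemaV1.safeParse` directly. A sketch of the same validation pattern using a local zod schema mirroring the required fields shown above (illustrative, not the package's schema):

```typescript
import { z } from 'zod';

// Local sketch covering the required TranscriptionResultSchemaV1 fields above.
const TranscriptionSketch = z.object({
  type: z.literal('Transcription'),
  audioUtteranceId: z.string(),
  finalTranscript: z.string(),
  is_finished: z.boolean(),
  pendingTranscript: z.string().optional(),
});

const parsed = TranscriptionSketch.safeParse(
  JSON.parse('{"type":"Transcription","audioUtteranceId":"abc","finalTranscript":"hello","is_finished":true}')
);
if (parsed.success) {
  console.log(parsed.data.finalTranscript); // "hello"
}
```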
+ /**
+ * Function call result V1 - similar to an LLM function call
+ * In the long run the game server should consume this, rather than the TV or client.
+ */
+ declare const FunctionCallResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.FUNCTION_CALL>;
+ audioUtteranceId: z.ZodString;
+ functionName: z.ZodString;
+ functionArgJson: z.ZodString;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
+ audioUtteranceId: string;
+ functionName: string;
+ functionArgJson: string;
+ }, {
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
+ audioUtteranceId: string;
+ functionName: string;
+ functionArgJson: string;
+ }>;
+ type FunctionCallResultV1 = z.infer<typeof FunctionCallResultSchemaV1>;
+ /**
+ * Metadata result V1 - contains metadata, timing information, and ASR config
+ * Sent when the provider connection closes to provide final timing metrics and config
+ * In the long run the game server should consume this, rather than the TV or client.
+ */
+ declare const MetadataResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.METADATA>;
+ audioUtteranceId: z.ZodString;
+ recordingStartMs: z.ZodOptional<z.ZodNumber>;
+ recordingEndMs: z.ZodOptional<z.ZodNumber>;
+ transcriptEndMs: z.ZodOptional<z.ZodNumber>;
+ socketCloseAtMs: z.ZodOptional<z.ZodNumber>;
+ duration: z.ZodOptional<z.ZodNumber>;
+ volume: z.ZodOptional<z.ZodNumber>;
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
+ costInUSD: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
+ apiType: z.ZodOptional<z.ZodNativeEnum<typeof ASRApiType>>;
+ asrConfig: z.ZodOptional<z.ZodString>;
+ rawAsrMetadata: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.METADATA;
+ audioUtteranceId: string;
+ recordingStartMs?: number | undefined;
+ recordingEndMs?: number | undefined;
+ transcriptEndMs?: number | undefined;
+ socketCloseAtMs?: number | undefined;
+ duration?: number | undefined;
+ volume?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ costInUSD?: number | undefined;
+ apiType?: ASRApiType | undefined;
+ asrConfig?: string | undefined;
+ rawAsrMetadata?: string | undefined;
+ }, {
+ type: RecognitionResultTypeV1.METADATA;
+ audioUtteranceId: string;
+ recordingStartMs?: number | undefined;
+ recordingEndMs?: number | undefined;
+ transcriptEndMs?: number | undefined;
+ socketCloseAtMs?: number | undefined;
+ duration?: number | undefined;
+ volume?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ costInUSD?: number | undefined;
+ apiType?: ASRApiType | undefined;
+ asrConfig?: string | undefined;
+ rawAsrMetadata?: string | undefined;
+ }>;
+ type MetadataResultV1 = z.infer<typeof MetadataResultSchemaV1>;
+ /**
+ * Error type enum V1 - categorizes different types of errors
+ */
+ declare enum ErrorTypeV1 {
+ AUTHENTICATION_ERROR = "authentication_error",// Authentication/authorization failures
+ VALIDATION_ERROR = "validation_error",// Invalid input or configuration
+ PROVIDER_ERROR = "provider_error",// Error from ASR provider (Deepgram, Google, etc.). Unlikely to happen with fallbacks
+ TIMEOUT_ERROR = "timeout_error",// Request or operation timeout. Likely the business logic did not handle a timeout.
+ QUOTA_EXCEEDED = "quota_exceeded",// Quota or rate limit exceeded. Unlikely to happen with fallbacks
+ CONNECTION_ERROR = "connection_error",// Connection establishment or network error
+ UNKNOWN_ERROR = "unknown_error"
+ }
+ /**
+ * Error result V1 - contains error message
+ * In the long run the game server should consume this, rather than the TV or client.
+ */
+ declare const ErrorResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.ERROR>;
+ audioUtteranceId: z.ZodString;
+ errorType: z.ZodOptional<z.ZodNativeEnum<typeof ErrorTypeV1>>;
+ message: z.ZodOptional<z.ZodString>;
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
+ description: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.ERROR;
+ audioUtteranceId: string;
+ errorType?: ErrorTypeV1 | undefined;
+ message?: string | undefined;
+ code?: string | number | undefined;
+ description?: string | undefined;
+ }, {
+ type: RecognitionResultTypeV1.ERROR;
+ audioUtteranceId: string;
+ errorType?: ErrorTypeV1 | undefined;
+ message?: string | undefined;
+ code?: string | number | undefined;
+ description?: string | undefined;
+ }>;
+ type ErrorResultV1 = z.infer<typeof ErrorResultSchemaV1>;
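The four result shapes above share a `type` discriminator, so a handler can narrow on it with a switch. A sketch using hand-written structural stand-ins for the inferred zod types (illustrative only, since the V1 types are not exported):

```typescript
// Structural stand-ins for the inferred V1 result types above.
type ResultV1 =
  | { type: 'Transcription'; audioUtteranceId: string; finalTranscript: string; is_finished: boolean }
  | { type: 'FunctionCall'; audioUtteranceId: string; functionName: string; functionArgJson: string }
  | { type: 'Metadata'; audioUtteranceId: string; costInUSD?: number }
  | { type: 'Error'; audioUtteranceId: string; message?: string };

function handleResult(result: ResultV1): void {
  switch (result.type) {
    case 'Transcription':
      console.log('transcript:', result.finalTranscript, 'final:', result.is_finished);
      break;
    case 'FunctionCall':
      // Per the schema above, functionArgJson is a JSON string, not a parsed object.
      console.log('call:', result.functionName, JSON.parse(result.functionArgJson));
      break;
    case 'Metadata':
      console.log('cost (USD):', result.costInUSD ?? 0);
      break;
    case 'Error':
      console.error('recognition error:', result.message);
      break;
  }
}
```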
+
+ /**
+ * Recognition Context Types V1
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ * Types and schemas for recognition context data
+ */
+
+ /**
+ * Message type discriminator for recognition context V1
+ */
+ declare enum RecognitionContextTypeV1 {
+ GAME_CONTEXT = "GameContext",
+ CONTROL_SIGNAL = "ControlSignal",
+ ASR_REQUEST = "ASRRequest"
+ }
+ /**
+ * Control signal types for recognition V1
+ */
+ declare enum ControlSignalTypeV1 {
+ START_RECORDING = "start_recording",
+ STOP_RECORDING = "stop_recording"
+ }
+ /**
+ * Game context V1 - contains game state information
+ */
+ declare const GameContextSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionContextTypeV1.GAME_CONTEXT>;
+ gameId: z.ZodString;
+ gamePhase: z.ZodString;
+ promptSTT: z.ZodOptional<z.ZodString>;
+ promptSTF: z.ZodOptional<z.ZodString>;
+ promptTTF: z.ZodOptional<z.ZodString>;
+ slotMap: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodArray<z.ZodString, "many">>>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
+ gameId: string;
+ gamePhase: string;
+ promptSTT?: string | undefined;
+ promptSTF?: string | undefined;
+ promptTTF?: string | undefined;
+ slotMap?: Record<string, string[]> | undefined;
+ }, {
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
+ gameId: string;
+ gamePhase: string;
+ promptSTT?: string | undefined;
+ promptSTF?: string | undefined;
+ promptTTF?: string | undefined;
+ slotMap?: Record<string, string[]> | undefined;
+ }>;
+ type GameContextV1 = z.infer<typeof GameContextSchemaV1>;
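Both `RecognitionContextTypeV1` (as a value) and `GameContextV1` (as a type) appear in the export list at the bottom of this file, so a context object can be built directly against the schema's shape. The game IDs, phase, prompt, and slot values below are made up for illustration:

```typescript
import { RecognitionContextTypeV1 } from '@volley/recognition-client-sdk';
import type { GameContextV1 } from '@volley/recognition-client-sdk';

// Example context biasing recognition toward in-game vocabulary.
const gameContext: GameContextV1 = {
  type: RecognitionContextTypeV1.GAME_CONTEXT,
  gameId: 'song-quiz',
  gamePhase: 'guessing',
  promptSTT: 'The player is guessing a song title.',
  slotMap: {
    songTitle: ['Bohemian Rhapsody', 'Hey Jude'],
  },
};
```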
+
+ /**
+ * Unified ASR Request Configuration
+ *
+ * Provider-agnostic configuration for ASR (Automatic Speech Recognition) requests.
+ * This interface provides a consistent API for clients regardless of the underlying provider.
+ *
+ * All fields use library-defined enums for type safety and consistency.
+ * Provider-specific mappers will convert these to provider-native formats.
+ */
+
+ /**
+ * Final transcript stability modes
+ *
+ * Controls timeout duration for fallback final transcript after stopRecording().
+ * Similar to AssemblyAI's turn detection confidence modes but applied to our
+ * internal timeout mechanism when vendors don't respond with is_final=true.
+ *
+ * @see https://www.assemblyai.com/docs/speech-to-text/universal-streaming/turn-detection
+ */
+ declare enum FinalTranscriptStability {
+ /**
+ * Aggressive mode: 100ms timeout
+ * Fast response, optimized for short utterances and quick back-and-forth
+ * Use cases: IVR, quick commands, retail confirmations
+ */
+ AGGRESSIVE = "aggressive",
+ /**
+ * Balanced mode: 200ms timeout (default)
+ * Natural middle ground for most conversational scenarios
+ * Use cases: General customer support, tech support, typical voice interactions
+ */
+ BALANCED = "balanced",
+ /**
+ * Conservative mode: 400ms timeout
+ * Wait longer for providers, optimized for complex/reflective speech
+ * Use cases: Healthcare, complex queries, careful thought processes
+ */
+ CONSERVATIVE = "conservative",
+ /**
+ * Experimental mode: 10000ms (10 seconds) timeout
+ * Very long wait for batch/async providers that need significant processing time
+ * Use cases: Batch processing (Gemini, OpenAI Whisper), complex audio analysis
+ * Note: Should be cancelled immediately when transcript is received
+ */
+ EXPERIMENTAL = "experimental"
+ }
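The timeout values are only documented in the member comments above; a small lookup sketch that pins them down (the helper is illustrative, not a package API):

```typescript
// Timeouts as documented in the FinalTranscriptStability comments above.
const STABILITY_TIMEOUT_MS: Record<string, number> = {
  aggressive: 100,
  balanced: 200,
  conservative: 400,
  experimental: 10_000,
};

// Resolves a stability mode to its fallback timeout, defaulting to 'balanced'
// as the enum documentation specifies.
function fallbackTimeoutMs(stability = 'balanced'): number {
  return STABILITY_TIMEOUT_MS[stability] ?? STABILITY_TIMEOUT_MS.balanced;
}
```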
+ /**
+ * Unified ASR request configuration
+ *
+ * This configuration is used by:
+ * - Client SDKs to specify recognition parameters
+ * - Demo applications for user input
+ * - Service layer to configure provider sessions
+ *
+ * Core fields only - all provider-specific options go in providerOptions
+ *
+ * @example
+ * ```typescript
+ * const config: ASRRequestConfig = {
+ * provider: RecognitionProvider.GOOGLE,
+ * model: GoogleModel.LATEST_LONG,
+ * language: Language.ENGLISH_US,
+ * sampleRate: SampleRate.RATE_16000, // or just 16000
+ * encoding: AudioEncoding.LINEAR16,
+ * providerOptions: {
+ * google: {
+ * enableAutomaticPunctuation: true,
+ * interimResults: true,
+ * singleUtterance: false
+ * }
+ * }
+ * };
+ * ```
+ */
+ interface ASRRequestConfig {
+ /**
+ * The ASR provider to use
+ * Must be one of the supported providers in RecognitionProvider enum
+ */
+ provider: RecognitionProvider | string;
+ /**
+ * Optional model specification for the provider
+ * Can be provider-specific model enum or string
+ * If not specified, provider's default model will be used
+ */
+ model?: RecognitionModel;
+ /**
+ * Language/locale for recognition
+ * Use Language enum for common languages
+ * Can also accept BCP-47 language tags as strings
+ */
+ language: Language | string;
+ /**
+ * Audio sample rate in Hz
+ * Prefer using SampleRate enum values for standard rates
+ * Can also accept numeric Hz values (e.g., 16000)
+ */
+ sampleRate: SampleRate | number;
+ /**
+ * Audio encoding format
+ * Must match the actual audio data being sent
+ * Use AudioEncoding enum for standard formats
+ */
+ encoding: AudioEncoding | string;
+ /**
+ * Enable interim (partial) results during recognition
+ * When true, receive real-time updates before finalization
+ * When false, only receive final results
+ * Default: false
+ */
+ interimResults?: boolean;
+ /**
+ * Require GameContext (e.g., song titles) before starting recognition
+ * When true, server waits for GameContext message before processing audio
+ * When false, recognition starts immediately
+ * Default: false
+ */
+ useContext?: boolean;
+ /**
+ * Final transcript stability mode
+ *
+ * Controls timeout duration for fallback final transcript when provider
+ * doesn't respond with is_final=true after stopRecording().
+ *
+ * - aggressive: 100ms - fast response, may cut off slow providers
+ * - balanced: 200ms - current default, good for most cases
+ * - conservative: 400ms - wait longer for complex utterances
+ *
+ * @default 'balanced'
+ * @see FinalTranscriptStability enum for detailed descriptions
+ */
+ finalTranscriptStability?: FinalTranscriptStability | string;
+ /**
+ * Additional provider-specific options
+ *
+ * Common options per provider:
+ * - Deepgram: punctuate, smart_format, diarize, utterances
+ * - Google: enableAutomaticPunctuation, singleUtterance, enableWordTimeOffsets
+ * - AssemblyAI: formatTurns, filter_profanity, word_boost
+ *
+ * Note: interimResults is now a top-level field, but can still be overridden per provider
+ *
+ * @example
+ * ```typescript
+ * providerOptions: {
+ * google: {
+ * enableAutomaticPunctuation: true,
+ * singleUtterance: false,
+ * enableWordTimeOffsets: false
+ * }
+ * }
+ * ```
+ */
+ providerOptions?: Record<string, any>;
+ /**
+ * Optional fallback ASR configurations
+ *
+ * List of alternative ASR configurations to use if the primary fails.
+ * Each fallback config is a complete ASRRequestConfig that will be tried
+ * in order until one succeeds.
+ *
+ * @example
+ * ```typescript
+ * fallbackModels: [
+ * {
+ * provider: RecognitionProvider.DEEPGRAM,
+ * model: DeepgramModel.NOVA_2,
+ * language: Language.ENGLISH_US,
+ * sampleRate: 16000,
+ * encoding: AudioEncoding.LINEAR16
+ * },
+ * {
+ * provider: RecognitionProvider.GOOGLE,
+ * model: GoogleModel.LATEST_SHORT,
+ * language: Language.ENGLISH_US,
+ * sampleRate: 16000,
+ * encoding: AudioEncoding.LINEAR16
+ * }
+ * ]
+ * ```
+ */
+ fallbackModels?: ASRRequestConfig[];
+ }
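The `fallbackModels` doc above says configs are tried in order until one succeeds. A sketch of what that loop could look like on the service side, under the assumption that a session factory throws on failure; `ASRSession` and `openSession` are hypothetical stand-ins, not package APIs:

```typescript
// Hypothetical session type and factory standing in for the service layer.
interface ASRSession { close(): void }
declare function openSession(config: ASRRequestConfig): Promise<ASRSession>;

// Tries the primary config, then each fallback in order, as the
// fallbackModels documentation above describes.
async function openWithFallbacks(config: ASRRequestConfig): Promise<ASRSession> {
  const candidates = [config, ...(config.fallbackModels ?? [])];
  let lastError: unknown;
  for (const candidate of candidates) {
    try {
      return await openSession(candidate);
    } catch (err) {
      lastError = err; // fall through to the next candidate
    }
  }
  throw lastError;
}
```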
+
+ /**
+ * Standard stage/environment constants used across all services
+ */
+ declare const STAGES: {
+ readonly LOCAL: "local";
+ readonly DEV: "dev";
+ readonly STAGING: "staging";
+ readonly PRODUCTION: "production";
+ };
+ type Stage = typeof STAGES[keyof typeof STAGES];
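`Stage` uses the const-object-as-enum pattern: `typeof STAGES[keyof typeof STAGES]` collapses the object's value types into a string-literal union. A standalone illustration of the same pattern:

```typescript
// Standalone illustration of the STAGES/Stage pattern above.
const STAGES = {
  LOCAL: 'local',
  DEV: 'dev',
  STAGING: 'staging',
  PRODUCTION: 'production',
} as const;

type Stage = typeof STAGES[keyof typeof STAGES]; // "local" | "dev" | "staging" | "production"

const ok: Stage = STAGES.STAGING; // type-checks
// const bad: Stage = 'qa';       // would be a compile error
```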
+
+ /**
+ * Generic WebSocket protocol types and utilities
+ * Supports flexible versioning and message types
+ * Used by both client and server implementations
+ */
+
+ /**
+ * Base message structure - completely flexible
+ * @template V - Version type (number, string, etc.)
+ */
+ interface Message<V = number> {
+ v: V;
+ type: string;
+ data?: unknown;
+ }
+ /**
+ * Version serializer interface
+ * Converts between version type V and byte representation
+ */
+ interface VersionSerializer<V> {
+ serialize: (v: V) => number;
+ deserialize: (byte: number) => V;
+ }
+ }
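Since `VersionSerializer` converts between the version type and a single byte, the numeric case is nearly an identity. A minimal sketch, assuming the `VersionSerializer` interface declared above is in scope:

```typescript
// A trivial VersionSerializer for numeric versions: the version already fits
// in one byte, so serialize/deserialize are near-identities with a range check.
const numericVersionSerializer: VersionSerializer<number> = {
  serialize: (v) => {
    if (!Number.isInteger(v) || v < 0 || v > 255) {
      throw new RangeError(`version ${v} does not fit in one byte`);
    }
    return v;
  },
  deserialize: (byte) => byte,
};
```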
+
+ /**
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
+ * Sends audio and control messages, receives responses from server
+ *
+ * Features:
+ * - Generic version type support (number, string, etc.)
+ * - Type-safe upward/downward message data
+ * - Client-side backpressure monitoring
+ * - Abstract hooks for application-specific logic
+ * - Format-agnostic audio protocol (supports any encoding)
+ */
+
+ type ClientConfig = {
+ url: string;
+ highWM?: number;
+ lowWM?: number;
+ };
+ /**
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
+ * that send audio frames and JSON messages
+ *
+ * @template V - Version type (number, string, object, etc.)
+ * @template TUpward - Type of upward message data (Client -> Server)
+ * @template TDownward - Type of downward message data (Server -> Client)
+ *
+ * @example
+ * ```typescript
+ * class MyClient extends WebSocketAudioClient<number, MyUpMsg, MyDownMsg> {
+ * protected onConnected() {
+ * console.log('Connected!');
+ * }
+ *
+ * protected onMessage(msg) {
+ * console.log('Received:', msg.type, msg.data);
+ * }
+ *
+ * protected onDisconnected(code, reason) {
+ * console.log('Disconnected:', code, reason);
+ * }
+ *
+ * protected onError(error) {
+ * console.error('Error:', error);
+ * }
+ * }
+ *
+ * const client = new MyClient({ url: 'ws://localhost:8080' });
+ * client.connect();
+ * client.sendMessage(1, 'configure', { language: 'en' });
+ * client.sendAudio(audioData);
+ * ```
+ */
+ declare abstract class WebSocketAudioClient<V = number, // Version type (default: number)
+ TUpward = unknown, // Upward message data type
+ TDownward = unknown> {
+ private cfg;
+ protected versionSerializer: VersionSerializer<V>;
+ private ws;
+ private seq;
+ private HWM;
+ private LWM;
+ constructor(cfg: ClientConfig, versionSerializer?: VersionSerializer<V>);
+ /**
+ * Hook: Called when WebSocket connection is established
+ */
+ protected abstract onConnected(): void;
+ /**
+ * Hook: Called when WebSocket connection closes
+ * @param code - Close code (see WebSocketCloseCode enum)
+ * @param reason - Human-readable close reason
+ */
+ protected abstract onDisconnected(code: number, reason: string): void;
+ /**
+ * Hook: Called when WebSocket error occurs
+ */
+ protected abstract onError(error: Event): void;
+ /**
+ * Hook: Called when downward message arrives from server
+ * Override this to handle messages (optional - default does nothing)
+ */
+ protected onMessage(_msg: Message<V> & {
+ data: TDownward;
+ }): void;
+ connect(): void;
+ /**
+ * Send JSON message to server
+ * @param version - Message version
+ * @param type - Message type (developer defined)
+ * @param data - Message payload (typed)
+ */
+ sendMessage(version: V, type: string, data: TUpward): void;
+ /**
+ * Send audio frame with specified encoding and sample rate
+ * @param audioData - Audio data (any format: Int16Array, Uint8Array, ArrayBuffer, etc.)
+ * @param version - Audio frame version
+ * @param encodingId - Audio encoding ID (0-5, e.g., AudioEncoding.LINEAR16)
+ * @param sampleRate - Sample rate in Hz (e.g., 16000)
+ */
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView, version: V, encodingId: number, sampleRate: number): void;
+ /**
+ * Get current WebSocket buffer size
+ */
+ getBufferedAmount(): number;
+ /**
+ * Check if local buffer is backpressured
+ */
+ isLocalBackpressured(): boolean;
+ /**
+ * Check if ready to send audio
+ * Verifies: connection open, no local buffer pressure
+ */
+ canSend(): boolean;
+ /**
+ * Check if connection is open
+ */
+ isOpen(): boolean;
+ /**
+ * Get current connection state
+ */
+ getReadyState(): number;
+ /**
+ * Close the WebSocket connection
+ * Protected method for subclasses to implement disconnect logic
+ * @param code - WebSocket close code (default: 1000 = normal closure)
+ * @param reason - Human-readable close reason
+ */
+ protected closeConnection(code?: number, reason?: string): void;
+ }
+
+ /**
+ * Recognition Client Types
+ *
+ * Type definitions and interfaces for the recognition client SDK.
+ * These interfaces enable dependency injection, testing, and alternative implementations.
+ */
+
+ /**
+ * Client connection state enum
+ * Represents the various states a recognition client can be in during its lifecycle
+ */
+ declare enum ClientState {
+ /** Initial state, no connection established */
+ INITIAL = "initial",
+ /** Actively establishing WebSocket connection */
+ CONNECTING = "connecting",
+ /** WebSocket connected but waiting for server ready signal */
+ CONNECTED = "connected",
+ /** Server ready, can send audio */
+ READY = "ready",
+ /** Sent stop signal, waiting for final transcript */
+ STOPPING = "stopping",
+ /** Connection closed normally after stop */
+ STOPPED = "stopped",
+ /** Connection failed or lost unexpectedly */
+ FAILED = "failed"
+ }
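The member comments above imply a linear lifecycle (INITIAL → CONNECTING → CONNECTED → READY → STOPPING → STOPPED, with FAILED as the error sink). A small guard derived from those comments; the local type mirror is illustrative, since `ClientState` itself is not exported:

```typescript
// Mirrors the ClientState string values declared above.
type ClientStateName =
  | 'initial' | 'connecting' | 'connected'
  | 'ready' | 'stopping' | 'stopped' | 'failed';

// Per the comments above, audio should only be sent once the server has
// signalled readiness; every other state is either too early or too late.
function canSendAudio(state: ClientStateName): boolean {
  return state === 'ready';
}
```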
+ /**
+ * Callback URL configuration with message type filtering
+ */
+ interface RecognitionCallbackUrl {
+ /** The callback URL endpoint */
+ url: string;
+ /** Array of message types to send to this URL. If empty/undefined, all types are sent */
+ messageTypes?: Array<string | number>;
+ }
+ interface IRecognitionClientConfig {
+ /**
+ * WebSocket endpoint URL (optional)
+ * Either `url` or `stage` must be provided.
+ * If both are provided, `url` takes precedence.
+ *
+ * Example with explicit URL:
+ * ```typescript
+ * { url: 'wss://custom-endpoint.example.com/ws/v1/recognize' }
+ * ```
+ */
+ url?: string;
+ /**
+ * Stage for recognition service (recommended)
+ * Either `url` or `stage` must be provided.
+ * If both are provided, `url` takes precedence.
+ * Defaults to production if neither is provided.
+ *
+ * Example with STAGES enum (recommended):
+ * ```typescript
+ * import { STAGES } from '@recog/shared-types';
+ * { stage: STAGES.STAGING }
+ * ```
+ *
+ * String values also accepted:
+ * ```typescript
+ * { stage: 'staging' } // STAGES.LOCAL | STAGES.DEV | STAGES.STAGING | STAGES.PRODUCTION
+ * ```
+ */
+ stage?: Stage | string;
+ /** ASR configuration (provider, model, language, etc.) - optional */
+ asrRequestConfig?: ASRRequestConfig;
+ /** Game context for improved recognition accuracy */
+ gameContext?: GameContextV1;
+ /** Audio utterance ID (optional) - if not provided, a UUID v4 will be generated */
+ audioUtteranceId?: string;
+ /** Callback URLs for server-side notifications with optional message type filtering (optional)
+ * The game side only needs to use this if another service must be notified of the transcription results.
+ */
+ callbackUrls?: RecognitionCallbackUrl[];
+ /** User identification (optional) */
+ userId?: string;
+ /** Game session identification (optional). Called 'sessionId' in Platform and most games. */
+ gameSessionId?: string;
+ /** Device identification (optional) */
+ deviceId?: string;
+ /** Account identification (optional) */
+ accountId?: string;
+ /** Question answer identifier for tracking Q&A sessions (optional, tracking purposes only) */
+ questionAnswerId?: string;
+ /** Platform for audio recording device (optional, e.g., 'ios', 'android', 'web', 'unity') */
+ platform?: string;
+ /** Callback when transcript is received */
+ onTranscript?: (result: TranscriptionResultV1) => void;
+ /**
+ * Callback when function call is received
+ * Note: Not supported in 2025. P2 feature for future speech-to-function-call capability.
+ */
+ onFunctionCall?: (result: FunctionCallResultV1) => void;
+ /** Callback when metadata is received. Only fired once, after transcription is complete. */
+ onMetadata?: (metadata: MetadataResultV1) => void;
+ /** Callback when error occurs */
+ onError?: (error: ErrorResultV1) => void;
+ /** Callback when connected to WebSocket */
+ onConnected?: () => void;
+ /**
+ * Callback when WebSocket disconnects
+ * @param code - WebSocket close code (1000 = normal, 1006 = abnormal, etc.)
+ * @param reason - Close reason string
+ */
+ onDisconnected?: (code: number, reason: string) => void;
+ /** High water mark for backpressure control (bytes) */
+ highWaterMark?: number;
+ /** Low water mark for backpressure control (bytes) */
+ lowWaterMark?: number;
+ /** Maximum buffer duration in seconds (default: 60s) */
+ maxBufferDurationSec?: number;
+ /** Expected chunks per second for ring buffer sizing (default: 100) */
+ chunksPerSecond?: number;
+ /**
+ * Connection retry configuration (optional)
+ * Only applies to initial connection establishment, not mid-stream interruptions.
+ *
+ * Default: { maxAttempts: 4, delayMs: 200 } (try once, retry 3 times = 4 total attempts)
+ *
+ * Timing: Attempt 1 → FAIL → wait 200ms → Attempt 2 → FAIL → wait 200ms → Attempt 3 → FAIL → wait 200ms → Attempt 4
+ *
+ * Example:
+ * ```typescript
+ * {
+ * connectionRetry: {
+ * maxAttempts: 2, // Try connecting up to 2 times (1 retry)
+ * delayMs: 500 // Wait 500ms between attempts
+ * }
+ * }
+ * ```
+ */
+ connectionRetry?: {
+ /** Maximum number of connection attempts (default: 4, min: 1, max: 5) */
+ maxAttempts?: number;
+ /** Delay in milliseconds between retry attempts (default: 200ms) */
+ delayMs?: number;
+ };
+ /**
+ * Optional logger function for debugging
+ * If not provided, no logging will occur
+ * @param level - Log level: 'debug', 'info', 'warn', 'error'
+ * @param message - Log message
+ * @param data - Optional additional data
+ */
+ logger?: (level: 'debug' | 'info' | 'warn' | 'error', message: string, data?: any) => void;
+ }
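A config combining several of the fields documented above, typed against the exported `RealTimeTwoWayWebSocketRecognitionClientConfig` (which extends this interface; see further down). The stage value, retry settings, and console logger are illustrative choices:

```typescript
import { RealTimeTwoWayWebSocketRecognitionClient } from '@volley/recognition-client-sdk';
import type { RealTimeTwoWayWebSocketRecognitionClientConfig } from '@volley/recognition-client-sdk';

const config: RealTimeTwoWayWebSocketRecognitionClientConfig = {
  stage: 'staging',                                   // or an explicit `url`
  connectionRetry: { maxAttempts: 2, delayMs: 500 },  // 1 retry, 500ms apart
  onTranscript: (result) => console.log('transcript:', result.finalTranscript),
  onError: (error) => console.error('recognition error:', error.message),
  logger: (level, message, data) => console.log(`[${level}] ${message}`, data ?? ''),
};

const client = new RealTimeTwoWayWebSocketRecognitionClient(config);
```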
+ /**
+ * Recognition Client Interface
+ *
+ * Main interface for real-time speech recognition clients.
+ * Provides methods for connection management, audio streaming, and session control.
+ */
+ interface IRecognitionClient {
+ /**
+ * Connect to the WebSocket endpoint
+ * @returns Promise that resolves when connected
+ * @throws Error if connection fails or times out
+ */
+ connect(): Promise<void>;
+ /**
+ * Send audio data to the recognition service
+ * Audio is buffered locally and sent when connection is ready.
+ * @param audioData - PCM audio data as ArrayBuffer, typed array view, or Blob
+ */
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
+ /**
+ * Stop recording and wait for final transcript
+ * The server will close the connection after sending the final transcript.
+ * @returns Promise that resolves when final transcript is received
+ */
+ stopRecording(): Promise<void>;
+ /**
+ * Force stop and immediately close connection without waiting for server
+ *
+ * WARNING: This is an abnormal shutdown that bypasses the graceful stop flow:
+ * - Does NOT wait for server to process remaining audio
+ * - Does NOT receive final transcript from server
+ * - Immediately closes WebSocket connection
+ * - Cleans up resources (buffers, listeners)
+ *
+ * Use Cases:
+ * - User explicitly cancels/abandons session
+ * - Timeout scenarios where waiting is not acceptable
+ * - Need immediate cleanup and can't wait for server
+ *
+ * RECOMMENDED: Use stopRecording() for normal shutdown.
+ * Only use this when immediate disconnection is required.
+ */
+ stopAbnormally(): void;
+ /**
+ * Get the audio utterance ID for this session
+ * Available immediately after client construction.
+ * @returns UUID v4 string identifying this recognition session
+ */
+ getAudioUtteranceId(): string;
+ /**
+ * Get the current state of the client
+ * @returns Current ClientState value
+ */
+ getState(): ClientState;
+ /**
+ * Check if WebSocket connection is open
+ * @returns true if connected and ready to communicate
+ */
+ isConnected(): boolean;
+ /**
+ * Check if client is currently connecting
+ * @returns true if connection is in progress
+ */
+ isConnecting(): boolean;
+ /**
+ * Check if client is currently stopping
+ * @returns true if stopRecording() is in progress
+ */
+ isStopping(): boolean;
+ /**
+ * Check if transcription has finished
+ * @returns true if the transcription is complete
+ */
+ isTranscriptionFinished(): boolean;
+ /**
+ * Check if the audio buffer has overflowed
+ * @returns true if the ring buffer has wrapped around
+ */
+ isBufferOverflowing(): boolean;
+ /**
+ * Get client statistics
+ * @returns Statistics about audio transmission and buffering
+ */
+ getStats(): IRecognitionClientStats;
+ /**
+ * Get the WebSocket URL being used by this client
+ * Available immediately after client construction.
+ * @returns WebSocket URL string
+ */
+ getUrl(): string;
+ }
+ /**
+ * Client statistics interface
+ */
+ interface IRecognitionClientStats {
+ /** Total audio bytes sent to server */
+ audioBytesSent: number;
+ /** Total number of audio chunks sent */
+ audioChunksSent: number;
+ /** Total number of audio chunks buffered */
+ audioChunksBuffered: number;
+ /** Number of times the ring buffer overflowed */
+ bufferOverflowCount: number;
+ /** Current number of chunks in buffer */
+ currentBufferedChunks: number;
+ /** Whether the ring buffer has wrapped (overwritten old data) */
+ hasWrapped: boolean;
+ }
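A derived health check built only on the stats fields declared above; the local type mirror and the helper itself are illustrative, not a package API:

```typescript
// Mirrors the IRecognitionClientStats fields declared above.
interface StatsSketch {
  audioChunksSent: number;
  currentBufferedChunks: number;
  bufferOverflowCount: number;
  hasWrapped: boolean;
}

// Per the field docs above, a wrapped ring buffer means old chunks were
// overwritten, so some audio was dropped before it could be sent.
function describeBufferHealth(stats: StatsSketch): string {
  if (stats.hasWrapped) {
    return `buffer wrapped ${stats.bufferOverflowCount} time(s); some audio has been lost`;
  }
  return `${stats.currentBufferedChunks} chunk(s) pending, ${stats.audioChunksSent} sent`;
}
```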
+ /**
+ * Configuration for RealTimeTwoWayWebSocketRecognitionClient
+ * This extends IRecognitionClientConfig and is the main configuration interface
+ * for creating a new RealTimeTwoWayWebSocketRecognitionClient instance.
+ */
+ interface RealTimeTwoWayWebSocketRecognitionClientConfig extends IRecognitionClientConfig {
+ }
+
+ /**
+ * RealTimeTwoWayWebSocketRecognitionClient - Clean, compact SDK for real-time speech recognition
+ *
+ * Features:
+ * - Ring buffer-based audio storage with fixed memory footprint
+ * - Automatic buffering when disconnected, immediate send when connected
+ * - Buffer persists after flush (for future retry/reconnection scenarios)
+ * - Built on WebSocketAudioClient for robust protocol handling
+ * - Simple API: connect() → sendAudio() → stopRecording()
+ * - Type-safe message handling with callbacks
+ * - Automatic backpressure management
+ * - Overflow detection with buffer state tracking
+ *
+ * Example:
+ * ```typescript
+ * const client = new RealTimeTwoWayWebSocketRecognitionClient({
+ * url: 'ws://localhost:3101/ws/v1/recognize',
+ * onTranscript: (result) => console.log(result.finalTranscript),
+ * onError: (error) => console.error(error),
+ * maxBufferDurationSec: 60 // Ring buffer for 60 seconds
+ * });
+ *
+ * await client.connect();
+ *
+ * // Send audio chunks - always stored in ring buffer, sent if connected
+ * micStream.on('data', (chunk) => client.sendAudio(chunk));
+ *
+ * // Signal end of audio and wait for final results
+ * await client.stopRecording();
+ *
+ * // Server will close connection after sending finals
+ * // No manual cleanup needed - browser handles it
+ * ```
+ */
+
+ /**
+ * Re-export TranscriptionResultV1 as TranscriptionResult for backward compatibility
+ */
+ type TranscriptionResult = TranscriptionResultV1;
+
+ /**
+ * RealTimeTwoWayWebSocketRecognitionClient - SDK-level client for real-time speech recognition
+ *
+ * Implements IRecognitionClient interface for dependency injection and testing.
+ * Extends WebSocketAudioClient with local audio buffering and simple callback-based API.
+ */
+ declare class RealTimeTwoWayWebSocketRecognitionClient extends WebSocketAudioClient<number, any, any> implements IRecognitionClient {
+ private static readonly PROTOCOL_VERSION;
+ private config;
+ private audioBuffer;
+ private messageHandler;
+ private state;
+ private connectionPromise;
+ private isDebugLogEnabled;
+ private audioBytesSent;
+ private audioChunksSent;
+ private audioStatsLogInterval;
+ private lastAudioStatsLog;
+ constructor(config: RealTimeTwoWayWebSocketRecognitionClientConfig);
+ /**
+ * Internal logging helper - only logs if a logger was provided in config
+ * Debug logs are additionally gated by isDebugLogEnabled flag
+ * @param level - Log level: debug, info, warn, or error
+ * @param message - Message to log
+ * @param data - Optional additional data to log
+ */
+ private log;
+ /**
+ * Clean up internal resources to free memory
+ * Called when connection closes (normally or abnormally)
+ */
+ private cleanup;
+ connect(): Promise<void>;
+ /**
+ * Attempt to connect with retry logic
+ * Only retries on initial connection establishment, not mid-stream interruptions
+ */
+ private connectWithRetry;
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
+ private sendAudioInternal;
+ stopRecording(): Promise<void>;
+ stopAbnormally(): void;
+ getAudioUtteranceId(): string;
+ getUrl(): string;
+ getState(): ClientState;
+ isConnected(): boolean;
+ isConnecting(): boolean;
+ isStopping(): boolean;
+ isTranscriptionFinished(): boolean;
+ isBufferOverflowing(): boolean;
+ getStats(): IRecognitionClientStats;
+ protected onConnected(): void;
+ protected onDisconnected(code: number, reason: string): void;
+ protected onError(error: Event): void;
+ protected onMessage(msg: {
+ v: number;
+ type: string;
+ data: any;
+ }): void;
+ /**
+ * Handle control messages from server
+ * @param msg - Control message containing server actions
+ */
+ private handleControlMessage;
+ /**
+ * Send audio immediately to the server (without buffering)
+ * @param audioData - Audio data to send
+ */
+ private sendAudioNow;
+ }
+
+ export { AudioEncoding, ControlSignalTypeV1 as ControlSignal, RealTimeTwoWayWebSocketRecognitionClient, RecognitionContextTypeV1 };
+ export type { GameContextV1, RealTimeTwoWayWebSocketRecognitionClientConfig, TranscriptionResult };
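Pulling the pieces together using only the names in the export list above. The WebSocket URL is a placeholder (matching the localhost example in the class docs), and `getNextChunk` is a hypothetical PCM source standing in for a real microphone stream:

```typescript
import {
  AudioEncoding,
  RealTimeTwoWayWebSocketRecognitionClient,
} from '@volley/recognition-client-sdk';
import type { TranscriptionResult } from '@volley/recognition-client-sdk';

async function run(getNextChunk: () => Int16Array | undefined): Promise<void> {
  const client = new RealTimeTwoWayWebSocketRecognitionClient({
    url: 'ws://localhost:3101/ws/v1/recognize', // placeholder endpoint
    onTranscript: (result: TranscriptionResult) =>
      console.log(result.is_finished ? 'final:' : 'partial:', result.finalTranscript),
  });

  await client.connect();
  console.log('encoding id for LINEAR16:', AudioEncoding.toId(AudioEncoding.LINEAR16)); // 1

  // Feed PCM chunks until the (placeholder) source runs dry;
  // sendAudio accepts any ArrayBufferView, including Int16Array.
  for (let chunk = getNextChunk(); chunk; chunk = getNextChunk()) {
    client.sendAudio(chunk);
  }

  await client.stopRecording(); // resolves once the final transcript arrives
}
```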