@volley/recognition-client-sdk 0.1.419 → 0.1.420

@@ -0,0 +1,1185 @@
+ import { z } from 'zod';
+
+ /**
+ * Provider types and enums for recognition services
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ */
+ /**
+ * Supported speech recognition providers
+ */
+ declare enum RecognitionProvider {
+ ASSEMBLYAI = "assemblyai",
+ DEEPGRAM = "deepgram",
+ ELEVENLABS = "elevenlabs",
+ FIREWORKS = "fireworks",
+ GOOGLE = "google",
+ GEMINI_BATCH = "gemini-batch",
+ OPENAI_BATCH = "openai-batch",
+ OPENAI_REALTIME = "openai-realtime"
+ }
+ /**
+ * ASR API type - distinguishes between streaming and file-based transcription APIs
+ * - STREAMING: Real-time streaming APIs (Deepgram, AssemblyAI, Google)
+ * - FILE_BASED: File upload/batch APIs (OpenAI Batch, Gemini Batch)
+ */
+ declare enum ASRApiType {
+ STREAMING = "streaming",
+ FILE_BASED = "file-based"
+ }
+ /**
+ * Deepgram model names
+ */
+ declare enum DeepgramModel {
+ NOVA_2 = "nova-2",
+ NOVA_3 = "nova-3",
+ FLUX_GENERAL_EN = "flux-general-en"
+ }
+ /**
+ * Google Cloud Speech models
+ * @see https://cloud.google.com/speech-to-text/docs/transcription-model
+ * @see https://cloud.google.com/speech-to-text/v2/docs/chirp_3-model
+ */
+ declare enum GoogleModel {
+ CHIRP_3 = "chirp_3",
+ CHIRP_2 = "chirp_2",
+ CHIRP = "chirp",
+ LATEST_LONG = "latest_long",
+ LATEST_SHORT = "latest_short",
+ TELEPHONY = "telephony",
+ TELEPHONY_SHORT = "telephony_short",
+ DEFAULT = "default",
+ COMMAND_AND_SEARCH = "command_and_search",
+ PHONE_CALL = "phone_call",
+ VIDEO = "video"
+ }
+ /**
+ * Fireworks AI models for ASR
+ * @see https://docs.fireworks.ai/guides/querying-asr-models
+ * @see https://fireworks.ai/models/fireworks/fireworks-asr-large
+ */
+ declare enum FireworksModel {
+ ASR_V1 = "fireworks-asr-large",
+ ASR_V2 = "fireworks-asr-v2",
+ WHISPER_V3 = "whisper-v3",
+ WHISPER_V3_TURBO = "whisper-v3-turbo"
+ }
+ /**
+ * ElevenLabs Scribe models for speech-to-text
+ * @see https://elevenlabs.io/blog/introducing-scribe-v2-realtime
+ * @see https://elevenlabs.io/docs/cookbooks/speech-to-text/streaming
+ * @see https://elevenlabs.io/docs/api-reference/speech-to-text/convert
+ */
+ declare enum ElevenLabsModel {
+ SCRIBE_V2_REALTIME = "scribe_v2_realtime",
+ SCRIBE_V1 = "scribe_v1"
+ }
+ /**
+ * OpenAI Realtime API transcription models
+ * These are the verified `input_audio_transcription.model` values.
+ * @see https://platform.openai.com/docs/guides/realtime
+ */
+ declare enum OpenAIRealtimeModel {
+ GPT_4O_MINI_TRANSCRIBE = "gpt-4o-mini-transcribe"
+ }
+ /**
+ * Type alias for any model from any provider
+ */
+ type RecognitionModel = DeepgramModel | GoogleModel | FireworksModel | ElevenLabsModel | OpenAIRealtimeModel | string;
+
+ /**
+ * Audio encoding types
+ */
+ declare enum AudioEncoding {
+ ENCODING_UNSPECIFIED = 0,
+ LINEAR16 = 1,
+ OGG_OPUS = 2,
+ FLAC = 3,
+ MULAW = 4,
+ ALAW = 5
+ }
+ declare namespace AudioEncoding {
+ /**
+ * Convert numeric ID to AudioEncoding enum
+ * @param id - Numeric encoding identifier (0-5)
+ * @returns AudioEncoding enum value or undefined if invalid
+ */
+ function fromId(id: number): AudioEncoding | undefined;
+ /**
+ * Convert string name to AudioEncoding enum
+ * @param nameStr - String name like "linear16", "LINEAR16", "ogg_opus", "OGG_OPUS", etc. (case insensitive)
+ * @returns AudioEncoding enum value or undefined if invalid
+ */
+ function fromName(nameStr: string): AudioEncoding | undefined;
+ /**
+ * Convert AudioEncoding enum to numeric ID
+ * @param encoding - AudioEncoding enum value
+ * @returns Numeric ID (0-5)
+ */
+ function toId(encoding: AudioEncoding): number;
+ /**
+ * Convert AudioEncoding enum to string name
+ * @param encoding - AudioEncoding enum value
+ * @returns String name like "LINEAR16", "MULAW", etc.
+ */
+ function toName(encoding: AudioEncoding): string;
+ /**
+ * Check if a numeric ID is a valid encoding
+ * @param id - Numeric identifier to validate
+ * @returns true if valid encoding ID
+ */
+ function isIdValid(id: number): boolean;
+ /**
+ * Check if a string name is a valid encoding
+ * @param nameStr - String name to validate
+ * @returns true if valid encoding name
+ */
+ function isNameValid(nameStr: string): boolean;
+ }
+ /**
+ * Common sample rates (in Hz)
+ */
+ declare enum SampleRate {
+ RATE_8000 = 8000,
+ RATE_16000 = 16000,
+ RATE_22050 = 22050,
+ RATE_24000 = 24000,
+ RATE_32000 = 32000,
+ RATE_44100 = 44100,
+ RATE_48000 = 48000
+ }
+ declare namespace SampleRate {
+ /**
+ * Convert Hz value to SampleRate enum
+ * @param hz - Sample rate in Hz (8000, 16000, etc.)
+ * @returns SampleRate enum value or undefined if invalid
+ */
+ function fromHz(hz: number): SampleRate | undefined;
+ /**
+ * Convert string name to SampleRate enum
+ * @param nameStr - String name like "rate_8000", "RATE_16000", etc. (case insensitive)
+ * @returns SampleRate enum value or undefined if invalid
+ */
+ function fromName(nameStr: string): SampleRate | undefined;
+ /**
+ * Convert SampleRate enum to Hz value
+ * @param rate - SampleRate enum value
+ * @returns Hz value (8000, 16000, etc.)
+ */
+ function toHz(rate: SampleRate): number;
+ /**
+ * Convert SampleRate enum to string name
+ * @param rate - SampleRate enum value
+ * @returns String name like "RATE_8000", "RATE_16000", etc.
+ */
+ function toName(rate: SampleRate): string;
+ /**
+ * Check if a numeric Hz value is a valid sample rate
+ * @param hz - Hz value to validate
+ * @returns true if valid sample rate
+ */
+ function isHzValid(hz: number): boolean;
+ /**
+ * Check if a string name is a valid sample rate
+ * @param nameStr - String name to validate
+ * @returns true if valid sample rate name
+ */
+ function isNameValid(nameStr: string): boolean;
+ }
+ /**
+ * Supported languages for recognition
+ * Using BCP-47 language tags
+ */
+ declare enum Language {
+ ENGLISH_US = "en-US",
+ ENGLISH_GB = "en-GB",
+ SPANISH_ES = "es-ES",
+ SPANISH_MX = "es-MX",
+ FRENCH_FR = "fr-FR",
+ GERMAN_DE = "de-DE",
+ ITALIAN_IT = "it-IT",
+ PORTUGUESE_BR = "pt-BR",
+ JAPANESE_JP = "ja-JP",
+ KOREAN_KR = "ko-KR",
+ CHINESE_CN = "zh-CN",
+ CHINESE_TW = "zh-TW"
+ }
+
+ /**
+ * Recognition Result Types V1
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ * Types and schemas for recognition results sent to SDK clients
+ */
+
+ /**
+ * Message type discriminator for recognition results V1
+ */
+ declare enum RecognitionResultTypeV1 {
+ TRANSCRIPTION = "Transcription",
+ FUNCTION_CALL = "FunctionCall",
+ METADATA = "Metadata",
+ ERROR = "Error",
+ CLIENT_CONTROL_MESSAGE = "ClientControlMessage",
+ AUDIO_METRICS = "AudioMetrics"
+ }
+ /**
+ * Transcription result V1 - contains transcript message
+ * In the long run the game side should not need to know about it. In the short run it is sent back to the client.
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ */
+ declare const TranscriptionResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.TRANSCRIPTION>;
+ audioUtteranceId: z.ZodString;
+ finalTranscript: z.ZodString;
+ finalTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
+ pendingTranscript: z.ZodOptional<z.ZodString>;
+ pendingTranscriptConfidence: z.ZodOptional<z.ZodNumber>;
+ is_finished: z.ZodBoolean;
+ voiceStart: z.ZodOptional<z.ZodNumber>;
+ voiceDuration: z.ZodOptional<z.ZodNumber>;
+ voiceEnd: z.ZodOptional<z.ZodNumber>;
+ startTimestamp: z.ZodOptional<z.ZodNumber>;
+ endTimestamp: z.ZodOptional<z.ZodNumber>;
+ receivedAtMs: z.ZodOptional<z.ZodNumber>;
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
+ audioUtteranceId: string;
+ finalTranscript: string;
+ is_finished: boolean;
+ finalTranscriptConfidence?: number | undefined;
+ pendingTranscript?: string | undefined;
+ pendingTranscriptConfidence?: number | undefined;
+ voiceStart?: number | undefined;
+ voiceDuration?: number | undefined;
+ voiceEnd?: number | undefined;
+ startTimestamp?: number | undefined;
+ endTimestamp?: number | undefined;
+ receivedAtMs?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ }, {
+ type: RecognitionResultTypeV1.TRANSCRIPTION;
+ audioUtteranceId: string;
+ finalTranscript: string;
+ is_finished: boolean;
+ finalTranscriptConfidence?: number | undefined;
+ pendingTranscript?: string | undefined;
+ pendingTranscriptConfidence?: number | undefined;
+ voiceStart?: number | undefined;
+ voiceDuration?: number | undefined;
+ voiceEnd?: number | undefined;
+ startTimestamp?: number | undefined;
+ endTimestamp?: number | undefined;
+ receivedAtMs?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ }>;
+ type TranscriptionResultV1 = z.infer<typeof TranscriptionResultSchemaV1>;
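A minimal handler sketch for this shape, assuming `finalTranscript` carries the stabilized text and `pendingTranscript` the still-changing interim tail (that split is our reading of the field names, not something the declarations state):

```typescript
// Sketch of an onTranscript callback built on TranscriptionResultV1.
function handleTranscript(result: TranscriptionResultV1): void {
  if (!result.is_finished) {
    // Interim update: stable prefix plus in-flight tail.
    console.log('interim:', `${result.finalTranscript} ${result.pendingTranscript ?? ''}`);
  } else {
    // Final result for this utterance; confidence is optional.
    console.log('final:', result.finalTranscript, result.finalTranscriptConfidence ?? 'n/a');
  }
}
```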
+ /**
+ * Function call result V1 - similar to an LLM function call
+ * In the long run the game server should consume it, rather than the TV or client.
+ */
+ declare const FunctionCallResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.FUNCTION_CALL>;
+ audioUtteranceId: z.ZodString;
+ functionName: z.ZodString;
+ functionArgJson: z.ZodString;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
+ audioUtteranceId: string;
+ functionName: string;
+ functionArgJson: string;
+ }, {
+ type: RecognitionResultTypeV1.FUNCTION_CALL;
+ audioUtteranceId: string;
+ functionName: string;
+ functionArgJson: string;
+ }>;
+ type FunctionCallResultV1 = z.infer<typeof FunctionCallResultSchemaV1>;
+ /**
+ * Transcript outcome type - categorizes final transcript state
+ * Used in Metadata schema. Maps 1:1 with Datadog metrics:
+ * - WITH_CONTENT → recog.client.websocket.transcript.final_with_content
+ * - EMPTY → recog.client.websocket.transcript.final_empty
+ * - NEVER_SENT → derived from sessions.streamed - final_with_content - final_empty
+ */
+ declare enum TranscriptOutcomeType {
+ WITH_CONTENT = "with_content",
+ EMPTY = "empty",
+ NEVER_SENT = "never_sent"
+ }
+ /**
+ * Metadata result V1 - contains metadata, timing information, and ASR config
+ * Sent when the provider connection closes to provide final timing metrics and config
+ * In the long run the game server should consume it, rather than the TV or client.
+ */
+ declare const MetadataResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.METADATA>;
+ audioUtteranceId: z.ZodString;
+ recordingStartMs: z.ZodOptional<z.ZodNumber>;
+ recordingEndMs: z.ZodOptional<z.ZodNumber>;
+ transcriptEndMs: z.ZodOptional<z.ZodNumber>;
+ socketCloseAtMs: z.ZodOptional<z.ZodNumber>;
+ duration: z.ZodOptional<z.ZodNumber>;
+ volume: z.ZodOptional<z.ZodNumber>;
+ accumulatedAudioTimeMs: z.ZodOptional<z.ZodNumber>;
+ costInUSD: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
+ apiType: z.ZodOptional<z.ZodNativeEnum<typeof ASRApiType>>;
+ asrConfig: z.ZodOptional<z.ZodString>;
+ rawAsrMetadata: z.ZodOptional<z.ZodString>;
+ transcriptOutcome: z.ZodOptional<z.ZodNativeEnum<typeof TranscriptOutcomeType>>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.METADATA;
+ audioUtteranceId: string;
+ recordingStartMs?: number | undefined;
+ recordingEndMs?: number | undefined;
+ transcriptEndMs?: number | undefined;
+ socketCloseAtMs?: number | undefined;
+ duration?: number | undefined;
+ volume?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ costInUSD?: number | undefined;
+ apiType?: ASRApiType | undefined;
+ asrConfig?: string | undefined;
+ rawAsrMetadata?: string | undefined;
+ transcriptOutcome?: TranscriptOutcomeType | undefined;
+ }, {
+ type: RecognitionResultTypeV1.METADATA;
+ audioUtteranceId: string;
+ recordingStartMs?: number | undefined;
+ recordingEndMs?: number | undefined;
+ transcriptEndMs?: number | undefined;
+ socketCloseAtMs?: number | undefined;
+ duration?: number | undefined;
+ volume?: number | undefined;
+ accumulatedAudioTimeMs?: number | undefined;
+ costInUSD?: number | undefined;
+ apiType?: ASRApiType | undefined;
+ asrConfig?: string | undefined;
+ rawAsrMetadata?: string | undefined;
+ transcriptOutcome?: TranscriptOutcomeType | undefined;
+ }>;
+ type MetadataResultV1 = z.infer<typeof MetadataResultSchemaV1>;
+ /**
+ * Error type enum V1 - categorizes different types of errors
+ */
+ declare enum ErrorTypeV1 {
+ AUTHENTICATION_ERROR = "authentication_error",
+ VALIDATION_ERROR = "validation_error",
+ PROVIDER_ERROR = "provider_error",
+ TIMEOUT_ERROR = "timeout_error",
+ QUOTA_EXCEEDED = "quota_exceeded",
+ CONNECTION_ERROR = "connection_error",
+ UNKNOWN_ERROR = "unknown_error"
+ }
+ /**
+ * Error result V1 - contains error message
+ * In the long run the game server should consume it, rather than the TV or client.
+ */
+ declare const ErrorResultSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionResultTypeV1.ERROR>;
+ audioUtteranceId: z.ZodString;
+ errorType: z.ZodOptional<z.ZodNativeEnum<typeof ErrorTypeV1>>;
+ message: z.ZodOptional<z.ZodString>;
+ code: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodNumber]>>;
+ description: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionResultTypeV1.ERROR;
+ audioUtteranceId: string;
+ errorType?: ErrorTypeV1 | undefined;
+ message?: string | undefined;
+ code?: string | number | undefined;
+ description?: string | undefined;
+ }, {
+ type: RecognitionResultTypeV1.ERROR;
+ audioUtteranceId: string;
+ errorType?: ErrorTypeV1 | undefined;
+ message?: string | undefined;
+ code?: string | number | undefined;
+ description?: string | undefined;
+ }>;
+ type ErrorResultV1 = z.infer<typeof ErrorResultSchemaV1>;
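With all four V1 result schemas in view, the `type` field works as a discriminant. A dispatch sketch (the union alias below is ours for illustration; the SDK itself delivers these through separate callbacks, and `RecognitionResultTypeV1` is internal to these declarations):

```typescript
// Hypothetical union over the V1 result types declared above.
type RecognitionResultV1 = TranscriptionResultV1 | FunctionCallResultV1 | MetadataResultV1 | ErrorResultV1;

function dispatch(result: RecognitionResultV1): void {
  switch (result.type) {
    case RecognitionResultTypeV1.TRANSCRIPTION:
      console.log('transcript:', result.finalTranscript, 'finished:', result.is_finished);
      break;
    case RecognitionResultTypeV1.FUNCTION_CALL:
      console.log('call:', result.functionName, JSON.parse(result.functionArgJson));
      break;
    case RecognitionResultTypeV1.METADATA:
      console.log('session ended:', result.audioUtteranceId, result.transcriptOutcome);
      break;
    case RecognitionResultTypeV1.ERROR:
      console.error(result.errorType ?? 'unknown_error', result.message);
      break;
  }
}
```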
+
+ /**
+ * Recognition Context Types V1
+ * NOTE_TO_AI: DO NOT CHANGE THIS UNLESS EXPLICITLY ASKED. Always ask before making any changes.
+ * Types and schemas for recognition context data
+ */
+
+ /**
+ * Message type discriminator for recognition context V1
+ */
+ declare enum RecognitionContextTypeV1 {
+ GAME_CONTEXT = "GameContext",
+ CONTROL_SIGNAL = "ControlSignal",
+ ASR_REQUEST = "ASRRequest"
+ }
+ /**
+ * Control signal types for recognition V1
+ */
+ declare enum ControlSignalTypeV1 {
+ START_RECORDING = "start_recording",
+ STOP_RECORDING = "stop_recording"
+ }
+ /**
+ * Game context V1 - contains game state information
+ */
+ declare const GameContextSchemaV1: z.ZodObject<{
+ type: z.ZodLiteral<RecognitionContextTypeV1.GAME_CONTEXT>;
+ gameId: z.ZodString;
+ gamePhase: z.ZodString;
+ promptSTT: z.ZodOptional<z.ZodString>;
+ promptSTF: z.ZodOptional<z.ZodString>;
+ promptTTF: z.ZodOptional<z.ZodString>;
+ slotMap: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodArray<z.ZodString, "many">>>;
+ }, "strip", z.ZodTypeAny, {
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
+ gameId: string;
+ gamePhase: string;
+ promptSTT?: string | undefined;
+ promptSTF?: string | undefined;
+ promptTTF?: string | undefined;
+ slotMap?: Record<string, string[]> | undefined;
+ }, {
+ type: RecognitionContextTypeV1.GAME_CONTEXT;
+ gameId: string;
+ gamePhase: string;
+ promptSTT?: string | undefined;
+ promptSTF?: string | undefined;
+ promptTTF?: string | undefined;
+ slotMap?: Record<string, string[]> | undefined;
+ }>;
+ type GameContextV1 = z.infer<typeof GameContextSchemaV1>;
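A value conforming to this schema (`GameContextV1` and `RecognitionContextTypeV1` are both exported). The `gameId`, `gamePhase`, and `slotMap` entries are made-up examples:

```typescript
const context: GameContextV1 = {
  type: RecognitionContextTypeV1.GAME_CONTEXT,
  gameId: 'song-quiz',
  gamePhase: 'guessing',
  slotMap: {
    // slot name -> candidate phrases for this phase
    songTitle: ['Bohemian Rhapsody', 'Hotel California'],
  },
};
```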
+
+ /**
+ * Unified ASR Request Configuration
+ *
+ * Provider-agnostic configuration for ASR (Automatic Speech Recognition) requests.
+ * This interface provides a consistent API for clients regardless of the underlying provider.
+ *
+ * All fields use library-defined enums for type safety and consistency.
+ * Provider-specific mappers will convert these to provider-native formats.
+ */
+
+ /**
+ * Final transcript stability modes
+ *
+ * Controls timeout duration for fallback final transcript after stopRecording().
+ * Similar to AssemblyAI's turn detection confidence modes but applied to our
+ * internal timeout mechanism when vendors don't respond with is_final=true.
+ *
+ * @see https://www.assemblyai.com/docs/speech-to-text/universal-streaming/turn-detection
+ */
+ declare enum FinalTranscriptStability {
+ /**
+ * Aggressive mode: 100ms timeout
+ * Fast response, optimized for short utterances and quick back-and-forth
+ * Use cases: IVR, quick commands, retail confirmations
+ */
+ AGGRESSIVE = "aggressive",
+ /**
+ * Balanced mode: 200ms timeout (default)
+ * Natural middle ground for most conversational scenarios
+ * Use cases: General customer support, tech support, typical voice interactions
+ */
+ BALANCED = "balanced",
+ /**
+ * Conservative mode: 400ms timeout
+ * Wait longer for providers, optimized for complex/reflective speech
+ * Use cases: Healthcare, complex queries, careful thought processes
+ */
+ CONSERVATIVE = "conservative",
+ /**
+ * Experimental mode: 10000ms (10 seconds) timeout
+ * Very long wait for batch/async providers that need significant processing time
+ * Use cases: Batch processing (Gemini, OpenAI Whisper), complex audio analysis
+ * Note: Should be cancelled immediately when transcript is received
+ */
+ EXPERIMENTAL = "experimental"
+ }
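The doc comments above pin each mode to a concrete timeout. As a lookup table (the table shape is our illustration; the SDK does not export such a mapping):

```typescript
// Fallback-final-transcript timeout per stability mode, per the doc comments.
const FALLBACK_TIMEOUT_MS: Record<FinalTranscriptStability, number> = {
  [FinalTranscriptStability.AGGRESSIVE]: 100,
  [FinalTranscriptStability.BALANCED]: 200,      // default
  [FinalTranscriptStability.CONSERVATIVE]: 400,
  [FinalTranscriptStability.EXPERIMENTAL]: 10_000, // cancelled once a transcript arrives
};
```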
+ /**
+ * Unified ASR request configuration
+ *
+ * This configuration is used by:
+ * - Client SDKs to specify recognition parameters
+ * - Demo applications for user input
+ * - Service layer to configure provider sessions
+ *
+ * Core fields only - all provider-specific options go in providerOptions
+ *
+ * @example
+ * ```typescript
+ * const config: ASRRequestConfig = {
+ * provider: RecognitionProvider.GOOGLE,
+ * model: GoogleModel.LATEST_LONG,
+ * language: Language.ENGLISH_US,
+ * sampleRate: SampleRate.RATE_16000, // or just 16000
+ * encoding: AudioEncoding.LINEAR16,
+ * providerOptions: {
+ * google: {
+ * enableAutomaticPunctuation: true,
+ * interimResults: true,
+ * singleUtterance: false
+ * }
+ * }
+ * };
+ * ```
+ */
+ interface ASRRequestConfig {
+ /**
+ * The ASR provider to use
+ * Must be one of the supported providers in RecognitionProvider enum
+ */
+ provider: RecognitionProvider | string;
+ /**
+ * Optional model specification for the provider
+ * Can be provider-specific model enum or string
+ * If not specified, provider's default model will be used
+ */
+ model?: RecognitionModel;
+ /**
+ * Language/locale for recognition
+ * Use Language enum for common languages
+ * Can also accept BCP-47 language tags as strings
+ */
+ language: Language | string;
+ /**
+ * Audio sample rate in Hz
+ * Prefer using SampleRate enum values for standard rates
+ * Can also accept numeric Hz values (e.g., 16000)
+ */
+ sampleRate: SampleRate | number;
+ /**
+ * Audio encoding format
+ * Must match the actual audio data being sent
+ * Use AudioEncoding enum for standard formats
+ */
+ encoding: AudioEncoding | string;
+ /**
+ * Enable interim (partial) results during recognition
+ * When true, receive real-time updates before finalization
+ * When false, only receive final results
+ * Default: false
+ */
+ interimResults?: boolean;
+ /**
+ * Require GameContext (e.g., song titles) before starting recognition
+ * When true, server waits for GameContext message before processing audio
+ * When false, recognition starts immediately
+ * Default: false
+ */
+ useContext?: boolean;
+ /**
+ * Final transcript stability mode
+ *
+ * Controls timeout duration for fallback final transcript when provider
+ * doesn't respond with is_final=true after stopRecording().
+ *
+ * - aggressive: 100ms - fast response, may cut off slow providers
+ * - balanced: 200ms - current default, good for most cases
+ * - conservative: 400ms - wait longer for complex utterances
+ *
+ * @default 'balanced'
+ * @see FinalTranscriptStability enum for detailed descriptions
+ */
+ finalTranscriptStability?: FinalTranscriptStability | string;
+ /**
+ * Additional provider-specific options
+ *
+ * Common options per provider:
+ * - Deepgram: punctuate, smart_format, diarize, utterances
+ * - Google: enableAutomaticPunctuation, singleUtterance, enableWordTimeOffsets
+ * - AssemblyAI: formatTurns, filter_profanity, word_boost
+ *
+ * Note: interimResults is now a top-level field, but can still be overridden per provider
+ *
+ * @example
+ * ```typescript
+ * providerOptions: {
+ * google: {
+ * enableAutomaticPunctuation: true,
+ * singleUtterance: false,
+ * enableWordTimeOffsets: false
+ * }
+ * }
+ * ```
+ */
+ providerOptions?: Record<string, any>;
+ /**
+ * Optional fallback ASR configurations
+ *
+ * List of alternative ASR configurations to use if the primary fails.
+ * Each fallback config is a complete ASRRequestConfig that will be tried
+ * in order until one succeeds.
+ *
+ * @example
+ * ```typescript
+ * fallbackModels: [
+ * {
+ * provider: RecognitionProvider.DEEPGRAM,
+ * model: DeepgramModel.NOVA_2,
+ * language: Language.ENGLISH_US,
+ * sampleRate: 16000,
+ * encoding: AudioEncoding.LINEAR16
+ * },
+ * {
+ * provider: RecognitionProvider.GOOGLE,
+ * model: GoogleModel.LATEST_SHORT,
+ * language: Language.ENGLISH_US,
+ * sampleRate: 16000,
+ * encoding: AudioEncoding.LINEAR16
+ * }
+ * ]
+ * ```
+ */
+ fallbackModels?: ASRRequestConfig[];
+ }
+
+ /**
+ * Standard stage/environment constants used across all services
+ */
+ declare const STAGES: {
+ readonly LOCAL: "local";
+ readonly DEV: "dev";
+ readonly STAGING: "staging";
+ readonly PRODUCTION: "production";
+ };
+ type Stage = typeof STAGES[keyof typeof STAGES];
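The `typeof STAGES[keyof typeof STAGES]` indexed-access type derives the union `'local' | 'dev' | 'staging' | 'production'` from the readonly object, so `Stage` stays in sync with `STAGES` automatically. A type-level sketch (per the config docs further down, consumers would import `STAGES` from `@recog/shared-types` rather than this package):

```typescript
const stage: Stage = STAGES.STAGING; // OK: 'staging' is in the derived union
// const bad: Stage = 'qa';          // compile error: not a member of the union
```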
+
+ /**
+ * Generic WebSocket protocol types and utilities
+ * Supports flexible versioning and message types
+ * Used by both client and server implementations
+ */
+
+ /**
+ * Base message structure - completely flexible
+ * @template V - Version type (number, string, etc.)
+ */
+ interface Message<V = number> {
+ v: V;
+ type: string;
+ data?: unknown;
+ }
+ /**
+ * Version serializer interface
+ * Converts between version type V and byte representation
+ */
+ interface VersionSerializer<V> {
+ serialize: (v: V) => number;
+ deserialize: (byte: number) => V;
+ }
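Since the interface maps versions to a single byte, a numeric serializer is nearly the identity. A sketch under that assumption (the SDK's actual default serializer is not visible in these declarations, and the `0xff` mask is our own guard):

```typescript
// Hypothetical serializer for numeric protocol versions.
const numericVersionSerializer: VersionSerializer<number> = {
  serialize: (v) => v & 0xff, // versions travel as one byte on the wire
  deserialize: (byte) => byte,
};
```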
+
+ /**
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
+ * Sends audio and control messages, receives responses from server
+ *
+ * Features:
+ * - Generic version type support (number, string, etc.)
+ * - Type-safe upward/downward message data
+ * - Client-side backpressure monitoring
+ * - Abstract hooks for application-specific logic
+ * - Format-agnostic audio protocol (supports any encoding)
+ */
+
+ type ClientConfig = {
+ url: string;
+ highWM?: number;
+ lowWM?: number;
+ };
+ /**
+ * WebSocketAudioClient - Abstract base class for WebSocket clients
+ * that send audio frames and JSON messages
+ *
+ * @template V - Version type (number, string, object, etc.)
+ * @template TUpward - Type of upward message data (Client -> Server)
+ * @template TDownward - Type of downward message data (Server -> Client)
+ *
+ * @example
+ * ```typescript
+ * class MyClient extends WebSocketAudioClient<number, MyUpMsg, MyDownMsg> {
+ * protected onConnected() {
+ * console.log('Connected!');
+ * }
+ *
+ * protected onMessage(msg) {
+ * console.log('Received:', msg.type, msg.data);
+ * }
+ *
+ * protected onDisconnected(code, reason) {
+ * console.log('Disconnected:', code, reason);
+ * }
+ *
+ * protected onError(error) {
+ * console.error('Error:', error);
+ * }
+ * }
+ *
+ * const client = new MyClient({ url: 'ws://localhost:8080' });
+ * client.connect();
+ * client.sendMessage(1, 'configure', { language: 'en' });
+ * client.sendAudio(audioData);
+ * ```
+ */
+ declare abstract class WebSocketAudioClient<V = number, // Version type (default: number)
+ TUpward = unknown, // Upward message data type
+ TDownward = unknown> {
+ private cfg;
+ protected versionSerializer: VersionSerializer<V>;
+ private ws;
+ private seq;
+ private HWM;
+ private LWM;
+ constructor(cfg: ClientConfig, versionSerializer?: VersionSerializer<V>);
+ /**
+ * Hook: Called when WebSocket connection is established
+ */
+ protected abstract onConnected(): void;
+ /**
+ * Hook: Called when WebSocket connection closes
+ * @param code - Close code (see WebSocketCloseCode enum)
+ * @param reason - Human-readable close reason
+ */
+ protected abstract onDisconnected(code: number, reason: string): void;
+ /**
+ * Hook: Called when WebSocket error occurs
+ */
+ protected abstract onError(error: Event): void;
+ /**
+ * Hook: Called when downward message arrives from server
+ * Override this to handle messages (optional - default does nothing)
+ */
+ protected onMessage(_msg: Message<V> & {
+ data: TDownward;
+ }): void;
+ connect(): void;
+ /**
+ * Send JSON message to server
+ * @param version - Message version
+ * @param type - Message type (developer defined)
+ * @param data - Message payload (typed)
+ */
+ sendMessage(version: V, type: string, data: TUpward): void;
+ /**
+ * Send audio frame with specified encoding and sample rate
+ * @param audioData - Audio data (any format: Int16Array, Uint8Array, ArrayBuffer, etc.)
+ * @param version - Audio frame version
+ * @param encodingId - Audio encoding ID (0-5, e.g., AudioEncoding.LINEAR16)
+ * @param sampleRate - Sample rate in Hz (e.g., 16000)
+ */
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView, version: V, encodingId: number, sampleRate: number): void;
+ /**
+ * Get current WebSocket buffer size
+ */
+ getBufferedAmount(): number;
+ /**
+ * Check if local buffer is backpressured
+ */
+ isLocalBackpressured(): boolean;
+ /**
+ * Check if ready to send audio
+ * Verifies: connection open, no local buffer pressure
+ */
+ canSend(): boolean;
+ /**
+ * Check if connection is open
+ */
+ isOpen(): boolean;
+ /**
+ * Get current connection state
+ */
+ getReadyState(): number;
+ /**
+ * Close the WebSocket connection
+ * Protected method for subclasses to implement disconnect logic
+ * @param code - WebSocket close code (default: 1000 = normal closure)
+ * @param reason - Human-readable close reason
+ */
+ protected closeConnection(code?: number, reason?: string): void;
+ }
+
+ /**
+ * Recognition Client Types
+ *
+ * Type definitions and interfaces for the recognition client SDK.
+ * These interfaces enable dependency injection, testing, and alternative implementations.
+ */
+
+ /**
+ * Client connection state enum
+ * Represents the various states a recognition client can be in during its lifecycle
+ */
+ declare enum ClientState {
+ /** Initial state, no connection established */
+ INITIAL = "initial",
+ /** Actively establishing WebSocket connection */
+ CONNECTING = "connecting",
+ /** WebSocket connected but waiting for server ready signal */
+ CONNECTED = "connected",
+ /** Server ready, can send audio */
+ READY = "ready",
+ /** Sent stop signal, waiting for final transcript */
+ STOPPING = "stopping",
+ /** Connection closed normally after stop */
+ STOPPED = "stopped",
+ /** Connection failed or lost unexpectedly */
+ FAILED = "failed"
+ }
+ /**
+ * Callback URL configuration with message type filtering
+ */
+ interface RecognitionCallbackUrl {
+ /** The callback URL endpoint */
+ url: string;
+ /** Array of message types to send to this URL. If empty/undefined, all types are sent */
+ messageTypes?: Array<string | number>;
+ }
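A fan-out sketch for this shape. The URLs are placeholders, and we are assuming the `messageTypes` filter matches the result discriminator strings ("Transcription" etc.); the declarations do not state the accepted values:

```typescript
const callbackUrls: RecognitionCallbackUrl[] = [
  // Only final transcripts go to a hypothetical scoring service.
  { url: 'https://scoring.example.com/hooks/transcripts', messageTypes: ['Transcription'] },
  // No filter: this endpoint receives all message types.
  { url: 'https://audit.example.com/hooks/all' },
];
```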
+ interface IRecognitionClientConfig {
+ /**
+ * WebSocket endpoint URL (optional)
+ * Either `url` or `stage` must be provided.
+ * If both are provided, `url` takes precedence.
+ *
+ * Example with explicit URL:
+ * ```typescript
+ * { url: 'wss://custom-endpoint.example.com/ws/v1/recognize' }
+ * ```
+ */
+ url?: string;
+ /**
+ * Stage for recognition service (recommended)
+ * Either `url` or `stage` must be provided.
+ * If both are provided, `url` takes precedence.
+ * Defaults to production if neither is provided.
+ *
+ * Example with STAGES enum (recommended):
+ * ```typescript
+ * import { STAGES } from '@recog/shared-types';
+ * { stage: STAGES.STAGING }
+ * ```
+ *
+ * String values also accepted:
+ * ```typescript
+ * { stage: 'staging' } // STAGES.LOCAL | STAGES.DEV | STAGES.STAGING | STAGES.PRODUCTION
+ * ```
+ */
+ stage?: Stage | string;
+ /** ASR configuration (provider, model, language, etc.) - optional */
+ asrRequestConfig?: ASRRequestConfig;
+ /** Game context for improved recognition accuracy */
+ gameContext?: GameContextV1;
+ /**
+ * Game ID for tracking and routing purposes (optional)
+ * If provided, this is added to the WebSocket URL as a query parameter.
+ * If gameContext is also provided, this takes precedence over gameContext.gameId.
+ */
+ gameId?: string;
+ /** Audio utterance ID (optional) - if not provided, a UUID v4 will be generated */
+ audioUtteranceId?: string;
+ /** Callback URLs for server-side notifications with optional message type filtering (optional)
+ * The game side only needs to use this if another service must be notified about the transcription results.
+ */
+ callbackUrls?: RecognitionCallbackUrl[];
+ /** User identification (optional) */
+ userId?: string;
+ /** Game session identification (optional). Called 'sessionId' in Platform and most games. */
+ gameSessionId?: string;
+ /** Device identification (optional) */
+ deviceId?: string;
+ /** Account identification (optional) */
+ accountId?: string;
+ /** Question answer identifier for tracking Q&A sessions (optional, for tracking purposes only) */
+ questionAnswerId?: string;
+ /** Platform for audio recording device (optional, e.g., 'ios', 'android', 'web', 'unity') */
+ platform?: string;
+ /** Callback when transcript is received */
+ onTranscript?: (result: TranscriptionResultV1) => void;
+ /**
+ * Callback when function call is received
+ * Note: Not supported in 2025. P2 feature for future speech-to-function-call capability.
+ */
+ onFunctionCall?: (result: FunctionCallResultV1) => void;
+ /** Callback when metadata is received. Fired only once, after transcription is complete. */
+ onMetadata?: (metadata: MetadataResultV1) => void;
+ /** Callback when error occurs */
+ onError?: (error: ErrorResultV1) => void;
+ /** Callback when connected to WebSocket */
+ onConnected?: () => void;
+ /**
+ * Callback when WebSocket disconnects
+ * @param code - WebSocket close code (1000 = normal, 1006 = abnormal, etc.)
+ * @param reason - Close reason string
+ */
+ onDisconnected?: (code: number, reason: string) => void;
+ /** High water mark for backpressure control (bytes) */
+ highWaterMark?: number;
+ /** Low water mark for backpressure control (bytes) */
+ lowWaterMark?: number;
+ /** Maximum buffer duration in seconds (default: 60s) */
+ maxBufferDurationSec?: number;
+ /** Expected chunks per second for ring buffer sizing (default: 100) */
+ chunksPerSecond?: number;
+ /**
+ * Connection retry configuration (optional)
+ * Only applies to initial connection establishment, not mid-stream interruptions.
+ *
+ * Default: { maxAttempts: 4, delayMs: 200 } (try once, retry 3 times = 4 total attempts)
+ *
+ * Timing: Attempt 1 → FAIL → wait 200ms → Attempt 2 → FAIL → wait 200ms → Attempt 3 → FAIL → wait 200ms → Attempt 4
+ *
+ * Example:
+ * ```typescript
+ * {
+ * connectionRetry: {
+ * maxAttempts: 2, // Try connecting up to 2 times (1 retry)
+ * delayMs: 500 // Wait 500ms between attempts
+ * }
+ * }
+ * ```
+ */
+ connectionRetry?: {
+ /** Maximum number of connection attempts (default: 4, min: 1, max: 5) */
+ maxAttempts?: number;
+ /** Delay in milliseconds between retry attempts (default: 200ms) */
+ delayMs?: number;
+ };
+ /**
+ * Optional logger function for debugging
+ * If not provided, no logging will occur
+ * @param level - Log level: 'debug', 'info', 'warn', 'error'
+ * @param message - Log message
+ * @param data - Optional additional data
+ */
+ logger?: (level: 'debug' | 'info' | 'warn' | 'error', message: string, data?: any) => void;
+ }
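Pulling the fields together, a typical config (the identifier values are placeholders; `RealTimeTwoWayWebSocketRecognitionClientConfig`, which extends this interface, is the exported type):

```typescript
const config: RealTimeTwoWayWebSocketRecognitionClientConfig = {
  stage: 'staging',            // or url: 'wss://...'; url wins if both are given
  gameId: 'song-quiz',
  gameSessionId: 'session-123',
  onTranscript: (r) => console.log(r.is_finished ? 'final' : 'interim', r.finalTranscript),
  onError: (e) => console.error(e.errorType, e.message),
  onDisconnected: (code, reason) => console.log('closed', code, reason),
  connectionRetry: { maxAttempts: 2, delayMs: 500 },
  logger: (level, message, data) => console.log(`[${level}]`, message, data ?? ''),
};
```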
+ /**
+ * Recognition Client Interface
+ *
+ * Main interface for real-time speech recognition clients.
+ * Provides methods for connection management, audio streaming, and session control.
+ */
+ interface IRecognitionClient {
+ /**
+ * Connect to the WebSocket endpoint
+ * @returns Promise that resolves when connected
+ * @throws Error if connection fails or times out
+ */
+ connect(): Promise<void>;
+ /**
+ * Send audio data to the recognition service
+ * Audio is buffered locally and sent when connection is ready.
+ * @param audioData - PCM audio data as ArrayBuffer, typed array view, or Blob
+ */
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
+ /**
+ * Stop recording and wait for final transcript
+ * The server will close the connection after sending the final transcript.
+ * @returns Promise that resolves when final transcript is received
+ */
+ stopRecording(): Promise<void>;
+ /**
+ * Force stop and immediately close connection without waiting for server
+ *
+ * WARNING: This is an abnormal shutdown that bypasses the graceful stop flow:
+ * - Does NOT wait for server to process remaining audio
+ * - Does NOT receive final transcript from server
+ * - Immediately closes WebSocket connection
+ * - Cleans up resources (buffers, listeners)
+ *
+ * Use Cases:
+ * - User explicitly cancels/abandons session
+ * - Timeout scenarios where waiting is not acceptable
+ * - Need immediate cleanup and can't wait for server
+ *
+ * RECOMMENDED: Use stopRecording() for normal shutdown.
+ * Only use this when immediate disconnection is required.
+ */
+ stopAbnormally(): void;
+ /**
+ * Get the audio utterance ID for this session
+ * Available immediately after client construction.
+ * @returns UUID v4 string identifying this recognition session
+ */
+ getAudioUtteranceId(): string;
+ /**
+ * Get the current state of the client
+ * @returns Current ClientState value
+ */
+ getState(): ClientState;
+ /**
+ * Check if WebSocket connection is open
+ * @returns true if connected and ready to communicate
+ */
+ isConnected(): boolean;
+ /**
+ * Check if client is currently connecting
+ * @returns true if connection is in progress
+ */
+ isConnecting(): boolean;
+ /**
+ * Check if client is currently stopping
+ * @returns true if stopRecording() is in progress
+ */
+ isStopping(): boolean;
+ /**
+ * Check if transcription has finished
+ * @returns true if the transcription is complete
+ */
+ isTranscriptionFinished(): boolean;
+ /**
+ * Check if the audio buffer has overflowed
+ * @returns true if the ring buffer has wrapped around
+ */
+ isBufferOverflowing(): boolean;
+ /**
+ * Get client statistics
+ * @returns Statistics about audio transmission and buffering
+ */
+ getStats(): IRecognitionClientStats;
+ /**
+ * Get the WebSocket URL being used by this client
+ * Available immediately after client construction.
+ * @returns WebSocket URL string
+ */
+ getUrl(): string;
+ }
+ /**
+ * Client statistics interface
+ */
+ interface IRecognitionClientStats {
+ /** Total audio bytes sent to server */
+ audioBytesSent: number;
+ /** Total number of audio chunks sent */
+ audioChunksSent: number;
+ /** Total number of audio chunks buffered */
+ audioChunksBuffered: number;
+ /** Number of times the ring buffer overflowed */
+ bufferOverflowCount: number;
+ /** Current number of chunks in buffer */
+ currentBufferedChunks: number;
+ /** Whether the ring buffer has wrapped (overwritten old data) */
+ hasWrapped: boolean;
+ }
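A monitoring sketch built on `getStats()`; the one-second interval and warning policy are arbitrary choices for illustration, not SDK behavior:

```typescript
// Periodically check buffer health; returns a function that stops the monitor.
function monitorBufferHealth(client: IRecognitionClient): () => void {
  const timer = setInterval(() => {
    const stats = client.getStats();
    if (stats.hasWrapped) {
      // The ring buffer overwrote old chunks, so some audio was dropped.
      console.warn(`ring buffer wrapped ${stats.bufferOverflowCount}x`);
    }
    console.debug(
      `sent ${stats.audioChunksSent} chunks (${stats.audioBytesSent} bytes), ` +
      `${stats.currentBufferedChunks} buffered`,
    );
  }, 1000);
  return () => clearInterval(timer);
}
```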
+ /**
+ * Configuration for RealTimeTwoWayWebSocketRecognitionClient
+ * This extends IRecognitionClientConfig and is the main configuration interface
+ * for creating a new RealTimeTwoWayWebSocketRecognitionClient instance.
+ */
+ interface RealTimeTwoWayWebSocketRecognitionClientConfig extends IRecognitionClientConfig {
+ }
+
+ /**
+ * RealTimeTwoWayWebSocketRecognitionClient - Clean, compact SDK for real-time speech recognition
+ *
+ * Features:
+ * - Ring buffer-based audio storage with fixed memory footprint
+ * - Automatic buffering when disconnected, immediate send when connected
+ * - Buffer persists after flush (for future retry/reconnection scenarios)
+ * - Built on WebSocketAudioClient for robust protocol handling
+ * - Simple API: connect() → sendAudio() → stopRecording()
+ * - Type-safe message handling with callbacks
+ * - Automatic backpressure management
+ * - Overflow detection with buffer state tracking
+ *
+ * Example:
+ * ```typescript
+ * const client = new RealTimeTwoWayWebSocketRecognitionClient({
+ * url: 'ws://localhost:3101/ws/v1/recognize',
+ * onTranscript: (result) => console.log(result.finalTranscript),
+ * onError: (error) => console.error(error),
+ * maxBufferDurationSec: 60 // Ring buffer for 60 seconds
+ * });
+ *
+ * await client.connect();
+ *
+ * // Send audio chunks - always stored in ring buffer, sent if connected
+ * micStream.on('data', (chunk) => client.sendAudio(chunk));
+ *
+ * // Signal end of audio and wait for final results
+ * await client.stopRecording();
+ *
+ * // Server will close connection after sending finals
+ * // No manual cleanup needed - browser handles it
+ * ```
+ */
+
+ /**
+ * Re-export TranscriptionResultV1 as TranscriptionResult for backward compatibility
+ */
+ type TranscriptionResult = TranscriptionResultV1;
+
+ /**
+ * RealTimeTwoWayWebSocketRecognitionClient - SDK-level client for real-time speech recognition
+ *
+ * Implements IRecognitionClient interface for dependency injection and testing.
+ * Extends WebSocketAudioClient with local audio buffering and simple callback-based API.
+ */
+ declare class RealTimeTwoWayWebSocketRecognitionClient extends WebSocketAudioClient<number, any, any> implements IRecognitionClient {
+ private static readonly PROTOCOL_VERSION;
+ private config;
+ private audioBuffer;
+ private messageHandler;
+ private state;
+ private connectionPromise;
+ private isDebugLogEnabled;
+ private audioBytesSent;
+ private audioChunksSent;
+ private audioStatsLogInterval;
+ private lastAudioStatsLog;
+ constructor(config: RealTimeTwoWayWebSocketRecognitionClientConfig);
+ /**
+ * Internal logging helper - only logs if a logger was provided in config
+ * Debug logs are additionally gated by isDebugLogEnabled flag
+ * @param level - Log level: debug, info, warn, or error
+ * @param message - Message to log
+ * @param data - Optional additional data to log
+ */
+ private log;
+ /**
+ * Clean up internal resources to free memory
+ * Called when connection closes (normally or abnormally)
+ */
+ private cleanup;
+ connect(): Promise<void>;
+ /**
+ * Attempt to connect with retry logic
+ * Only retries on initial connection establishment, not mid-stream interruptions
+ */
+ private connectWithRetry;
+ sendAudio(audioData: ArrayBuffer | ArrayBufferView | Blob): void;
+ private sendAudioInternal;
+ stopRecording(): Promise<void>;
+ stopAbnormally(): void;
+ getAudioUtteranceId(): string;
+ getUrl(): string;
+ getState(): ClientState;
+ isConnected(): boolean;
+ isConnecting(): boolean;
+ isStopping(): boolean;
+ isTranscriptionFinished(): boolean;
+ isBufferOverflowing(): boolean;
+ getStats(): IRecognitionClientStats;
+ protected onConnected(): void;
+ protected onDisconnected(code: number, reason: string): void;
+ /**
+ * Get human-readable description for WebSocket close code
+ */
+ private getCloseCodeDescription;
+ protected onError(error: Event): void;
+ protected onMessage(msg: {
+ v: number;
+ type: string;
+ data: any;
+ }): void;
+ /**
+ * Handle control messages from server
+ * @param msg - Control message containing server actions
+ */
+ private handleControlMessage;
+ /**
+ * Send audio immediately to the server (without buffering)
+ * @param audioData - Audio data to send
+ */
+ private sendAudioNow;
+ }
+
+ export { AudioEncoding, ControlSignalTypeV1 as ControlSignal, RealTimeTwoWayWebSocketRecognitionClient, RecognitionContextTypeV1 };
+ export type { GameContextV1, RealTimeTwoWayWebSocketRecognitionClientConfig, TranscriptionResult };