@amaster.ai/asr-client 1.0.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,297 @@
+ import { HttpClient } from '@amaster.ai/http-client';
+
+ /**
+ * ASR Realtime WebSocket Client for Qwen-ASR Realtime API
+ *
+ * WebSocket-based real-time speech recognition for streaming transcription.
+ * Follows the Qwen-ASR Realtime API protocol with proper event handling.
+ *
+ * @example
+ * ```typescript
+ * const client = createASRClient({
+ * language: "zh",
+ * enableVAD: true,
+ * onReady() {
+ * console.log("ASR connected");
+ * },
+ * onTranscript(text, isFinal) {
+ * console.log(isFinal ? "[Final]" : "[Interim]", text);
+ * },
+ * onError(err) {
+ * console.error("ASR error:", err);
+ * },
+ * });
+ *
+ * await client.connect();
+ * await client.startRecording();
+ * // ... stop ...
+ * await client.stopRecording();
+ * await client.close();
+ * ```
+ */
+ type ASRLanguage = "zh" | "yue" | "en" | "ja" | "de" | "ko" | "ru" | "fr" | "pt" | "ar" | "it" | "es" | "hi" | "id" | "th" | "tr" | "uk" | "vi" | "cs" | "da" | "fil" | "fi" | "is" | "ms" | "no" | "pl" | "sv";
+ type ClientEventType = "session.update" | "input_audio_buffer.append" | "input_audio_buffer.commit" | "session.finish";
+ type ServerEventType = "session.created" | "session.updated" | "input_audio_buffer.speech_started" | "input_audio_buffer.speech_stopped" | "input_audio_buffer.committed" | "conversation.item.input_audio_transcription.text" | "conversation.item.input_audio_transcription.completed" | "session.finished" | "error";
+ interface BaseEvent {
+ event_id: string;
+ type: ClientEventType | ServerEventType;
+ }
+ interface SessionUpdateEvent extends BaseEvent {
+ type: "session.update";
+ session: SessionConfig;
+ }
+ interface InputAudioBufferAppendEvent extends BaseEvent {
+ type: "input_audio_buffer.append";
+ audio: string;
+ }
+ interface InputAudioBufferCommitEvent extends BaseEvent {
+ type: "input_audio_buffer.commit";
+ }
+ interface SessionFinishEvent extends BaseEvent {
+ type: "session.finish";
+ }
+ type ClientEvent = SessionUpdateEvent | InputAudioBufferAppendEvent | InputAudioBufferCommitEvent | SessionFinishEvent;
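The client event types above correspond to JSON frames sent over the WebSocket. A minimal sketch of framing them by hand, assuming JSON-over-text-frame transport (the package normally does this internally, and the `event_id` scheme here is illustrative):

```typescript
import type { ClientEvent } from '@amaster.ai/asr-client';

declare const ws: WebSocket;     // assumed: an already-open connection
declare const base64Pcm: string; // assumed: one base64-encoded PCM chunk

// Serialize a client event as a JSON text frame (assumed transport).
const send = (event: ClientEvent) => ws.send(JSON.stringify(event));

// Configure the session, then stream audio.
send({
  event_id: "evt_1", // illustrative id scheme
  type: "session.update",
  session: { input_audio_format: "pcm", sample_rate: 16000 },
});
send({ event_id: "evt_2", type: "input_audio_buffer.append", audio: base64Pcm });
```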
+ interface SessionCreatedEvent extends BaseEvent {
+ type: "session.created";
+ session: {
+ id: string;
+ };
+ }
+ interface SessionUpdatedEvent extends BaseEvent {
+ type: "session.updated";
+ session: SessionConfig;
+ }
+ interface SpeechStartedEvent extends BaseEvent {
+ type: "input_audio_buffer.speech_started";
+ }
+ interface SpeechStoppedEvent extends BaseEvent {
+ type: "input_audio_buffer.speech_stopped";
+ }
+ interface InputAudioBufferCommittedEvent extends BaseEvent {
+ type: "input_audio_buffer.committed";
+ }
+ interface TranscriptionTextEvent extends BaseEvent {
+ type: "conversation.item.input_audio_transcription.text";
+ text?: string;
+ stash?: string;
+ transcript?: string;
+ }
+ interface TranscriptionCompletedEvent extends BaseEvent {
+ type: "conversation.item.input_audio_transcription.completed";
+ text?: string;
+ transcript?: string;
+ }
+ interface SessionFinishedEvent extends BaseEvent {
+ type: "session.finished";
+ }
+ interface ErrorEvent extends BaseEvent {
+ type: "error";
+ error: {
+ message: string;
+ code?: string;
+ };
+ }
+ type ServerEvent = SessionCreatedEvent | SessionUpdatedEvent | SpeechStartedEvent | SpeechStoppedEvent | InputAudioBufferCommittedEvent | TranscriptionTextEvent | TranscriptionCompletedEvent | SessionFinishedEvent | ErrorEvent;
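ServerEvent is a discriminated union on `type`, so a switch statement narrows each case automatically. A sketch of a handler over these types (the dispatch itself is illustrative; the client wires this up for you):

```typescript
import type { ServerEvent } from '@amaster.ai/asr-client';

// Sketch: narrowing ServerEvent by its `type` discriminant.
function handleServerEvent(raw: string): void {
  const event = JSON.parse(raw) as ServerEvent;
  switch (event.type) {
    case "conversation.item.input_audio_transcription.text":
      // Interim result; `text`, `stash`, and `transcript` are all optional.
      console.log("[Interim]", event.transcript ?? event.text ?? "");
      break;
    case "conversation.item.input_audio_transcription.completed":
      console.log("[Final]", event.transcript ?? event.text ?? "");
      break;
    case "error":
      console.error(event.error.code, event.error.message);
      break;
    default:
      // session.created, speech_started, committed, etc. fall through here.
      break;
  }
}
```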
+ interface TurnDetectionConfig {
+ type: "server_vad";
+ /** VAD detection threshold. Recommended: 0.0. Default: 0.2. Range: [-1, 1]. */
+ threshold?: number;
+ /** VAD silence duration threshold (ms) for sentence segmentation. Recommended: 400. Default: 800. Range: [200, 6000]. */
+ silence_duration_ms?: number;
+ }
+ interface InputAudioTranscriptionConfig {
+ language?: ASRLanguage;
+ }
+ interface SessionConfig {
+ input_audio_format?: "pcm" | "opus";
+ sample_rate?: 16000 | 8000;
+ input_audio_transcription?: InputAudioTranscriptionConfig;
+ turn_detection?: TurnDetectionConfig | null;
+ }
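From SessionConfig and TurnDetectionConfig above, two session shapes follow: server-side VAD (using the recommended threshold values from the comments) and manual segmentation, where `turn_detection` is set to null. A sketch:

```typescript
import type { SessionConfig } from '@amaster.ai/asr-client';

// Server-VAD session: the server segments speech automatically.
const vadSession: SessionConfig = {
  input_audio_format: "pcm",
  sample_rate: 16000,
  input_audio_transcription: { language: "zh" },
  turn_detection: { type: "server_vad", threshold: 0.0, silence_duration_ms: 400 },
};

// Manual session: disable VAD and commit the audio buffer yourself.
const manualSession: SessionConfig = {
  input_audio_format: "pcm",
  sample_rate: 16000,
  turn_detection: null,
};
```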
+ interface ASRClientConfig {
+ /**
+ * Audio format
+ * @default "pcm"
+ */
+ audioFormat?: "pcm" | "opus";
+ /**
+ * Sample rate in Hz
+ * @default 16000
+ * @description Supports 16000 and 8000. When set to 8000, the server first upsamples to 16000 Hz before recognition, which may add a small delay.
+ */
+ sampleRate?: 16000 | 8000;
+ /**
+ * Audio source language
+ * @default "zh"
+ * @description Supports many languages, including zh (Chinese), yue (Cantonese), en (English), ja (Japanese), and more.
+ */
+ language?: ASRLanguage;
+ /**
+ * Enable VAD (Voice Activity Detection) mode
+ * @default true
+ * @description true = VAD mode (the server automatically detects speech start/end); false = manual mode (the client commits audio explicitly).
+ */
+ enableVAD?: boolean;
+ /**
+ * VAD detection threshold
+ * @default 0.2
+ * @description Recommended: 0.0. Range: [-1, 1]. Lower values make VAD more sensitive.
+ */
+ vadThreshold?: number;
+ /**
+ * VAD silence duration threshold in milliseconds
+ * @default 800
+ * @description Recommended: 400. Range: [200, 6000]. Silence lasting longer than this threshold is treated as the end of an utterance.
+ */
+ vadSilenceDurationMs?: number;
+ /**
+ * Get access token for WebSocket authentication
+ */
+ getAccessToken?: () => string | null;
+ /**
+ * Called when connection is ready (session.created received and session.update sent)
+ */
+ onReady?: () => void;
+ /**
+ * Called when speech is detected (VAD mode only)
+ */
+ onSpeechStart?: () => void;
+ /**
+ * Called when speech stops (VAD mode only)
+ */
+ onSpeechEnd?: () => void;
+ /**
+ * Called on transcript result
+ * @param text - Transcribed text
+ * @param isFinal - Whether this is the final result
+ */
+ onTranscript?: (text: string, isFinal: boolean) => void;
+ /**
+ * Called when audio buffer is committed (non-VAD mode only)
+ */
+ onAudioBufferCommitted?: () => void;
+ /**
+ * Called when session is finished
+ */
+ onSessionFinished?: () => void;
+ /**
+ * Called on error
+ */
+ onError?: (error: Error) => void;
+ /**
+ * Called on close
+ */
+ onClose?: () => void;
+ }
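The file-header example covers VAD mode. For the manual mode described above (`enableVAD: false`), a configuration sketch might look like the following; the `stopRecording()` call on the client below is what triggers the commit:

```typescript
import type { ASRClientConfig } from '@amaster.ai/asr-client';

// Sketch of a manual-mode config: the client decides when recognition runs.
const manualConfig: ASRClientConfig = {
  language: "en",
  enableVAD: false, // client commits audio explicitly via stopRecording()
  getAccessToken: () => localStorage.getItem("token"), // assumed token source
  onAudioBufferCommitted: () => console.log("buffer committed, recognizing..."),
  onTranscript: (text, isFinal) => console.log(isFinal ? "[Final]" : "[Interim]", text),
  onError: (err) => console.error(err),
};
```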
+ interface ASRClient {
+ /** Connect to ASR service and establish session */
+ connect(): Promise<void>;
+ /** Start recording from microphone */
+ startRecording(): Promise<void>;
+ /**
+ * Stop recording
+ * @description In non-VAD mode, this triggers recognition by sending input_audio_buffer.commit
+ */
+ stopRecording(): Promise<void>;
+ /**
+ * Close connection gracefully
+ * @description Sends session.finish and waits for session.finished before closing
+ */
+ close(): Promise<void>;
+ /**
+ * Check if currently recording
+ */
+ isRecording(): boolean;
+ /**
+ * Check if connected to server
+ */
+ isConnected(): boolean;
+ }
+ declare const _default$1: (authConfig: Pick<ASRClientConfig, "getAccessToken">) => (config: ASRClientConfig) => ASRClient;
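Note that the declared type of createASRClient is curried: it takes the auth config first and returns a factory for clients. The @example in the file header skips this step, so treat the exact call shape as something to verify against the runtime. A sketch following the declaration as written:

```typescript
import { createASRClient } from '@amaster.ai/asr-client';

declare const sessionToken: string; // assumed: supplied by your auth layer

// Bind authentication once, then create clients as needed.
const makeClient = createASRClient({ getAccessToken: () => sessionToken });

const client = makeClient({ language: "zh", enableVAD: true });
await client.connect();       // top-level await (ESM)
await client.startRecording();
```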
+
+ /**
+ * HTTP ASR Client - Press-to-talk style speech recognition
+ *
+ * HTTP-based speech recognition suitable for press-to-talk scenarios where you hold to speak
+ * and release to recognize. Good for voice messages, voice search, etc.
+ *
+ * @example
+ * ```typescript
+ * const client = createASRHttpClient({
+ * onRecordingStart() {
+ * console.log("Recording started");
+ * },
+ * onRecordingStop() {
+ * console.log("Recording stopped");
+ * },
+ * onResult(text) {
+ * console.log("Recognized:", text);
+ * },
+ * onError(err) {
+ * console.error("ASR error:", err);
+ * },
+ * });
+ *
+ * // Hold to speak, release to recognize
+ * await client.startRecording();
+ * // ... stop ...
+ * const result = await client.stopRecording();
+ * ```
+ */
+
+ interface Recorder {
+ /** Start recording */
+ start(): Promise<void>;
+ /**
+ * Stop recording and get base64-encoded WAV audio data. You can use this data to call the ASR API.
+ *
+ * @returns Base64-encoded WAV audio data
+ */
+ stop(): Promise<string>;
+ }
+ interface RecorderOptions {
+ /** Called when recording starts */
+ onStart?: () => void;
+ /**
+ * Called when recording stops, with base64-encoded WAV audio data. You can use this data to call the ASR API.
+ *
+ * @param base64 - Base64-encoded WAV audio data
+ * @returns void
+ */
+ onStop?: (base64: string) => void;
+ onError?: (error: Error) => void;
+ }
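A custom recorder can be supplied through `createRecorder` in the config below. Since Recorder and RecorderOptions are not in the export list, a replacement relies on structural typing. A minimal stub for tests (the canned payload is a placeholder, not real audio):

```typescript
// Stub recorder for tests: no microphone, just a canned payload.
// Structurally compatible with the Recorder/RecorderOptions shapes above.
async function createStubRecorder(options?: {
  onStart?: () => void;
  onStop?: (base64: string) => void;
  onError?: (error: Error) => void;
}) {
  const cannedWavBase64 = "UklGRiQAAABXQVZF"; // placeholder bytes, not real audio
  return {
    async start() {
      options?.onStart?.();
    },
    async stop() {
      options?.onStop?.(cannedWavBase64);
      return cannedWavBase64;
    },
  };
}
```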
+ interface ASRHttpClientConfig {
+ http?: HttpClient;
+ /** Get access token */
+ getAccessToken?(): string | null;
+ /** Language, default 'zh' */
+ language?: string;
+ /** Sample rate, default 16000 */
+ sampleRate?: number;
+ /** Create custom recorder */
+ createRecorder?(options?: RecorderOptions): Promise<Recorder>;
+ /** Called when recording starts */
+ onRecordingStart?: () => void;
+ /** Called when recording stops */
+ onRecordingStop?: () => void;
+ /** Called with recognition result */
+ onResult?: (text: string) => void;
+ /** Called on error */
+ onError?: (error: Error) => void;
+ }
+ interface ASRHttpClient {
+ /** Start recording (press-to-talk) */
+ startRecording(): Promise<void>;
+ /** Stop recording and get result */
+ stopRecording(): Promise<string>;
+ /** Record for specific duration then recognize */
+ recordAndRecognize(durationMs: number): Promise<void>;
+ /** Recognize base64-encoded audio data */
+ recognizeFile(base64: string): Promise<string>;
+ /** Recognize audio from URL */
+ recognizeUrl(audioUrl: string): Promise<string>;
+ }
+ declare const _default: (authConfig: Pick<ASRHttpClientConfig, "getAccessToken">) => (config: ASRHttpClientConfig) => ASRHttpClient;
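As with createASRClient, the HTTP factory is curried over the auth config. A press-to-talk sketch wired to pointer events (the button element and token source are assumptions):

```typescript
import { createASRHttpClient } from '@amaster.ai/asr-client';

declare const authToken: string; // assumed: supplied by your auth layer

const client = createASRHttpClient({ getAccessToken: () => authToken })({
  language: "zh",
  onResult: (text) => console.log("Recognized:", text),
  onError: (err) => console.error(err),
});

// Press-to-talk wiring; "#talk" is a hypothetical button element.
const button = document.querySelector<HTMLButtonElement>("#talk")!;
button.addEventListener("pointerdown", () => { void client.startRecording(); });
button.addEventListener("pointerup", async () => {
  const text = await client.stopRecording();
  console.log("Result:", text);
});
```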
+
+ export { type ASRClient, type ASRClientConfig, type ASRHttpClient, type ASRHttpClientConfig, type ASRLanguage, type ClientEvent, type ErrorEvent, type InputAudioBufferAppendEvent, type InputAudioBufferCommitEvent, type InputAudioTranscriptionConfig, type ServerEvent, type SessionConfig, type SessionCreatedEvent, type SessionFinishEvent, type SessionFinishedEvent, type SessionUpdateEvent, type SessionUpdatedEvent, type TranscriptionCompletedEvent, type TranscriptionTextEvent, type TurnDetectionConfig, _default$1 as createASRClient, _default as createASRHttpClient };