@amaster.ai/client 1.0.0-alpha.2 → 1.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@amaster.ai/client",
- "version": "1.0.0-alpha.2",
+ "version": "1.0.0-alpha.3",
  "description": "Unified API client for Amaster platform - All services in one package",
  "type": "module",
  "main": "./dist/index.cjs",
@@ -72,16 +72,16 @@
  "registry": "https://registry.npmjs.org/"
  },
  "dependencies": {
- "@amaster.ai/asr-client": "1.0.0-alpha.2",
- "@amaster.ai/auth-client": "1.0.0-alpha.2",
- "@amaster.ai/copilot-client": "1.0.0-alpha.2",
- "@amaster.ai/bpm-client": "1.0.0-alpha.2",
- "@amaster.ai/entity-client": "1.0.0-alpha.2",
- "@amaster.ai/function-client": "1.0.0-alpha.2",
- "@amaster.ai/http-client": "1.0.0-alpha.2",
- "@amaster.ai/s3-client": "1.0.0-alpha.2",
- "@amaster.ai/tts-client": "1.0.0-alpha.2",
- "@amaster.ai/workflow-client": "1.0.0-alpha.2"
+ "@amaster.ai/asr-client": "1.0.0-alpha.3",
+ "@amaster.ai/entity-client": "1.0.0-alpha.3",
+ "@amaster.ai/bpm-client": "1.0.0-alpha.3",
+ "@amaster.ai/function-client": "1.0.0-alpha.3",
+ "@amaster.ai/copilot-client": "1.0.0-alpha.3",
+ "@amaster.ai/http-client": "1.0.0-alpha.3",
+ "@amaster.ai/auth-client": "1.0.0-alpha.3",
+ "@amaster.ai/tts-client": "1.0.0-alpha.3",
+ "@amaster.ai/workflow-client": "1.0.0-alpha.3",
+ "@amaster.ai/s3-client": "1.0.0-alpha.3"
  },
  "peerDependencies": {
  "axios": "^1.11.0"
package/types/asr.d.ts CHANGED
@@ -27,211 +27,292 @@
  * await asrClient.close();
  * ```
  */
- export type ASRLanguage = "zh" | "yue" | "en" | "ja" | "de" | "ko" | "ru" | "fr" | "pt" | "ar" | "it" | "es" | "hi" | "id" | "th" | "tr" | "uk" | "vi" | "cs" | "da" | "fil" | "fi" | "is" | "ms" | "no" | "pl" | "sv";
- export type ClientEventType = "session.update" | "input_audio_buffer.append" | "input_audio_buffer.commit" | "session.finish";
- export type ServerEventType = "session.created" | "session.updated" | "input_audio_buffer.speech_started" | "input_audio_buffer.speech_stopped" | "input_audio_buffer.committed" | "conversation.item.input_audio_transcription.text" | "conversation.item.input_audio_transcription.completed" | "session.finished" | "error";
+ export type ASRLanguage =
+ | "zh"
+ | "yue"
+ | "en"
+ | "ja"
+ | "de"
+ | "ko"
+ | "ru"
+ | "fr"
+ | "pt"
+ | "ar"
+ | "it"
+ | "es"
+ | "hi"
+ | "id"
+ | "th"
+ | "tr"
+ | "uk"
+ | "vi"
+ | "cs"
+ | "da"
+ | "fil"
+ | "fi"
+ | "is"
+ | "ms"
+ | "no"
+ | "pl"
+ | "sv";
+ export type ClientEventType =
+ | "session.update"
+ | "input_audio_buffer.append"
+ | "input_audio_buffer.commit"
+ | "session.finish";
+ export type ServerEventType =
+ | "session.created"
+ | "session.updated"
+ | "input_audio_buffer.speech_started"
+ | "input_audio_buffer.speech_stopped"
+ | "input_audio_buffer.committed"
+ | "conversation.item.input_audio_transcription.text"
+ | "conversation.item.input_audio_transcription.completed"
+ | "session.finished"
+ | "error";
  export interface BaseEvent {
- event_id: string;
- type: ClientEventType | ServerEventType;
+ event_id: string;
+ type: ClientEventType | ServerEventType;
  }
  export interface SessionUpdateEvent extends BaseEvent {
- type: "session.update";
- session: SessionConfig;
+ type: "session.update";
+ session: SessionConfig;
  }
  export interface InputAudioBufferAppendEvent extends BaseEvent {
- type: "input_audio_buffer.append";
- audio: string;
+ type: "input_audio_buffer.append";
+ audio: string;
  }
  export interface InputAudioBufferCommitEvent extends BaseEvent {
- type: "input_audio_buffer.commit";
+ type: "input_audio_buffer.commit";
  }
  export interface SessionFinishEvent extends BaseEvent {
- type: "session.finish";
+ type: "session.finish";
  }
- type ClientEvent = SessionUpdateEvent | InputAudioBufferAppendEvent | InputAudioBufferCommitEvent | SessionFinishEvent;
+ type ClientEvent =
+ | SessionUpdateEvent
+ | InputAudioBufferAppendEvent
+ | InputAudioBufferCommitEvent
+ | SessionFinishEvent;
  export interface SessionCreatedEvent extends BaseEvent {
- type: "session.created";
- session: {
- id: string;
- };
+ type: "session.created";
+ session: {
+ id: string;
+ };
  }
  export interface SessionUpdatedEvent extends BaseEvent {
- type: "session.updated";
- session: SessionConfig;
+ type: "session.updated";
+ session: SessionConfig;
  }
  export interface SpeechStartedEvent extends BaseEvent {
- type: "input_audio_buffer.speech_started";
+ type: "input_audio_buffer.speech_started";
  }
  export interface SpeechStoppedEvent extends BaseEvent {
- type: "input_audio_buffer.speech_stopped";
+ type: "input_audio_buffer.speech_stopped";
  }
  export interface InputAudioBufferCommittedEvent extends BaseEvent {
- type: "input_audio_buffer.committed";
+ type: "input_audio_buffer.committed";
  }
  export interface TranscriptionTextEvent extends BaseEvent {
- type: "conversation.item.input_audio_transcription.text";
- text?: string;
- stash?: string;
- transcript?: string;
+ type: "conversation.item.input_audio_transcription.text";
+ text?: string;
+ stash?: string;
+ transcript?: string;
  }
  export interface TranscriptionCompletedEvent extends BaseEvent {
- type: "conversation.item.input_audio_transcription.completed";
- text?: string;
- transcript?: string;
+ type: "conversation.item.input_audio_transcription.completed";
+ text?: string;
+ transcript?: string;
  }
  export interface SessionFinishedEvent extends BaseEvent {
- type: "session.finished";
+ type: "session.finished";
  }
  export interface ErrorEvent extends BaseEvent {
- type: "error";
- error: {
- message: string;
- code?: string;
- };
+ type: "error";
+ error: {
+ message: string;
+ code?: string;
+ };
  }
- export type ServerEvent = SessionCreatedEvent | SessionUpdatedEvent | SpeechStartedEvent | SpeechStoppedEvent | InputAudioBufferCommittedEvent | TranscriptionTextEvent | TranscriptionCompletedEvent | SessionFinishedEvent | ErrorEvent;
+ export type ServerEvent =
+ | SessionCreatedEvent
+ | SessionUpdatedEvent
+ | SpeechStartedEvent
+ | SpeechStoppedEvent
+ | InputAudioBufferCommittedEvent
+ | TranscriptionTextEvent
+ | TranscriptionCompletedEvent
+ | SessionFinishedEvent
+ | ErrorEvent;
  export interface TurnDetectionConfig {
- type: "server_vad";
- /** VAD detection threshold; recommended 0.0, default 0.2, range [-1, 1] */
- threshold?: number;
- /** VAD sentence-break detection threshold (ms); recommended 400, default 800, range [200, 6000] */
- silence_duration_ms?: number;
+ type: "server_vad";
+ /** VAD detection threshold; recommended 0.0, default 0.2, range [-1, 1] */
+ threshold?: number;
+ /** VAD sentence-break detection threshold (ms); recommended 400, default 800, range [200, 6000] */
+ silence_duration_ms?: number;
  }
  export interface InputAudioTranscriptionConfig {
- language?: ASRLanguage;
+ language?: ASRLanguage;
  }
  export interface SessionConfig {
- input_audio_format?: "pcm" | "opus";
- sample_rate?: 16000 | 8000;
- input_audio_transcription?: InputAudioTranscriptionConfig;
- turn_detection?: TurnDetectionConfig | null;
+ input_audio_format?: "pcm" | "opus";
+ sample_rate?: 16000 | 8000;
+ input_audio_transcription?: InputAudioTranscriptionConfig;
+ turn_detection?: TurnDetectionConfig | null;
  }
  export interface ASRClientConfig {
- /**
- * Audio format
- * @default "pcm"
- */
- audioFormat?: "pcm" | "opus";
- /**
- * Sample rate in Hz
- * @default 16000
- * @description Supports 16000 and 8000. When set to 8000, the server first upsamples to 16000 Hz before recognition, which may introduce a slight delay.
- */
- sampleRate?: 16000 | 8000;
- /**
- * Audio source language
- * @default "zh"
- * @description Supports multiple languages, including zh (Chinese), yue (Cantonese), en (English), ja (Japanese), etc.
- */
- language?: ASRLanguage;
- /**
- * Enable VAD (Voice Activity Detection) mode
- * @default true
- * @description true = VAD mode (server automatically detects speech start/end), false = manual mode (client controls manually)
- */
- enableVAD?: boolean;
- /**
- * VAD detection threshold
- * @default 0.2
- * @description Recommended: 0.0. Range [-1, 1]. A lower threshold makes VAD more sensitive.
- */
- vadThreshold?: number;
- /**
- * VAD silence duration threshold in milliseconds
- * @default 800
- * @description Recommended: 400. Range [200, 6000]. Silence lasting longer than this threshold is treated as the end of an utterance.
- */
- vadSilenceDurationMs?: number;
- /**
- * Get access token for WebSocket authentication
- */
- getAccessToken?: () => string | null;
- /**
- * Called when connection is ready (session.created received and session.update sent)
- */
- onReady?: () => void;
- /**
- * Called when speech is detected (VAD mode only)
- */
- onSpeechStart?: () => void;
- /**
- * Called when speech stops (VAD mode only)
- */
- onSpeechEnd?: () => void;
- /**
- * Called on transcript result
- * @param text - Transcribed text
- * @param isFinal - Whether this is the final result
- */
- onTranscript?: (text: string, isFinal: boolean) => void;
- /**
- * Called when audio buffer is committed (non-VAD mode only)
- */
- onAudioBufferCommitted?: () => void;
- /**
- * Called when session is finished
- */
- onSessionFinished?: () => void;
- /**
- * Called on error
- */
- onError?: (error: Error) => void;
- /**
- * Called on close
- */
- onClose?: () => void;
+ /**
+ * Audio format
+ * @default "pcm"
+ */
+ audioFormat?: "pcm" | "opus";
+ /**
+ * Sample rate in Hz
+ * @default 16000
+ * @description Supports 16000 and 8000. When set to 8000, the server first upsamples to 16000 Hz before recognition, which may introduce a slight delay.
+ */
+ sampleRate?: 16000 | 8000;
+ /**
+ * Audio source language
+ * @default "zh"
+ * @description Supports multiple languages, including zh (Chinese), yue (Cantonese), en (English), ja (Japanese), etc.
+ */
+ language?: ASRLanguage;
+ /**
+ * Enable VAD (Voice Activity Detection) mode
+ * @default true
+ * @description true = VAD mode (server automatically detects speech start/end), false = manual mode (client controls manually)
+ */
+ enableVAD?: boolean;
+ /**
+ * VAD detection threshold
+ * @default 0.2
+ * @description Recommended: 0.0. Range [-1, 1]. A lower threshold makes VAD more sensitive.
+ */
+ vadThreshold?: number;
+ /**
+ * VAD silence duration threshold in milliseconds
+ * @default 800
+ * @description Recommended: 400. Range [200, 6000]. Silence lasting longer than this threshold is treated as the end of an utterance.
+ */
+ vadSilenceDurationMs?: number;
+ /**
+ * Get access token for WebSocket authentication
+ */
+ getAccessToken?: () => string | null;
+ /**
+ * Called when connection is ready (session.created received and session.update sent)
+ */
+ onReady?: () => void;
+ /**
+ * Called when speech is detected (VAD mode only)
+ */
+ onSpeechStart?: () => void;
+ /**
+ * Called when speech stops (VAD mode only)
+ */
+ onSpeechEnd?: () => void;
+ /**
+ * Called on transcript result
+ * @param text - Transcribed text
+ * @param isFinal - Whether this is the final result
+ */
+ onTranscript?: (text: string, isFinal: boolean) => void;
+ /**
+ * Called when audio buffer is committed (non-VAD mode only)
+ */
+ onAudioBufferCommitted?: () => void;
+ /**
+ * Called when session is finished
+ */
+ onSessionFinished?: () => void;
+ /**
+ * Called on error
+ */
+ onError?: (error: Error) => void;
+ /**
+ * Called on close
+ */
+ onClose?: () => void;
  }
  export interface ASRClient {
- /** Connect to ASR service and establish session */
- connect(): Promise<void>;
- /** Start recording from microphone */
- startRecording(): Promise<void>;
- /**
- * Stop recording
- * @description In non-VAD mode, this triggers recognition by sending input_audio_buffer.commit
- */
- stopRecording(): Promise<void>;
- /**
- * Close connection gracefully
- * @description Sends session.finish and waits for session.finished before closing
- */
- close(): Promise<void>;
- /**
- * Check if currently recording
- */
- isRecording(): boolean;
- /**
- * Check if connected to server
- */
- isConnected(): boolean;
- }
- declare const _default$1: (authConfig: Pick<ASRClientConfig, "getAccessToken">) => (config: ASRClientConfig) => ASRClient;
+ /** Connect to ASR service and establish session */
+ connect(): Promise<void>;
+ /** Start recording from microphone */
+ startRecording(): Promise<void>;
+ /**
+ * Stop recording
+ * @description In non-VAD mode, this triggers recognition by sending input_audio_buffer.commit
+ */
+ stopRecording(): Promise<void>;
+ /**
+ * Close connection gracefully
+ * @description Sends session.finish and waits for session.finished before closing
+ */
+ close(): Promise<void>;
+ /**
+ * Check if currently recording
+ */
+ isRecording(): boolean;
+ /**
+ * Check if connected to server
+ */
+ isConnected(): boolean;
+ }
+ declare const _default$1: (
+ authConfig: Pick<ASRClientConfig, "getAccessToken">
+ ) => (config: ASRClientConfig) => ASRClient;
+
+ export interface Recorder {
+ /** Start recording */
+ start(): Promise<void>;
+ /**
+ * Stop recording and get base64-encoded WAV audio data. You can use this data to call the ASR API.
+ *
+ * @returns Base64-encoded WAV audio data
+ */
+ stop(): Promise<void>;
+ }
+
+ export interface RecorderOptions {
+ /** Called when recording starts */
+ onStart?: () => void;
+ /**
+ * Called when recording stops, with base64-encoded WAV audio data. You can use this data to call the ASR API.
+ *
+ * @param base64 - Base64-encoded WAV audio data
+ * @returns void
+ */
+ onStop?: (base64: string) => void;
+ onError?: (error: Error) => void;
+ }

  export interface ASRHttpClientConfig {
- /** Get access token */
- getAccessToken?(): string | null;
- /** Language, default 'zh' */
- language?: string;
- /** Sample rate, default 16000 */
- sampleRate?: number;
- /** Called when recording starts */
- onRecordingStart?: () => void;
- /** Called when recording stops */
- onRecordingStop?: () => void;
- /** Called with recognition result */
- onResult?: (text: string) => void;
- /** Called on error */
- onError?: (error: Error) => void;
+ /** Get access token */
+ getAccessToken?(): string | null;
+ /** Language, default 'zh' */
+ language?: string;
+ /** Create custom recorder */
+ createRecorder?(options?: RecorderOptions): Promise<Recorder>;
+ /** Sample rate, default 16000 */
+ sampleRate?: number;
+ /** Called when recording starts */
+ onRecordingStart?: () => void;
+ /** Called when recording stops */
+ onRecordingStop?: () => void;
+ /** Called with recognition result */
+ onResult?: (text: string) => void;
+ /** Called on error */
+ onError?: (error: Error) => void;
  }
  export interface ASRHttpClient {
- /** Start recording (press-to-talk) */
- startRecording(): Promise<void>;
- /** Stop recording and get result */
- stopRecording(): Promise<string>;
- /** Record for specific duration then recognize */
- recordAndRecognize(durationMs: number): Promise<string>;
- /** Recognize audio file (File or Blob) */
- recognizeFile(file: File | Blob): Promise<string>;
- /** Recognize audio from URL */
- recognizeUrl(audioUrl: string): Promise<string>;
- }
+ /** Start recording (press-to-talk) */
+ startRecording(): Promise<void>;
+ /** Stop recording and get result */
+ stopRecording(): Promise<string>;
+ /** Record for specific duration then recognize */
+ recordAndRecognize(durationMs: number): Promise<string>;
+ /** Recognize audio file (File or Blob) */
+ recognizeFile(file: File | Blob): Promise<string>;
+ /** Recognize audio from URL */
+ recognizeUrl(audioUrl: string): Promise<string>;
+ }
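The substantive change in this file is the new `Recorder` / `RecorderOptions` pair and the `createRecorder` hook on `ASRHttpClientConfig`, which lets a caller supply a custom audio-capture implementation. Below is a minimal sketch of a conforming recorder. Only the interface shapes come from the definitions above; the `MediaRecorder` capture and the `blobToBase64` helper are illustrative assumptions, and a production recorder would need a real WAV encoding step (`MediaRecorder` does not emit WAV natively). Note that `stop()` resolves to `void`; per the `RecorderOptions` contract, the base64 payload is delivered through `onStop`.

```ts
import type {
  ASRHttpClientConfig,
  Recorder,
  RecorderOptions,
} from "@amaster.ai/client";

// Hypothetical helper (not part of the package): Blob -> base64 string.
const blobToBase64 = (blob: Blob): Promise<string> =>
  new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve((reader.result as string).split(",")[1]);
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(blob);
  });

// Sketch of a custom recorder. A production version would transcode to WAV,
// since the Recorder/RecorderOptions contract promises base64-encoded WAV.
async function createRecorder(options?: RecorderOptions): Promise<Recorder> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const mediaRecorder = new MediaRecorder(stream);
  const chunks: Blob[] = [];
  mediaRecorder.ondataavailable = (e) => chunks.push(e.data);

  return {
    async start() {
      mediaRecorder.start();
      options?.onStart?.();
    },
    // stop() resolves to void; the audio payload goes out via onStop.
    async stop() {
      await new Promise<void>((resolve) => {
        mediaRecorder.addEventListener("stop", () => resolve(), { once: true });
        mediaRecorder.stop();
      });
      try {
        options?.onStop?.(await blobToBase64(new Blob(chunks)));
      } catch (err) {
        options?.onError?.(err as Error);
      }
    },
  };
}

// Wiring the hook into the HTTP client config alongside the existing options.
const config: ASRHttpClientConfig = {
  language: "zh",
  sampleRate: 16000,
  createRecorder,
  onResult: (text) => console.log("transcript:", text),
  onError: (err) => console.error(err),
};
```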
package/types/index.d.ts CHANGED
@@ -323,7 +323,7 @@ export type { AuthClientAPI } from "./auth";
  export type { EntityClientAPI } from "./entity";
  export type { BpmClientAPI } from "./bpm";
  export type { WorkflowClientAPI } from "./workflow";
- export type { ASRClient, ASRClientConfig, ASRHttpClient, ASRHttpClientConfig } from "./asr";
+ export type { ASRClient, ASRClientConfig, ASRHttpClient, ASRHttpClientConfig, Recorder, RecorderOptions } from "./asr";
  export type { CopilotClientAPI } from "./copilot";
  export type { FunctionClientAPI } from "./function";
  export type { TTSClientAPI } from "./tts";
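For consumers, the effect of this one-line change is that the recorder types are now importable from the package root rather than the `./asr` subpath. A quick sketch (the no-op recorder here is hypothetical, just to show the re-exported types in use):

```ts
import type { Recorder, RecorderOptions } from "@amaster.ai/client";

// Hypothetical no-op recorder typed against the newly re-exported interfaces.
const makeNoopRecorder = (options?: RecorderOptions): Recorder => ({
  async start() {
    options?.onStart?.();
  },
  async stop() {
    options?.onStop?.(""); // empty base64 WAV payload
  },
});
```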
package/types/tts.d.ts CHANGED
@@ -78,6 +78,12 @@ export interface TTSClientAPI {
  */
  play(): void;

+ /**
+ * Stop audio playback
+ *
+ */
+ stop(): void;
+
  /**
  * Close connection
  *
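The only addition to the TTS surface is a `stop()` method alongside `play()`. A brief usage sketch, assuming a `TTSClientAPI` instance already exists (its construction is outside this diff):

```ts
import type { TTSClientAPI } from "@amaster.ai/client";

// Assumed to exist; how an instance is obtained is not shown in this diff.
declare const tts: TTSClientAPI;

tts.play();
// New in alpha.3: halt playback; close() remains the way to tear down the connection.
tts.stop();
```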