@amaster.ai/tts-client 1.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +144 -0
- package/dist/index.cjs +168 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +34 -0
- package/dist/index.d.ts +34 -0
- package/dist/index.js +141 -0
- package/dist/index.js.map +1 -0
- package/package.json +45 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Amaster Team
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
# @amaster.ai/tts-client
|
|
2
|
+
|
|
3
|
+
通义千问实时语音合成(TTS)WebSocket 客户端,支持自动音频播放。
|
|
4
|
+
|
|
5
|
+
## 特性
|
|
6
|
+
|
|
7
|
+
- ✅ 完整的 WebSocket 协议封装
|
|
8
|
+
- ✅ 自动音频解码和播放(Web Audio API)
|
|
9
|
+
- ✅ 事件驱动的 API 设计
|
|
10
|
+
- ✅ TypeScript 类型支持
|
|
11
|
+
- ✅ 基于实际调试验证的实现
|
|
12
|
+
|
|
13
|
+
## 安装
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
npm install @amaster.ai/tts-client
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## 快速开始
|
|
20
|
+
|
|
21
|
+
```typescript
|
|
22
|
+
import { createTTSClient } from '@amaster.ai/tts-client';
|
|
23
|
+
|
|
24
|
+
// 创建客户端
|
|
25
|
+
const tts = createTTSClient({
|
|
26
|
+
gatewayUrl: 'ws://www.appok.ai/api/proxy/builtin/platform/qwen-tts/api-ws/v1/realtime',
|
|
27
|
+
voice: 'Cherry',
|
|
28
|
+
autoPlay: true, // 自动播放音频
|
|
29
|
+
});
|
|
30
|
+
|
|
31
|
+
// 监听事件
|
|
32
|
+
tts.on('session-created', (session) => {
|
|
33
|
+
console.log('会话创建:', session.id, session.model);
|
|
34
|
+
});
|
|
35
|
+
|
|
36
|
+
tts.on('audio-chunk', ({ chunk, count }) => {
|
|
37
|
+
console.log('接收音频片段:', count);
|
|
38
|
+
});
|
|
39
|
+
|
|
40
|
+
tts.on('completed', () => {
|
|
41
|
+
console.log('合成完成');
|
|
42
|
+
});
|
|
43
|
+
|
|
44
|
+
// 连接并合成
|
|
45
|
+
await tts.connect();
|
|
46
|
+
await tts.speak('你好,欢迎使用通义千问实时语音合成服务。');
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## API 文档
|
|
50
|
+
|
|
51
|
+
### createTTSClient(config)
|
|
52
|
+
|
|
53
|
+
创建 TTS 客户端实例。
|
|
54
|
+
|
|
55
|
+
**参数**:
|
|
56
|
+
- `gatewayUrl`: Gateway WebSocket URL(会自动追加 model 参数)
|
|
57
|
+
- `voice`: 音色名称,默认 `'Cherry'`
|
|
58
|
+
- `autoPlay`: 是否自动播放,默认 `true`
|
|
59
|
+
- `audioFormat`: 音频格式,默认 `'pcm'`
|
|
60
|
+
- `sampleRate`: 采样率,默认 `24000`
|
|
61
|
+
|
|
62
|
+
**返回**:`TTSClient` 实例
|
|
63
|
+
|
|
64
|
+
### TTSClient
|
|
65
|
+
|
|
66
|
+
#### connect()
|
|
67
|
+
|
|
68
|
+
连接到 TTS 服务。
|
|
69
|
+
|
|
70
|
+
**返回**:`Promise<void>`
|
|
71
|
+
|
|
72
|
+
#### speak(text)
|
|
73
|
+
|
|
74
|
+
合成语音。
|
|
75
|
+
|
|
76
|
+
**参数**:
|
|
77
|
+
- `text`: 要合成的文本
|
|
78
|
+
|
|
79
|
+
**返回**:`Promise<void>`
|
|
80
|
+
|
|
81
|
+
#### on(event, callback)
|
|
82
|
+
|
|
83
|
+
监听事件。
|
|
84
|
+
|
|
85
|
+
**事件类型**:
|
|
86
|
+
- `connected`: WebSocket 连接建立
|
|
87
|
+
- `session-created`: 会话创建成功
|
|
88
|
+
- `session-updated`: 会话配置完成
|
|
89
|
+
- `audio-chunk`: 收到音频片段
|
|
90
|
+
- `audio-done`: 音频生成完成
|
|
91
|
+
- `completed`: 响应完成
|
|
92
|
+
- `error`: 发生错误
|
|
93
|
+
- `closed`: 连接关闭
|
|
94
|
+
|
|
95
|
+
#### disconnect()
|
|
96
|
+
|
|
97
|
+
断开连接并清理资源。
|
|
98
|
+
|
|
99
|
+
#### getSession()
|
|
100
|
+
|
|
101
|
+
获取当前会话信息。
|
|
102
|
+
|
|
103
|
+
**返回**:`TTSSession | null`
|
|
104
|
+
|
|
105
|
+
## 实现细节
|
|
106
|
+
|
|
107
|
+
### 音频格式处理
|
|
108
|
+
|
|
109
|
+
- 接收:Base64 编码的 PCM 16-bit 数据
|
|
110
|
+
- 解码:自动转换为 Float32 AudioBuffer
|
|
111
|
+
- 播放:使用 Web Audio API
|
|
112
|
+
- 采样率:24kHz(高清音质)
|
|
113
|
+
|
|
114
|
+
### WebSocket 协议
|
|
115
|
+
|
|
116
|
+
基于实际验证的消息类型:
|
|
117
|
+
- `session.update` - 配置会话
|
|
118
|
+
- `input_text_buffer.append` - 发送文本
|
|
119
|
+
- `input_text_buffer.commit` - 提交文本
|
|
120
|
+
- `session.finish` - 结束会话
|
|
121
|
+
|
|
122
|
+
### 错误处理
|
|
123
|
+
|
|
124
|
+
自动处理:
|
|
125
|
+
- WebSocket 连接错误
|
|
126
|
+
- 会话配置错误
|
|
127
|
+
- 音频解码错误
|
|
128
|
+
|
|
129
|
+
## 开发
|
|
130
|
+
|
|
131
|
+
```bash
|
|
132
|
+
# 构建
|
|
133
|
+
npm run build
|
|
134
|
+
|
|
135
|
+
# 开发模式
|
|
136
|
+
npm run dev
|
|
137
|
+
|
|
138
|
+
# 类型检查
|
|
139
|
+
npm run type-check
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## License
|
|
143
|
+
|
|
144
|
+
MIT
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"use strict";
// esbuild-generated CommonJS interop helpers (do not edit by hand).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Install a lazy, enumerable getter on `target` for every key in `all`,
// so exports are resolved at access time (supports hoisted declarations).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` as getters, preserving each
// property's enumerability, skipping `except` and keys already on `to`.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Build the CommonJS export object: mark it as an ES module for interop,
// then copy all exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var index_exports = {};
__export(index_exports, {
  createTTSClient: () => createTTSClient
});
module.exports = __toCommonJS(index_exports);
|
|
26
|
+
|
|
27
|
+
// src/tts-client.ts
/**
 * Create a realtime TTS WebSocket client.
 *
 * Protocol (observed in this client): on `session.created` it sends a
 * `session.update`, resolves `connect()` on `session.updated`, buffers
 * base64 PCM from `response.audio.delta`, plays it on `response.audio.done`
 * (browser only), and sends `session.finish` after `response.done`.
 *
 * @param {Object} config
 * @param {string} config.url - WebSocket endpoint URL.
 * @param {string} [config.voice='Cherry'] - Voice name.
 * @param {boolean} [config.autoPlay=true] - Auto-play received audio (browser only).
 * @param {string} [config.audioFormat='pcm'] - Audio format requested from the server.
 * @param {number} [config.sampleRate=24000] - PCM sample rate in Hz.
 * @param {Function} [config.onReady] - Called when the session is configured.
 * @param {Function} [config.onAudioStart] - Called when playback starts.
 * @param {Function} [config.onAudioEnd] - Called when playback ends.
 * @param {Function} [config.onError] - Called on any error.
 * @returns {{connect: Function, speak: Function, close: Function}}
 */
function createTTSClient(config) {
  const {
    url,
    voice = "Cherry",
    autoPlay = true,
    audioFormat = "pcm",
    sampleRate = 24e3,
    onReady,
    onAudioStart,
    onAudioEnd,
    onError
  } = config;
  let ws = null;
  let audioChunks = [];
  let audioContext = null;
  // Open the socket; resolve once the server acknowledges the session config.
  async function connect() {
    return new Promise((resolve, reject) => {
      ws = new WebSocket(url);
      ws.onopen = () => {
      };
      ws.onmessage = (event) => {
        let data;
        try {
          // BUG FIX: an unparseable frame previously threw uncaught inside
          // the handler; report it through onError instead.
          data = JSON.parse(event.data);
        } catch {
          onError?.(new Error("Invalid JSON message from TTS server"));
          return;
        }
        if (data.type === "session.created") {
          ws.send(JSON.stringify({
            type: "session.update",
            session: {
              mode: "server_commit",
              voice,
              response_format: audioFormat,
              sample_rate: sampleRate
            }
          }));
        }
        if (data.type === "session.updated") {
          onReady?.();
          resolve();
        }
        if (data.type === "response.audio.delta") {
          audioChunks.push(data.delta);
        }
        if (data.type === "response.audio.done") {
          if (autoPlay && typeof window !== "undefined") {
            playAudio(audioChunks);
          }
        }
        if (data.type === "response.done") {
          ws.send(JSON.stringify({ type: "session.finish" }));
        }
        if (data.type === "error") {
          const err = new Error(data.error?.message || "Unknown error");
          onError?.(err);
          reject(err);
        }
      };
      ws.onerror = () => {
        const err = new Error("WebSocket connection error");
        onError?.(err);
        reject(err);
      };
      ws.onclose = () => {
        ws = null;
        // BUG FIX: if the socket closes before `session.updated`, fail the
        // pending connect() instead of hanging forever (no-op once settled).
        reject(new Error("WebSocket closed before session was ready"));
      };
    });
  }
  // Queue text for synthesis: append, then commit shortly after.
  async function speak(text) {
    if (!ws || ws.readyState !== WebSocket.OPEN) {
      throw new Error("WebSocket not connected");
    }
    audioChunks = [];
    ws.send(JSON.stringify({
      type: "input_text_buffer.append",
      text
    }));
    setTimeout(() => {
      // BUG FIX: the socket may close during the 100 ms delay; the original
      // dereferenced `ws` unconditionally here and could throw a TypeError.
      if (ws && ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify({
          type: "input_text_buffer.commit"
        }));
      }
    }, 100);
  }
  // Decode base64 PCM16 chunks into a mono AudioBuffer and play it
  // via the Web Audio API (browser only; no-op elsewhere).
  function playAudio(chunks) {
    if (typeof window === "undefined") return;
    try {
      if (!audioContext) {
        audioContext = new AudioContext();
      }
      onAudioStart?.();
      let totalBytes = 0;
      const allBytes = [];
      for (const chunk of chunks) {
        const binaryString = atob(chunk);
        const bytes = new Uint8Array(binaryString.length);
        for (let i = 0; i < binaryString.length; i++) {
          bytes[i] = binaryString.charCodeAt(i);
        }
        allBytes.push(bytes);
        totalBytes += bytes.length;
      }
      const combined = new Uint8Array(totalBytes);
      let offset = 0;
      for (const bytes of allBytes) {
        combined.set(bytes, offset);
        offset += bytes.length;
      }
      // BUG FIX: floor to whole 16-bit samples; a fractional length makes
      // AudioContext.createBuffer throw when the byte count is odd.
      const numSamples = Math.floor(combined.length / 2);
      if (numSamples === 0) return;
      const audioBuffer = audioContext.createBuffer(1, numSamples, sampleRate);
      const channelData = audioBuffer.getChannelData(0);
      const dataView = new DataView(combined.buffer);
      for (let i = 0; i < numSamples; i++) {
        // Little-endian signed 16-bit -> float in [-1, 1).
        const int16 = dataView.getInt16(i * 2, true);
        channelData[i] = int16 / 32768;
      }
      const source = audioContext.createBufferSource();
      source.buffer = audioBuffer;
      source.connect(audioContext.destination);
      source.onended = () => onAudioEnd?.();
      source.start(0);
    } catch (err) {
      // BUG FIX: normalize non-Error throwables before handing to onError,
      // which is typed to receive an Error.
      onError?.(err instanceof Error ? err : new Error(String(err)));
    }
  }
  // Tear down the socket and the audio context.
  function close() {
    if (ws) {
      ws.close();
      ws = null;
    }
    if (audioContext) {
      audioContext.close();
      audioContext = null;
    }
  }
  return {
    connect,
    speak,
    close
  };
}
|
|
164
|
+
// Annotate the CommonJS export names for ESM import in node:
|
|
165
|
+
0 && (module.exports = {
|
|
166
|
+
createTTSClient
|
|
167
|
+
});
|
|
168
|
+
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts","../src/tts-client.ts"],"sourcesContent":["export type { TTSClient, TTSClientConfig } from './tts-client';\nexport { createTTSClient } from './tts-client';\n","/**\n * TTS Realtime WebSocket Client\n */\n\nexport interface TTSClientConfig {\n /** WebSocket endpoint URL */\n url: string;\n /** Voice name, default 'Cherry' */\n voice?: string;\n /** Auto play audio, default true */\n autoPlay?: boolean;\n /** Audio format, default 'pcm' */\n audioFormat?: 'pcm' | 'mp3' | 'wav' | 'opus';\n /** Sample rate, default 24000 */\n sampleRate?: number;\n /** Called when connection is ready */\n onReady?: () => void;\n /** Called when audio playback starts */\n onAudioStart?: () => void;\n /** Called when audio playback ends */\n onAudioEnd?: () => void;\n /** Called on error */\n onError?: (error: Error) => void;\n}\n\nexport interface TTSClient {\n /** Connect to TTS service */\n connect(): Promise<void>;\n /** Synthesize speech from text */\n speak(text: string): Promise<void>;\n /** Close connection */\n close(): void;\n}\n\nexport function createTTSClient(config: TTSClientConfig): TTSClient {\n const {\n url,\n voice = 'Cherry',\n autoPlay = true,\n audioFormat = 'pcm',\n sampleRate = 24000,\n onReady,\n onAudioStart,\n onAudioEnd,\n onError,\n } = config;\n\n let ws: WebSocket | null = null;\n let audioChunks: string[] = [];\n let audioContext: AudioContext | null = null;\n\n async function connect(): Promise<void> {\n return new Promise((resolve, reject) => {\n ws = new WebSocket(url);\n\n ws.onopen = () => {};\n\n ws.onmessage = (event) => {\n const data = JSON.parse(event.data);\n\n if (data.type === 'session.created') {\n ws!.send(JSON.stringify({\n type: 'session.update',\n session: {\n mode: 'server_commit',\n voice,\n response_format: audioFormat,\n sample_rate: sampleRate,\n },\n }));\n }\n\n if (data.type === 'session.updated') {\n onReady?.();\n resolve();\n }\n\n if (data.type === 'response.audio.delta') {\n 
audioChunks.push(data.delta);\n }\n\n if (data.type === 'response.audio.done') {\n if (autoPlay && typeof window !== 'undefined') {\n playAudio(audioChunks);\n }\n }\n\n if (data.type === 'response.done') {\n ws!.send(JSON.stringify({ type: 'session.finish' }));\n }\n\n if (data.type === 'error') {\n const err = new Error(data.error?.message || 'Unknown error');\n onError?.(err);\n reject(err);\n }\n };\n\n ws.onerror = () => {\n const err = new Error('WebSocket connection error');\n onError?.(err);\n reject(err);\n };\n\n ws.onclose = () => {\n ws = null;\n };\n });\n }\n\n async function speak(text: string): Promise<void> {\n if (!ws || ws.readyState !== WebSocket.OPEN) {\n throw new Error('WebSocket not connected');\n }\n\n audioChunks = [];\n\n ws.send(JSON.stringify({\n type: 'input_text_buffer.append',\n text,\n }));\n\n setTimeout(() => {\n ws!.send(JSON.stringify({\n type: 'input_text_buffer.commit',\n }));\n }, 100);\n }\n\n function playAudio(chunks: string[]) {\n if (typeof window === 'undefined') return;\n\n try {\n if (!audioContext) {\n audioContext = new AudioContext();\n }\n\n onAudioStart?.();\n\n let totalBytes = 0;\n const allBytes: Uint8Array[] = [];\n\n for (const chunk of chunks) {\n const binaryString = atob(chunk);\n const bytes = new Uint8Array(binaryString.length);\n for (let i = 0; i < binaryString.length; i++) {\n bytes[i] = binaryString.charCodeAt(i);\n }\n allBytes.push(bytes);\n totalBytes += bytes.length;\n }\n\n const combined = new Uint8Array(totalBytes);\n let offset = 0;\n for (const bytes of allBytes) {\n combined.set(bytes, offset);\n offset += bytes.length;\n }\n\n const numSamples = combined.length / 2;\n const audioBuffer = audioContext.createBuffer(1, numSamples, sampleRate);\n const channelData = audioBuffer.getChannelData(0);\n\n const dataView = new DataView(combined.buffer);\n for (let i = 0; i < numSamples; i++) {\n const int16 = dataView.getInt16(i * 2, true);\n channelData[i] = int16 / 32768.0;\n }\n\n const source = 
audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(audioContext.destination);\n source.onended = () => onAudioEnd?.();\n source.start(0);\n } catch (err) {\n onError?.(err as Error);\n }\n }\n\n function close() {\n if (ws) {\n ws.close();\n ws = null;\n }\n if (audioContext) {\n audioContext.close();\n audioContext = null;\n }\n }\n\n return {\n connect,\n speak,\n close,\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACkCO,SAAS,gBAAgB,QAAoC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,cAAc;AAAA,IACd,aAAa;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,MAAI,KAAuB;AAC3B,MAAI,cAAwB,CAAC;AAC7B,MAAI,eAAoC;AAExC,iBAAe,UAAyB;AACtC,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,WAAK,IAAI,UAAU,GAAG;AAEtB,SAAG,SAAS,MAAM;AAAA,MAAC;AAEnB,SAAG,YAAY,CAAC,UAAU;AACxB,cAAM,OAAO,KAAK,MAAM,MAAM,IAAI;AAElC,YAAI,KAAK,SAAS,mBAAmB;AACnC,aAAI,KAAK,KAAK,UAAU;AAAA,YACtB,MAAM;AAAA,YACN,SAAS;AAAA,cACP,MAAM;AAAA,cACN;AAAA,cACA,iBAAiB;AAAA,cACjB,aAAa;AAAA,YACf;AAAA,UACF,CAAC,CAAC;AAAA,QACJ;AAEA,YAAI,KAAK,SAAS,mBAAmB;AACnC,oBAAU;AACV,kBAAQ;AAAA,QACV;AAEA,YAAI,KAAK,SAAS,wBAAwB;AACxC,sBAAY,KAAK,KAAK,KAAK;AAAA,QAC7B;AAEA,YAAI,KAAK,SAAS,uBAAuB;AACvC,cAAI,YAAY,OAAO,WAAW,aAAa;AAC7C,sBAAU,WAAW;AAAA,UACvB;AAAA,QACF;AAEA,YAAI,KAAK,SAAS,iBAAiB;AACjC,aAAI,KAAK,KAAK,UAAU,EAAE,MAAM,iBAAiB,CAAC,CAAC;AAAA,QACrD;AAEA,YAAI,KAAK,SAAS,SAAS;AACzB,gBAAM,MAAM,IAAI,MAAM,KAAK,OAAO,WAAW,eAAe;AAC5D,oBAAU,GAAG;AACb,iBAAO,GAAG;AAAA,QACZ;AAAA,MACF;AAEA,SAAG,UAAU,MAAM;AACjB,cAAM,MAAM,IAAI,MAAM,4BAA4B;AAClD,kBAAU,GAAG;AACb,eAAO,GAAG;AAAA,MACZ;AAEA,SAAG,UAAU,MAAM;AACjB,aAAK;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAEA,iBAAe,MAAM,MAA6B;AAChD,QAAI,CAAC,MAAM,GAAG,eAAe,UAAU,MAAM;AAC3C,YAAM,IAAI,MAAM,yBAAyB;AAAA,IAC3C;AAEA,kBAAc,CAAC;AAEf,OAAG,KAAK,KAAK,UAAU;AAAA,MACrB,MAAM;AAAA,MACN;AAAA,IACF,CAAC,CAAC;AAEF,eAAW,MAAM;AACf,SAAI,KAAK,KAAK,UAAU;AAAA,QACtB,MAAM;AAAA,MACR,CAAC,CAAC;AAAA,IACJ,GAAG,GAAG;AAAA,EACR;AAEA,WAAS,UAAU,QAAkB;AACnC,QAAI,OAAO,WAAW,YAAa;AAEnC,QAAI
;AACF,UAAI,CAAC,cAAc;AACjB,uBAAe,IAAI,aAAa;AAAA,MAClC;AAEA,qBAAe;AAEf,UAAI,aAAa;AACjB,YAAM,WAAyB,CAAC;AAEhC,iBAAW,SAAS,QAAQ;AAC1B,cAAM,eAAe,KAAK,KAAK;AAC/B,cAAM,QAAQ,IAAI,WAAW,aAAa,MAAM;AAChD,iBAAS,IAAI,GAAG,IAAI,aAAa,QAAQ,KAAK;AAC5C,gBAAM,CAAC,IAAI,aAAa,WAAW,CAAC;AAAA,QACtC;AACA,iBAAS,KAAK,KAAK;AACnB,sBAAc,MAAM;AAAA,MACtB;AAEA,YAAM,WAAW,IAAI,WAAW,UAAU;AAC1C,UAAI,SAAS;AACb,iBAAW,SAAS,UAAU;AAC5B,iBAAS,IAAI,OAAO,MAAM;AAC1B,kBAAU,MAAM;AAAA,MAClB;AAEA,YAAM,aAAa,SAAS,SAAS;AACrC,YAAM,cAAc,aAAa,aAAa,GAAG,YAAY,UAAU;AACvE,YAAM,cAAc,YAAY,eAAe,CAAC;AAEhD,YAAM,WAAW,IAAI,SAAS,SAAS,MAAM;AAC7C,eAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAM,QAAQ,SAAS,SAAS,IAAI,GAAG,IAAI;AAC3C,oBAAY,CAAC,IAAI,QAAQ;AAAA,MAC3B;AAEA,YAAM,SAAS,aAAa,mBAAmB;AAC/C,aAAO,SAAS;AAChB,aAAO,QAAQ,aAAa,WAAW;AACvC,aAAO,UAAU,MAAM,aAAa;AACpC,aAAO,MAAM,CAAC;AAAA,IAChB,SAAS,KAAK;AACZ,gBAAU,GAAY;AAAA,IACxB;AAAA,EACF;AAEA,WAAS,QAAQ;AACf,QAAI,IAAI;AACN,SAAG,MAAM;AACT,WAAK;AAAA,IACP;AACA,QAAI,cAAc;AAChB,mBAAa,MAAM;AACnB,qBAAe;AAAA,IACjB;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;","names":[]}
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * TTS Realtime WebSocket Client — configuration options and the
 * client surface returned by {@link createTTSClient}.
 */
interface TTSClientConfig {
    /** WebSocket endpoint URL to connect to. */
    url: string;
    /** Voice name, default 'Cherry'. */
    voice?: string;
    /** Auto play audio via the Web Audio API (browser only), default true. */
    autoPlay?: boolean;
    /** Audio format requested from the server, default 'pcm'. */
    audioFormat?: 'pcm' | 'mp3' | 'wav' | 'opus';
    /** PCM sample rate in Hz, default 24000. */
    sampleRate?: number;
    /** Called when the session is configured and ready to accept text. */
    onReady?: () => void;
    /** Called when audio playback starts. */
    onAudioStart?: () => void;
    /** Called when audio playback ends. */
    onAudioEnd?: () => void;
    /** Called on any connection, session, or playback error. */
    onError?: (error: Error) => void;
}
interface TTSClient {
    /** Connect to the TTS service; resolves once the session is ready. */
    connect(): Promise<void>;
    /** Synthesize speech from text; requires an open connection. */
    speak(text: string): Promise<void>;
    /** Close the connection and release audio resources. */
    close(): void;
}
declare function createTTSClient(config: TTSClientConfig): TTSClient;

export { type TTSClient, type TTSClientConfig, createTTSClient };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * TTS Realtime WebSocket Client — configuration options and the
 * client surface returned by {@link createTTSClient}.
 */
interface TTSClientConfig {
    /** WebSocket endpoint URL to connect to. */
    url: string;
    /** Voice name, default 'Cherry'. */
    voice?: string;
    /** Auto play audio via the Web Audio API (browser only), default true. */
    autoPlay?: boolean;
    /** Audio format requested from the server, default 'pcm'. */
    audioFormat?: 'pcm' | 'mp3' | 'wav' | 'opus';
    /** PCM sample rate in Hz, default 24000. */
    sampleRate?: number;
    /** Called when the session is configured and ready to accept text. */
    onReady?: () => void;
    /** Called when audio playback starts. */
    onAudioStart?: () => void;
    /** Called when audio playback ends. */
    onAudioEnd?: () => void;
    /** Called on any connection, session, or playback error. */
    onError?: (error: Error) => void;
}
interface TTSClient {
    /** Connect to the TTS service; resolves once the session is ready. */
    connect(): Promise<void>;
    /** Synthesize speech from text; requires an open connection. */
    speak(text: string): Promise<void>;
    /** Close the connection and release audio resources. */
    close(): void;
}
declare function createTTSClient(config: TTSClientConfig): TTSClient;

export { type TTSClient, type TTSClientConfig, createTTSClient };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
// src/tts-client.ts
/**
 * Create a realtime TTS WebSocket client.
 *
 * Protocol (observed in this client): on `session.created` it sends a
 * `session.update`, resolves `connect()` on `session.updated`, buffers
 * base64 PCM from `response.audio.delta`, plays it on `response.audio.done`
 * (browser only), and sends `session.finish` after `response.done`.
 *
 * @param {Object} config
 * @param {string} config.url - WebSocket endpoint URL.
 * @param {string} [config.voice='Cherry'] - Voice name.
 * @param {boolean} [config.autoPlay=true] - Auto-play received audio (browser only).
 * @param {string} [config.audioFormat='pcm'] - Audio format requested from the server.
 * @param {number} [config.sampleRate=24000] - PCM sample rate in Hz.
 * @param {Function} [config.onReady] - Called when the session is configured.
 * @param {Function} [config.onAudioStart] - Called when playback starts.
 * @param {Function} [config.onAudioEnd] - Called when playback ends.
 * @param {Function} [config.onError] - Called on any error.
 * @returns {{connect: Function, speak: Function, close: Function}}
 */
function createTTSClient(config) {
  const {
    url,
    voice = "Cherry",
    autoPlay = true,
    audioFormat = "pcm",
    sampleRate = 24e3,
    onReady,
    onAudioStart,
    onAudioEnd,
    onError
  } = config;
  let ws = null;
  let audioChunks = [];
  let audioContext = null;
  // Open the socket; resolve once the server acknowledges the session config.
  async function connect() {
    return new Promise((resolve, reject) => {
      ws = new WebSocket(url);
      ws.onopen = () => {
      };
      ws.onmessage = (event) => {
        let data;
        try {
          // BUG FIX: an unparseable frame previously threw uncaught inside
          // the handler; report it through onError instead.
          data = JSON.parse(event.data);
        } catch {
          onError?.(new Error("Invalid JSON message from TTS server"));
          return;
        }
        if (data.type === "session.created") {
          ws.send(JSON.stringify({
            type: "session.update",
            session: {
              mode: "server_commit",
              voice,
              response_format: audioFormat,
              sample_rate: sampleRate
            }
          }));
        }
        if (data.type === "session.updated") {
          onReady?.();
          resolve();
        }
        if (data.type === "response.audio.delta") {
          audioChunks.push(data.delta);
        }
        if (data.type === "response.audio.done") {
          if (autoPlay && typeof window !== "undefined") {
            playAudio(audioChunks);
          }
        }
        if (data.type === "response.done") {
          ws.send(JSON.stringify({ type: "session.finish" }));
        }
        if (data.type === "error") {
          const err = new Error(data.error?.message || "Unknown error");
          onError?.(err);
          reject(err);
        }
      };
      ws.onerror = () => {
        const err = new Error("WebSocket connection error");
        onError?.(err);
        reject(err);
      };
      ws.onclose = () => {
        ws = null;
        // BUG FIX: if the socket closes before `session.updated`, fail the
        // pending connect() instead of hanging forever (no-op once settled).
        reject(new Error("WebSocket closed before session was ready"));
      };
    });
  }
  // Queue text for synthesis: append, then commit shortly after.
  async function speak(text) {
    if (!ws || ws.readyState !== WebSocket.OPEN) {
      throw new Error("WebSocket not connected");
    }
    audioChunks = [];
    ws.send(JSON.stringify({
      type: "input_text_buffer.append",
      text
    }));
    setTimeout(() => {
      // BUG FIX: the socket may close during the 100 ms delay; the original
      // dereferenced `ws` unconditionally here and could throw a TypeError.
      if (ws && ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify({
          type: "input_text_buffer.commit"
        }));
      }
    }, 100);
  }
  // Decode base64 PCM16 chunks into a mono AudioBuffer and play it
  // via the Web Audio API (browser only; no-op elsewhere).
  function playAudio(chunks) {
    if (typeof window === "undefined") return;
    try {
      if (!audioContext) {
        audioContext = new AudioContext();
      }
      onAudioStart?.();
      let totalBytes = 0;
      const allBytes = [];
      for (const chunk of chunks) {
        const binaryString = atob(chunk);
        const bytes = new Uint8Array(binaryString.length);
        for (let i = 0; i < binaryString.length; i++) {
          bytes[i] = binaryString.charCodeAt(i);
        }
        allBytes.push(bytes);
        totalBytes += bytes.length;
      }
      const combined = new Uint8Array(totalBytes);
      let offset = 0;
      for (const bytes of allBytes) {
        combined.set(bytes, offset);
        offset += bytes.length;
      }
      // BUG FIX: floor to whole 16-bit samples; a fractional length makes
      // AudioContext.createBuffer throw when the byte count is odd.
      const numSamples = Math.floor(combined.length / 2);
      if (numSamples === 0) return;
      const audioBuffer = audioContext.createBuffer(1, numSamples, sampleRate);
      const channelData = audioBuffer.getChannelData(0);
      const dataView = new DataView(combined.buffer);
      for (let i = 0; i < numSamples; i++) {
        // Little-endian signed 16-bit -> float in [-1, 1).
        const int16 = dataView.getInt16(i * 2, true);
        channelData[i] = int16 / 32768;
      }
      const source = audioContext.createBufferSource();
      source.buffer = audioBuffer;
      source.connect(audioContext.destination);
      source.onended = () => onAudioEnd?.();
      source.start(0);
    } catch (err) {
      // BUG FIX: normalize non-Error throwables before handing to onError,
      // which is typed to receive an Error.
      onError?.(err instanceof Error ? err : new Error(String(err)));
    }
  }
  // Tear down the socket and the audio context.
  function close() {
    if (ws) {
      ws.close();
      ws = null;
    }
    if (audioContext) {
      audioContext.close();
      audioContext = null;
    }
  }
  return {
    connect,
    speak,
    close
  };
}
|
|
138
|
+
export {
|
|
139
|
+
createTTSClient
|
|
140
|
+
};
|
|
141
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/tts-client.ts"],"sourcesContent":["/**\n * TTS Realtime WebSocket Client\n */\n\nexport interface TTSClientConfig {\n /** WebSocket endpoint URL */\n url: string;\n /** Voice name, default 'Cherry' */\n voice?: string;\n /** Auto play audio, default true */\n autoPlay?: boolean;\n /** Audio format, default 'pcm' */\n audioFormat?: 'pcm' | 'mp3' | 'wav' | 'opus';\n /** Sample rate, default 24000 */\n sampleRate?: number;\n /** Called when connection is ready */\n onReady?: () => void;\n /** Called when audio playback starts */\n onAudioStart?: () => void;\n /** Called when audio playback ends */\n onAudioEnd?: () => void;\n /** Called on error */\n onError?: (error: Error) => void;\n}\n\nexport interface TTSClient {\n /** Connect to TTS service */\n connect(): Promise<void>;\n /** Synthesize speech from text */\n speak(text: string): Promise<void>;\n /** Close connection */\n close(): void;\n}\n\nexport function createTTSClient(config: TTSClientConfig): TTSClient {\n const {\n url,\n voice = 'Cherry',\n autoPlay = true,\n audioFormat = 'pcm',\n sampleRate = 24000,\n onReady,\n onAudioStart,\n onAudioEnd,\n onError,\n } = config;\n\n let ws: WebSocket | null = null;\n let audioChunks: string[] = [];\n let audioContext: AudioContext | null = null;\n\n async function connect(): Promise<void> {\n return new Promise((resolve, reject) => {\n ws = new WebSocket(url);\n\n ws.onopen = () => {};\n\n ws.onmessage = (event) => {\n const data = JSON.parse(event.data);\n\n if (data.type === 'session.created') {\n ws!.send(JSON.stringify({\n type: 'session.update',\n session: {\n mode: 'server_commit',\n voice,\n response_format: audioFormat,\n sample_rate: sampleRate,\n },\n }));\n }\n\n if (data.type === 'session.updated') {\n onReady?.();\n resolve();\n }\n\n if (data.type === 'response.audio.delta') {\n audioChunks.push(data.delta);\n }\n\n if (data.type === 'response.audio.done') {\n if (autoPlay && typeof window !== 'undefined') {\n 
playAudio(audioChunks);\n }\n }\n\n if (data.type === 'response.done') {\n ws!.send(JSON.stringify({ type: 'session.finish' }));\n }\n\n if (data.type === 'error') {\n const err = new Error(data.error?.message || 'Unknown error');\n onError?.(err);\n reject(err);\n }\n };\n\n ws.onerror = () => {\n const err = new Error('WebSocket connection error');\n onError?.(err);\n reject(err);\n };\n\n ws.onclose = () => {\n ws = null;\n };\n });\n }\n\n async function speak(text: string): Promise<void> {\n if (!ws || ws.readyState !== WebSocket.OPEN) {\n throw new Error('WebSocket not connected');\n }\n\n audioChunks = [];\n\n ws.send(JSON.stringify({\n type: 'input_text_buffer.append',\n text,\n }));\n\n setTimeout(() => {\n ws!.send(JSON.stringify({\n type: 'input_text_buffer.commit',\n }));\n }, 100);\n }\n\n function playAudio(chunks: string[]) {\n if (typeof window === 'undefined') return;\n\n try {\n if (!audioContext) {\n audioContext = new AudioContext();\n }\n\n onAudioStart?.();\n\n let totalBytes = 0;\n const allBytes: Uint8Array[] = [];\n\n for (const chunk of chunks) {\n const binaryString = atob(chunk);\n const bytes = new Uint8Array(binaryString.length);\n for (let i = 0; i < binaryString.length; i++) {\n bytes[i] = binaryString.charCodeAt(i);\n }\n allBytes.push(bytes);\n totalBytes += bytes.length;\n }\n\n const combined = new Uint8Array(totalBytes);\n let offset = 0;\n for (const bytes of allBytes) {\n combined.set(bytes, offset);\n offset += bytes.length;\n }\n\n const numSamples = combined.length / 2;\n const audioBuffer = audioContext.createBuffer(1, numSamples, sampleRate);\n const channelData = audioBuffer.getChannelData(0);\n\n const dataView = new DataView(combined.buffer);\n for (let i = 0; i < numSamples; i++) {\n const int16 = dataView.getInt16(i * 2, true);\n channelData[i] = int16 / 32768.0;\n }\n\n const source = audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(audioContext.destination);\n source.onended = () 
=> onAudioEnd?.();\n source.start(0);\n } catch (err) {\n onError?.(err as Error);\n }\n }\n\n function close() {\n if (ws) {\n ws.close();\n ws = null;\n }\n if (audioContext) {\n audioContext.close();\n audioContext = null;\n }\n }\n\n return {\n connect,\n speak,\n close,\n };\n}\n"],"mappings":";AAkCO,SAAS,gBAAgB,QAAoC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,cAAc;AAAA,IACd,aAAa;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,MAAI,KAAuB;AAC3B,MAAI,cAAwB,CAAC;AAC7B,MAAI,eAAoC;AAExC,iBAAe,UAAyB;AACtC,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,WAAK,IAAI,UAAU,GAAG;AAEtB,SAAG,SAAS,MAAM;AAAA,MAAC;AAEnB,SAAG,YAAY,CAAC,UAAU;AACxB,cAAM,OAAO,KAAK,MAAM,MAAM,IAAI;AAElC,YAAI,KAAK,SAAS,mBAAmB;AACnC,aAAI,KAAK,KAAK,UAAU;AAAA,YACtB,MAAM;AAAA,YACN,SAAS;AAAA,cACP,MAAM;AAAA,cACN;AAAA,cACA,iBAAiB;AAAA,cACjB,aAAa;AAAA,YACf;AAAA,UACF,CAAC,CAAC;AAAA,QACJ;AAEA,YAAI,KAAK,SAAS,mBAAmB;AACnC,oBAAU;AACV,kBAAQ;AAAA,QACV;AAEA,YAAI,KAAK,SAAS,wBAAwB;AACxC,sBAAY,KAAK,KAAK,KAAK;AAAA,QAC7B;AAEA,YAAI,KAAK,SAAS,uBAAuB;AACvC,cAAI,YAAY,OAAO,WAAW,aAAa;AAC7C,sBAAU,WAAW;AAAA,UACvB;AAAA,QACF;AAEA,YAAI,KAAK,SAAS,iBAAiB;AACjC,aAAI,KAAK,KAAK,UAAU,EAAE,MAAM,iBAAiB,CAAC,CAAC;AAAA,QACrD;AAEA,YAAI,KAAK,SAAS,SAAS;AACzB,gBAAM,MAAM,IAAI,MAAM,KAAK,OAAO,WAAW,eAAe;AAC5D,oBAAU,GAAG;AACb,iBAAO,GAAG;AAAA,QACZ;AAAA,MACF;AAEA,SAAG,UAAU,MAAM;AACjB,cAAM,MAAM,IAAI,MAAM,4BAA4B;AAClD,kBAAU,GAAG;AACb,eAAO,GAAG;AAAA,MACZ;AAEA,SAAG,UAAU,MAAM;AACjB,aAAK;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAEA,iBAAe,MAAM,MAA6B;AAChD,QAAI,CAAC,MAAM,GAAG,eAAe,UAAU,MAAM;AAC3C,YAAM,IAAI,MAAM,yBAAyB;AAAA,IAC3C;AAEA,kBAAc,CAAC;AAEf,OAAG,KAAK,KAAK,UAAU;AAAA,MACrB,MAAM;AAAA,MACN;AAAA,IACF,CAAC,CAAC;AAEF,eAAW,MAAM;AACf,SAAI,KAAK,KAAK,UAAU;AAAA,QACtB,MAAM;AAAA,MACR,CAAC,CAAC;AAAA,IACJ,GAAG,GAAG;AAAA,EACR;AAEA,WAAS,UAAU,QAAkB;AACnC,QAAI,OAAO,WAAW,YAAa;AAEnC,QAAI;AACF,UAAI,CAAC,cAAc;AACjB,uBAAe,IAAI,aAAa;AAAA,MAClC;AAEA,qBAAe;AAEf,UAAI,aAAa;AACjB,YAAM,WAAyB,CAAC;AAEhC,iBAAW,SAAS,QAAQ;AAC1B,cAAM,eAAe,KAAK,KAAK;AAC/B,cAAM,QAAQ,IAAI,WAAW,aA
Aa,MAAM;AAChD,iBAAS,IAAI,GAAG,IAAI,aAAa,QAAQ,KAAK;AAC5C,gBAAM,CAAC,IAAI,aAAa,WAAW,CAAC;AAAA,QACtC;AACA,iBAAS,KAAK,KAAK;AACnB,sBAAc,MAAM;AAAA,MACtB;AAEA,YAAM,WAAW,IAAI,WAAW,UAAU;AAC1C,UAAI,SAAS;AACb,iBAAW,SAAS,UAAU;AAC5B,iBAAS,IAAI,OAAO,MAAM;AAC1B,kBAAU,MAAM;AAAA,MAClB;AAEA,YAAM,aAAa,SAAS,SAAS;AACrC,YAAM,cAAc,aAAa,aAAa,GAAG,YAAY,UAAU;AACvE,YAAM,cAAc,YAAY,eAAe,CAAC;AAEhD,YAAM,WAAW,IAAI,SAAS,SAAS,MAAM;AAC7C,eAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAM,QAAQ,SAAS,SAAS,IAAI,GAAG,IAAI;AAC3C,oBAAY,CAAC,IAAI,QAAQ;AAAA,MAC3B;AAEA,YAAM,SAAS,aAAa,mBAAmB;AAC/C,aAAO,SAAS;AAChB,aAAO,QAAQ,aAAa,WAAW;AACvC,aAAO,UAAU,MAAM,aAAa;AACpC,aAAO,MAAM,CAAC;AAAA,IAChB,SAAS,KAAK;AACZ,gBAAU,GAAY;AAAA,IACxB;AAAA,EACF;AAEA,WAAS,QAAQ;AACf,QAAI,IAAI;AACN,SAAG,MAAM;AACT,WAAK;AAAA,IACP;AACA,QAAI,cAAc;AAChB,mBAAa,MAAM;AACnB,qBAAe;AAAA,IACjB;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;","names":[]}
|
package/package.json
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@amaster.ai/tts-client",
|
|
3
|
+
"version": "1.0.0-beta.1",
|
|
4
|
+
"description": "Qwen TTS Realtime WebSocket client with audio playback",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.cjs",
|
|
7
|
+
"module": "./dist/index.js",
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"types": "./dist/index.d.ts",
|
|
12
|
+
"import": "./dist/index.js",
|
|
13
|
+
"require": "./dist/index.cjs"
|
|
14
|
+
}
|
|
15
|
+
},
|
|
16
|
+
"files": [
|
|
17
|
+
"dist",
|
|
18
|
+
"README.md"
|
|
19
|
+
],
|
|
20
|
+
"keywords": [
|
|
21
|
+
"tts",
|
|
22
|
+
"text-to-speech",
|
|
23
|
+
"qwen",
|
|
24
|
+
"realtime",
|
|
25
|
+
"websocket",
|
|
26
|
+
"audio",
|
|
27
|
+
"speech-synthesis"
|
|
28
|
+
],
|
|
29
|
+
"author": "Amaster Team",
|
|
30
|
+
"license": "MIT",
|
|
31
|
+
"publishConfig": {
|
|
32
|
+
"access": "public",
|
|
33
|
+
"registry": "https://registry.npmjs.org/"
|
|
34
|
+
},
|
|
35
|
+
"devDependencies": {
|
|
36
|
+
"tsup": "^8.3.5",
|
|
37
|
+
"typescript": "~5.7.2"
|
|
38
|
+
},
|
|
39
|
+
"scripts": {
|
|
40
|
+
"build": "tsup",
|
|
41
|
+
"dev": "tsup --watch",
|
|
42
|
+
"clean": "rm -rf dist *.tsbuildinfo",
|
|
43
|
+
"type-check": "tsc --noEmit"
|
|
44
|
+
}
|
|
45
|
+
}
|