@huyooo/ai-chat-frontend-react 0.2.14 → 0.2.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.css +0 -1
- package/dist/index.js +1 -5418
- package/package.json +4 -5
- package/dist/index.css.map +0 -1
- package/dist/index.js.map +0 -1
- package/src/adapter.ts +0 -68
- package/src/components/ChatPanel.tsx +0 -553
- package/src/components/common/ConfirmDialog.css +0 -136
- package/src/components/common/ConfirmDialog.tsx +0 -91
- package/src/components/common/CopyButton.css +0 -22
- package/src/components/common/CopyButton.tsx +0 -46
- package/src/components/common/IndexingSettings.css +0 -207
- package/src/components/common/IndexingSettings.tsx +0 -398
- package/src/components/common/SettingsPanel.css +0 -337
- package/src/components/common/SettingsPanel.tsx +0 -215
- package/src/components/common/Toast.css +0 -50
- package/src/components/common/Toast.tsx +0 -38
- package/src/components/common/ToggleSwitch.css +0 -52
- package/src/components/common/ToggleSwitch.tsx +0 -20
- package/src/components/header/ChatHeader.css +0 -285
- package/src/components/header/ChatHeader.tsx +0 -376
- package/src/components/input/AtFilePicker.css +0 -147
- package/src/components/input/AtFilePicker.tsx +0 -519
- package/src/components/input/ChatInput.css +0 -283
- package/src/components/input/ChatInput.tsx +0 -575
- package/src/components/input/DropdownSelector.css +0 -231
- package/src/components/input/DropdownSelector.tsx +0 -333
- package/src/components/input/ImagePreviewModal.css +0 -124
- package/src/components/input/ImagePreviewModal.tsx +0 -118
- package/src/components/input/at-views/AtBranchView.tsx +0 -34
- package/src/components/input/at-views/AtBrowserView.tsx +0 -34
- package/src/components/input/at-views/AtChatsView.tsx +0 -34
- package/src/components/input/at-views/AtDocsView.tsx +0 -34
- package/src/components/input/at-views/AtFilesView.tsx +0 -168
- package/src/components/input/at-views/AtTerminalsView.tsx +0 -34
- package/src/components/input/at-views/AtViewStyles.css +0 -143
- package/src/components/input/at-views/index.ts +0 -9
- package/src/components/message/ContentRenderer.css +0 -9
- package/src/components/message/MessageBubble.css +0 -193
- package/src/components/message/MessageBubble.tsx +0 -240
- package/src/components/message/PartsRenderer.css +0 -12
- package/src/components/message/PartsRenderer.tsx +0 -168
- package/src/components/message/WelcomeMessage.css +0 -221
- package/src/components/message/WelcomeMessage.tsx +0 -93
- package/src/components/message/parts/CollapsibleCard.css +0 -80
- package/src/components/message/parts/CollapsibleCard.tsx +0 -80
- package/src/components/message/parts/ErrorPart.css +0 -9
- package/src/components/message/parts/ErrorPart.tsx +0 -40
- package/src/components/message/parts/ImagePart.css +0 -49
- package/src/components/message/parts/ImagePart.tsx +0 -54
- package/src/components/message/parts/SearchPart.css +0 -44
- package/src/components/message/parts/SearchPart.tsx +0 -63
- package/src/components/message/parts/TextPart.css +0 -579
- package/src/components/message/parts/TextPart.tsx +0 -213
- package/src/components/message/parts/ThinkingPart.css +0 -9
- package/src/components/message/parts/ThinkingPart.tsx +0 -48
- package/src/components/message/parts/ToolCallPart.css +0 -246
- package/src/components/message/parts/ToolCallPart.tsx +0 -289
- package/src/components/message/parts/ToolResultPart.css +0 -67
- package/src/components/message/parts/index.ts +0 -13
- package/src/components/message/parts/visual-predicate.ts +0 -43
- package/src/components/message/parts/visual-render.ts +0 -19
- package/src/components/message/parts/visual.ts +0 -12
- package/src/components/message/welcome-types.ts +0 -46
- package/src/context/AutoRunConfigContext.tsx +0 -13
- package/src/context/ChatAdapterContext.tsx +0 -8
- package/src/context/ChatInputContext.tsx +0 -40
- package/src/context/RenderersContext.tsx +0 -35
- package/src/hooks/useChat.ts +0 -1569
- package/src/hooks/useImageUpload.ts +0 -345
- package/src/hooks/useVoiceInput.ts +0 -454
- package/src/hooks/useVoiceToTextInput.ts +0 -87
- package/src/index.ts +0 -151
- package/src/styles.css +0 -330
- package/src/types/index.ts +0 -196
- package/src/utils/fileIcon.ts +0 -49
|
@@ -1,454 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* 语音输入 hook(React)
|
|
3
|
-
*
|
|
4
|
-
* 浏览器录音 + Electron bridge ASR(WebSocket)实时转写。
|
|
5
|
-
* 与 Vue 版本 useVoiceInput.ts 逻辑保持一致,但使用 React state。
|
|
6
|
-
*/
|
|
7
|
-
|
|
8
|
-
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
|
|
9
|
-
import type { ChatAdapter, AsrResultData } from '@huyooo/ai-chat-bridge-electron/renderer';
|
|
10
|
-
|
|
11
|
-
/** Lifecycle states of the voice-input pipeline. */
export type VoiceInputStatus = 'idle' | 'connecting' | 'recording' | 'processing' | 'error';

/** Optional tunables for audio capture and ASR transcription. */
export interface VoiceInputConfig {
  // Target PCM sample rate sent to ASR (default 16000; see useVoiceInput destructuring).
  sampleRate?: number;
  // Interval in ms between audio uploads to ASR (default 200).
  sendInterval?: number;
  // Ask the ASR service to insert punctuation (default true).
  enablePunc?: boolean;
  // Ask the ASR service for inverse text normalization (default true).
  enableItn?: boolean;
}

/** Public surface returned by useVoiceInput. */
export interface UseVoiceInputReturn {
  status: VoiceInputStatus;
  // UI-level "active": true while connecting or recording.
  isRecording: boolean;
  // Latest (possibly partial) transcription text.
  currentText: string;
  // Final transcription, set once ASR marks a result as last.
  finalText: string;
  error: string | null;
  start: () => Promise<void>;
  // Resolves with the best transcription available (final, else current).
  stop: () => Promise<string>;
  // Aborts recording and discards any transcription.
  cancel: () => void;
}
|
|
30
|
-
|
|
31
|
-
function float32ToInt16(float32Array: Float32Array): Int16Array {
|
|
32
|
-
const int16Array = new Int16Array(float32Array.length);
|
|
33
|
-
for (let i = 0; i < float32Array.length; i++) {
|
|
34
|
-
const s = Math.max(-1, Math.min(1, float32Array[i]));
|
|
35
|
-
int16Array[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
|
|
36
|
-
}
|
|
37
|
-
return int16Array;
|
|
38
|
-
}
|
|
39
|
-
|
|
40
|
-
function resample(audioData: Float32Array, fromSampleRate: number, toSampleRate: number): Float32Array {
|
|
41
|
-
if (fromSampleRate === toSampleRate) return audioData;
|
|
42
|
-
|
|
43
|
-
const ratio = fromSampleRate / toSampleRate;
|
|
44
|
-
const newLength = Math.round(audioData.length / ratio);
|
|
45
|
-
const result = new Float32Array(newLength);
|
|
46
|
-
|
|
47
|
-
for (let i = 0; i < newLength; i++) {
|
|
48
|
-
const srcIndex = i * ratio;
|
|
49
|
-
const srcIndexFloor = Math.floor(srcIndex);
|
|
50
|
-
const srcIndexCeil = Math.min(srcIndexFloor + 1, audioData.length - 1);
|
|
51
|
-
const t = srcIndex - srcIndexFloor;
|
|
52
|
-
result[i] = audioData[srcIndexFloor] * (1 - t) + audioData[srcIndexCeil] * t;
|
|
53
|
-
}
|
|
54
|
-
|
|
55
|
-
return result;
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
// 全局预热标志:确保只预热一次(避免多个组件实例重复预热)
|
|
59
|
-
let asrWarmupDone = false;
|
|
60
|
-
|
|
61
|
-
async function setupAudioWorkletCapture(opts: {
|
|
62
|
-
audioContext: AudioContext;
|
|
63
|
-
source: MediaStreamAudioSourceNode;
|
|
64
|
-
chunkSize: number;
|
|
65
|
-
onChunk: (chunk: Float32Array) => void;
|
|
66
|
-
}): Promise<{
|
|
67
|
-
cleanup: () => void;
|
|
68
|
-
}> {
|
|
69
|
-
const { audioContext, source, chunkSize, onChunk } = opts;
|
|
70
|
-
|
|
71
|
-
const workletCode = `
|
|
72
|
-
class PcmCaptureProcessor extends AudioWorkletProcessor {
|
|
73
|
-
constructor(options) {
|
|
74
|
-
super();
|
|
75
|
-
const size = (options && options.processorOptions && options.processorOptions.chunkSize) || 4096;
|
|
76
|
-
this._chunkSize = size;
|
|
77
|
-
this._buf = new Float32Array(size);
|
|
78
|
-
this._offset = 0;
|
|
79
|
-
}
|
|
80
|
-
process(inputs, outputs) {
|
|
81
|
-
const input = inputs && inputs[0] && inputs[0][0];
|
|
82
|
-
const output = outputs && outputs[0] && outputs[0][0];
|
|
83
|
-
if (!input) return true;
|
|
84
|
-
if (output) output.set(input);
|
|
85
|
-
|
|
86
|
-
let i = 0;
|
|
87
|
-
while (i < input.length) {
|
|
88
|
-
const remain = this._chunkSize - this._offset;
|
|
89
|
-
const take = Math.min(remain, input.length - i);
|
|
90
|
-
this._buf.set(input.subarray(i, i + take), this._offset);
|
|
91
|
-
this._offset += take;
|
|
92
|
-
i += take;
|
|
93
|
-
if (this._offset >= this._chunkSize) {
|
|
94
|
-
const out = this._buf;
|
|
95
|
-
this.port.postMessage(out, [out.buffer]);
|
|
96
|
-
this._buf = new Float32Array(this._chunkSize);
|
|
97
|
-
this._offset = 0;
|
|
98
|
-
}
|
|
99
|
-
}
|
|
100
|
-
return true;
|
|
101
|
-
}
|
|
102
|
-
}
|
|
103
|
-
registerProcessor('pcm-capture', PcmCaptureProcessor);
|
|
104
|
-
`;
|
|
105
|
-
|
|
106
|
-
const blob = new Blob([workletCode], { type: 'text/javascript' });
|
|
107
|
-
const url = URL.createObjectURL(blob);
|
|
108
|
-
await audioContext.audioWorklet.addModule(url);
|
|
109
|
-
URL.revokeObjectURL(url);
|
|
110
|
-
|
|
111
|
-
const workletNode = new AudioWorkletNode(audioContext, 'pcm-capture', {
|
|
112
|
-
numberOfInputs: 1,
|
|
113
|
-
numberOfOutputs: 1,
|
|
114
|
-
channelCount: 1,
|
|
115
|
-
processorOptions: { chunkSize },
|
|
116
|
-
});
|
|
117
|
-
|
|
118
|
-
const silentGain = audioContext.createGain();
|
|
119
|
-
silentGain.gain.value = 0;
|
|
120
|
-
|
|
121
|
-
const onMessage = (event: MessageEvent) => {
|
|
122
|
-
const data = event.data;
|
|
123
|
-
if (data instanceof Float32Array) {
|
|
124
|
-
onChunk(data);
|
|
125
|
-
return;
|
|
126
|
-
}
|
|
127
|
-
if (data instanceof ArrayBuffer) {
|
|
128
|
-
onChunk(new Float32Array(data));
|
|
129
|
-
}
|
|
130
|
-
};
|
|
131
|
-
workletNode.port.addEventListener('message', onMessage);
|
|
132
|
-
workletNode.port.start();
|
|
133
|
-
|
|
134
|
-
source.connect(workletNode);
|
|
135
|
-
workletNode.connect(silentGain);
|
|
136
|
-
silentGain.connect(audioContext.destination);
|
|
137
|
-
|
|
138
|
-
return {
|
|
139
|
-
cleanup: () => {
|
|
140
|
-
try {
|
|
141
|
-
workletNode.port.removeEventListener('message', onMessage);
|
|
142
|
-
} catch {
|
|
143
|
-
// ignore
|
|
144
|
-
}
|
|
145
|
-
try {
|
|
146
|
-
workletNode.disconnect();
|
|
147
|
-
} catch {
|
|
148
|
-
// ignore
|
|
149
|
-
}
|
|
150
|
-
try {
|
|
151
|
-
silentGain.disconnect();
|
|
152
|
-
} catch {
|
|
153
|
-
// ignore
|
|
154
|
-
}
|
|
155
|
-
},
|
|
156
|
-
};
|
|
157
|
-
}
|
|
158
|
-
|
|
159
|
-
/**
 * Voice-input hook (React).
 *
 * Captures microphone audio in the browser and streams it to an
 * Electron-bridge ASR service (WebSocket) for live transcription.
 * Mirrors the Vue useVoiceInput.ts logic, implemented with React state.
 *
 * @param adapter bridge adapter exposing asrStart / asrSendAudio / asrFinish / asrStop
 * @param config  capture and ASR tunables (see VoiceInputConfig)
 */
export function useVoiceInput(adapter: ChatAdapter | undefined, config: VoiceInputConfig = {}): UseVoiceInputReturn {
  const { sampleRate = 16000, sendInterval = 200, enablePunc = true, enableItn = true } = config;

  // Auto-warm the ASR connection (first call only, deferred so it does not
  // block initialization; asrWarmupDone is module-global so multiple hook
  // instances warm up at most once).
  useEffect(() => {
    if (adapter && !asrWarmupDone && typeof adapter.asrWarmup === 'function') {
      asrWarmupDone = true;
      // Defer warmup by 800ms to avoid competing with first-screen rendering.
      const timer = setTimeout(() => {
        adapter.asrWarmup?.().catch(() => {
          // Fail silently; warmup is best-effort.
        });
      }, 800);
      return () => clearTimeout(timer);
    }
  }, [adapter]);

  const [status, setStatus] = useState<VoiceInputStatus>('idle');
  const [currentText, setCurrentText] = useState('');
  const [finalText, setFinalText] = useState('');
  const [error, setError] = useState<string | null>(null);

  const statusRef = useRef<VoiceInputStatus>('idle');
  const currentTextRef = useRef('');
  const finalTextRef = useRef('');

  const mediaStreamRef = useRef<MediaStream | null>(null);
  const audioContextRef = useRef<AudioContext | null>(null);
  const workletCleanupRef = useRef<(() => void) | null>(null);
  const sourceRef = useRef<MediaStreamAudioSourceNode | null>(null);
  const audioBufferRef = useRef<Float32Array[]>([]);
  const sendTimerRef = useRef<ReturnType<typeof setInterval> | null>(null);
  const cleanupFnsRef = useRef<Array<() => void>>([]);

  // UI-level "recording": includes the connecting moment (so users don't
  // think the click was missed).
  const isRecording = useMemo(() => status === 'connecting' || status === 'recording', [status]);

  // Mirror state into refs so audio callbacks / timers never read a stale closure.
  useEffect(() => {
    statusRef.current = status;
  }, [status]);
  useEffect(() => {
    currentTextRef.current = currentText;
  }, [currentText]);
  useEffect(() => {
    finalTextRef.current = finalText;
  }, [finalText]);

  // Update the ref synchronously alongside setState so immediate reads are consistent.
  const setStatusSafe = useCallback((next: VoiceInputStatus) => {
    statusRef.current = next;
    setStatus(next);
  }, []);

  // Tear down send timer, worklet, audio graph, media stream, event
  // subscriptions, and the pending audio buffer.
  const cleanup = useCallback(() => {
    if (sendTimerRef.current) {
      clearInterval(sendTimerRef.current);
      sendTimerRef.current = null;
    }

    if (workletCleanupRef.current) {
      workletCleanupRef.current();
      workletCleanupRef.current = null;
    }

    if (sourceRef.current) {
      try {
        sourceRef.current.disconnect();
      } catch {
        // ignore
      }
      sourceRef.current = null;
    }

    if (audioContextRef.current) {
      audioContextRef.current.close().catch(() => {});
      audioContextRef.current = null;
    }

    if (mediaStreamRef.current) {
      mediaStreamRef.current.getTracks().forEach((t) => t.stop());
      mediaStreamRef.current = null;
    }

    cleanupFnsRef.current.forEach((fn) => fn());
    cleanupFnsRef.current = [];

    audioBufferRef.current = [];
  }, []);

  // Merge buffered Float32 chunks, convert to 16-bit PCM, and send to ASR.
  const sendAudioChunk = useCallback(async () => {
    if (!adapter?.asrSendAudio) return;
    const buf = audioBufferRef.current;
    if (!buf.length) return;

    const totalLength = buf.reduce((sum, arr) => sum + arr.length, 0);
    const merged = new Float32Array(totalLength);
    let offset = 0;
    for (const chunk of buf) {
      merged.set(chunk, offset);
      offset += chunk.length;
    }
    audioBufferRef.current = [];

    const pcmData = float32ToInt16(merged);
    const result = await adapter.asrSendAudio(pcmData.buffer);
    if (!result.success) {
      // Don't interrupt recording; just log the error.
      // eslint-disable-next-line no-console
      console.error('[VoiceInput] 发送音频失败:', result.error);
    }
  }, [adapter]);

  // Acquire the microphone, open the ASR session, and begin streaming audio.
  const start = useCallback(async () => {
    // Debounce: ignore start while connecting/recording (repeated clicks
    // would otherwise corrupt the state machine).
    if (statusRef.current === 'connecting' || statusRef.current === 'recording') return;

    if (!adapter) {
      setError('Adapter 未初始化');
      setStatusSafe('error');
      return;
    }
    if (!adapter.asrStart) {
      setError('语音识别功能不可用');
      setStatusSafe('error');
      return;
    }

    setError(null);
    setCurrentText('');
    setFinalText('');
    setStatusSafe('connecting');

    try {
      mediaStreamRef.current = await navigator.mediaDevices.getUserMedia({
        audio: {
          channelCount: 1,
          sampleRate: { ideal: sampleRate },
          echoCancellation: true,
          noiseSuppression: true,
        },
      });

      audioContextRef.current = new AudioContext({ sampleRate });
      // The browser may ignore the requested rate; keep the real one for resampling.
      const actualSampleRate = audioContextRef.current.sampleRate;

      if (adapter.onAsrResult) {
        const off = adapter.onAsrResult((data: { result: AsrResultData; isLast: boolean }) => {
          const text = data.result.result?.text || '';
          setCurrentText(text);
          if (data.isLast) setFinalText(text);
        });
        cleanupFnsRef.current.push(off);
      }

      if (adapter.onAsrError) {
        const off = adapter.onAsrError((err: { message: string }) => {
          // eslint-disable-next-line no-console
          console.error('[VoiceInput] ASR 错误:', err.message);
          setError(err.message);
          setStatusSafe('error');
        });
        cleanupFnsRef.current.push(off);
      }

      if (adapter.onAsrClosed) {
        const off = adapter.onAsrClosed(() => {
          if (statusRef.current === 'recording') {
            setStatusSafe('idle');
          }
        });
        cleanupFnsRef.current.push(off);
      }

      const ctx = audioContextRef.current;
      const stream = mediaStreamRef.current;
      if (!ctx || !stream) throw new Error('AudioContext 或 MediaStream 初始化失败');

      const startResult = await adapter.asrStart({
        format: 'pcm',
        sampleRate,
        enablePunc,
        enableItn,
        showUtterances: true,
      });

      if (!startResult.success) {
        throw new Error(startResult.error || 'ASR 启动失败');
      }

      // Only begin capturing audio after the connection succeeds (keeps it simple).
      sourceRef.current = ctx.createMediaStreamSource(stream);
      if (typeof AudioWorkletNode === 'undefined' || !ctx.audioWorklet) {
        throw new Error('当前环境不支持 AudioWorkletNode(无法启动语音录制)');
      }
      const { cleanup: off } = await setupAudioWorkletCapture({
        audioContext: ctx,
        source: sourceRef.current,
        chunkSize: 4096,
        onChunk: (chunk) => {
          // Drop chunks unless actively recording (e.g. during teardown).
          if (statusRef.current !== 'recording') return;
          const resampled = resample(chunk, actualSampleRate, sampleRate);
          // Copy: resample may return the input array when rates match.
          audioBufferRef.current.push(new Float32Array(resampled));
        },
      });
      workletCleanupRef.current = off;

      // Periodically flush the buffered audio to ASR.
      sendTimerRef.current = setInterval(() => {
        sendAudioChunk().catch(() => {});
      }, sendInterval);

      setStatusSafe('recording');
    } catch (e) {
      const msg = e instanceof Error ? e.message : String(e);
      setError(msg);
      setStatusSafe('error');
      cleanup();
    }
  }, [adapter, cleanup, enableItn, enablePunc, sampleRate, sendAudioChunk, sendInterval, setStatusSafe]);

  // Stop recording and resolve with the best transcription available.
  const stop = useCallback(async (): Promise<string> => {
    // Stop clicked while still connecting: treat as cancel so the state
    // returns to idle immediately.
    if (statusRef.current === 'connecting') {
      if (adapter?.asrStop) {
        adapter.asrStop().catch(() => {});
      }
      cleanup();
      setCurrentText('');
      setFinalText('');
      setError(null);
      setStatusSafe('idle');
      return '';
    }
    if (statusRef.current !== 'recording') return finalTextRef.current || currentTextRef.current;

    setStatusSafe('processing');

    try {
      await sendAudioChunk();
      if (adapter?.asrFinish) {
        await adapter.asrFinish();
      }

      // Wait for the final result (3s max, polled every 100ms).
      await new Promise<void>((resolve) => {
        const startAt = Date.now();
        const timer = setInterval(() => {
          if (Date.now() - startAt > 3000) {
            clearInterval(timer);
            resolve();
            return;
          }
          if (finalTextRef.current) {
            clearInterval(timer);
            resolve();
          }
        }, 100);
      });
    } catch (e) {
      const msg = e instanceof Error ? e.message : String(e);
      setError(msg);
    } finally {
      cleanup();
      setStatusSafe('idle');
    }

    return finalTextRef.current || currentTextRef.current;
  }, [adapter, cleanup, sendAudioChunk, setStatusSafe]);

  // Abort recording and discard any transcription/error state.
  const cancel = useCallback(() => {
    if (adapter?.asrStop) {
      adapter.asrStop().catch(() => {});
    }
    cleanup();
    setCurrentText('');
    setFinalText('');
    setError(null);
    setStatusSafe('idle');
  }, [adapter, cleanup, setStatusSafe]);

  // Cancel (and release the microphone) on unmount.
  useEffect(() => {
    return () => cancel();
  }, [cancel]);

  return {
    status,
    isRecording,
    currentText,
    finalText,
    error,
    start,
    stop,
    cancel,
  };
}
|
|
453
|
-
|
|
454
|
-
|
|
@@ -1,87 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* 将语音识别(useVoiceInput)封装成“写入输入框”的高内聚控制层
|
|
3
|
-
* - 处理 prefix(追加模式)
|
|
4
|
-
* - 处理实时同步到 inputText
|
|
5
|
-
* - 处理 Enter:语音中 Enter 停止/取消语音(不发送)
|
|
6
|
-
* - 输出按钮状态/禁用逻辑(ChatInput 只负责渲染)
|
|
7
|
-
*/
|
|
8
|
-
|
|
9
|
-
import { useCallback, useEffect, useMemo, useRef } from 'react';
|
|
10
|
-
import type { ChatAdapter } from '@huyooo/ai-chat-bridge-electron/renderer';
|
|
11
|
-
import { useVoiceInput } from './useVoiceInput';
|
|
12
|
-
|
|
13
|
-
/**
 * Wraps voice recognition (useVoiceInput) into a cohesive "write into the
 * input box" control layer:
 * - handles the prefix (append mode: transcription is appended after text
 *   already present when recording started),
 * - live-syncs the transcription into inputText,
 * - handles Enter: while voice is active, Enter stops/cancels voice (no send),
 * - derives button enabled/disabled state (ChatInput only renders).
 */
export function useVoiceToTextInput(opts: {
  adapter: ChatAdapter | undefined;
  inputText: string;
  setInputText: (v: string) => void;
  hasImages: boolean;
  isLoading: boolean;
}) {
  const { adapter, inputText, setInputText, hasImages, isLoading } = opts;
  const voiceInput = useVoiceInput(adapter);
  // Text already in the input box when recording started.
  const prefixRef = useRef('');

  const isVoiceActive = useMemo(
    () => voiceInput.status === 'connecting' || voiceInput.status === 'recording',
    [voiceInput.status]
  );

  // Live-sync the recognized text into the input box (append mode).
  useEffect(() => {
    if (!isVoiceActive) return;
    const prefix = prefixRef.current;
    const t = voiceInput.currentText;
    const next = prefix ? (t ? `${prefix} ${t}` : prefix) : t;
    setInputText(next);
  }, [isVoiceActive, setInputText, voiceInput.currentText]);

  // Start / stop / cancel depending on the current voice state.
  const toggleVoice = useCallback(async () => {
    if (isLoading) return;
    if (!adapter) return;

    if (voiceInput.status === 'connecting') {
      // Still connecting: cancel and restore the pre-recording text.
      voiceInput.cancel();
      setInputText(prefixRef.current);
      prefixRef.current = '';
      return;
    }

    if (voiceInput.status === 'recording') {
      await voiceInput.stop();
      prefixRef.current = '';
      return;
    }

    // Idle: remember existing text as the prefix, then start recording.
    prefixRef.current = inputText.trim();
    await voiceInput.start();
  }, [adapter, inputText, isLoading, setInputText, voiceInput]);

  // Send button disabled state; never disabled while loading (so the same
  // button can stop generation).
  const sendDisabled = useMemo(() => {
    if (isLoading) return false; // allow stopping generation
    if (isVoiceActive) return true;
    return !inputText.trim() && !hasImages;
  }, [hasImages, inputText, isLoading, isVoiceActive]);

  // Returns true when the key event was consumed (Enter while voice active).
  const handleKeyDownForVoice = useCallback(
    (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
      if (!isVoiceActive) return false;
      if (e.key === 'Enter' && !e.shiftKey) {
        e.preventDefault();
        toggleVoice().catch(() => {});
        return true;
      }
      return false;
    },
    [isVoiceActive, toggleVoice]
  );

  return {
    voiceInput,
    isVoiceActive,
    toggleVoice,
    sendDisabled,
    handleKeyDownForVoice,
  };
}
|
|
86
|
-
|
|
87
|
-
|
package/src/index.ts
DELETED
|
@@ -1,151 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* @huyooo/ai-chat-frontend-react
|
|
3
|
-
*
|
|
4
|
-
* AI Chat 前端组件库 - React 版本
|
|
5
|
-
*
|
|
6
|
-
* 新架构:使用 ContentPart 数组渲染消息内容
|
|
7
|
-
* - 支持流式渲染
|
|
8
|
-
* - 支持自定义 Part 类型渲染(如 weather, stock)
|
|
9
|
-
* - 支持思考、搜索、工具调用等多种内容类型
|
|
10
|
-
*/
|
|
11
|
-
|
|
12
|
-
// CSS 变量默认值(与 vue 版本保持一致:入口自动注入)
|
|
13
|
-
import './styles.css'
|
|
14
|
-
|
|
15
|
-
// ==================== 核心类型 ====================
|
|
16
|
-
|
|
17
|
-
// 从 bridge-electron 重新导出通信相关类型
|
|
18
|
-
export type {
|
|
19
|
-
ChatAdapter,
|
|
20
|
-
ChatEvent,
|
|
21
|
-
ChatEventType,
|
|
22
|
-
ChatOptions,
|
|
23
|
-
ChatMode,
|
|
24
|
-
ThinkingMode,
|
|
25
|
-
SessionRecord,
|
|
26
|
-
MessageRecord,
|
|
27
|
-
ModelOption,
|
|
28
|
-
ProviderType,
|
|
29
|
-
} from '@huyooo/ai-chat-bridge-electron/renderer'
|
|
30
|
-
|
|
31
|
-
// 导出 adapter 辅助类型
|
|
32
|
-
export type {
|
|
33
|
-
ThinkingData,
|
|
34
|
-
ToolCallData,
|
|
35
|
-
ToolResultData,
|
|
36
|
-
ImageData,
|
|
37
|
-
SendMessageOptions,
|
|
38
|
-
CreateSessionOptions,
|
|
39
|
-
UpdateSessionOptions,
|
|
40
|
-
SaveMessageOptions,
|
|
41
|
-
} from './adapter'
|
|
42
|
-
|
|
43
|
-
// 导出消息和内容类型
|
|
44
|
-
export type {
|
|
45
|
-
// 消息类型
|
|
46
|
-
ChatMessage,
|
|
47
|
-
// ContentPart 类型
|
|
48
|
-
ContentPart,
|
|
49
|
-
ContentPartType,
|
|
50
|
-
TextPart,
|
|
51
|
-
ThinkingPart,
|
|
52
|
-
SearchPart,
|
|
53
|
-
ToolCallPart,
|
|
54
|
-
ImagePart,
|
|
55
|
-
ErrorPart,
|
|
56
|
-
// 搜索结果
|
|
57
|
-
SearchResult,
|
|
58
|
-
// 错误详情
|
|
59
|
-
ErrorDetails,
|
|
60
|
-
// 输入配置
|
|
61
|
-
ChatInputOptions,
|
|
62
|
-
} from './types'
|
|
63
|
-
|
|
64
|
-
// 导出工具函数
|
|
65
|
-
export { getMessageText } from './types'
|
|
66
|
-
|
|
67
|
-
// ==================== Hooks ====================
|
|
68
|
-
|
|
69
|
-
export { useChat } from './hooks/useChat'
|
|
70
|
-
export type { UseChatOptions, ToolCompleteEvent, SideEffect } from './hooks/useChat'
|
|
71
|
-
|
|
72
|
-
// ==================== Context ====================
|
|
73
|
-
|
|
74
|
-
export { ChatInputProvider, useChatInputContext } from './context/ChatInputContext'
|
|
75
|
-
export type { ChatInputContextValue } from './context/ChatInputContext'
|
|
76
|
-
|
|
77
|
-
// Part 渲染器上下文
|
|
78
|
-
export { PartRenderersProvider, PartRenderersContext } from './context/RenderersContext'
|
|
79
|
-
export type { PartRenderers, PartRendererProps } from './context/RenderersContext'
|
|
80
|
-
|
|
81
|
-
// ==================== 主组件 ====================
|
|
82
|
-
|
|
83
|
-
export { ChatPanel } from './components/ChatPanel'
|
|
84
|
-
export type { ChatPanelHandle } from './components/ChatPanel'
|
|
85
|
-
|
|
86
|
-
// ==================== 消息组件 ====================
|
|
87
|
-
|
|
88
|
-
export { MessageBubble } from './components/message/MessageBubble'
|
|
89
|
-
export { PartsRenderer } from './components/message/PartsRenderer'
|
|
90
|
-
|
|
91
|
-
// Part 渲染组件
|
|
92
|
-
export {
|
|
93
|
-
TextPart as TextPartComponent,
|
|
94
|
-
ThinkingPart as ThinkingPartComponent,
|
|
95
|
-
SearchPart as SearchPartComponent,
|
|
96
|
-
ToolCallPart as ToolCallPartComponent,
|
|
97
|
-
ImagePart as ImagePartComponent,
|
|
98
|
-
ErrorPart as ErrorPartComponent,
|
|
99
|
-
} from './components/message/parts'
|
|
100
|
-
|
|
101
|
-
// ==================== 其他组件 ====================
|
|
102
|
-
|
|
103
|
-
// 输入组件
|
|
104
|
-
export { ChatInput } from './components/input/ChatInput'
|
|
105
|
-
|
|
106
|
-
// Header 组件
|
|
107
|
-
export { ChatHeader } from './components/header/ChatHeader'
|
|
108
|
-
|
|
109
|
-
// 欢迎消息组件
|
|
110
|
-
export { WelcomeMessage } from './components/message/WelcomeMessage'
|
|
111
|
-
export type { WelcomeConfig, WelcomeFeature, WelcomeTask } from './components/message/welcome-types'
|
|
112
|
-
export { defaultWelcomeConfig } from './components/message/welcome-types'
|
|
113
|
-
|
|
114
|
-
// 通用组件
|
|
115
|
-
export { ConfirmDialog } from './components/common/ConfirmDialog'
|
|
116
|
-
export { Toast } from './components/common/Toast'
|
|
117
|
-
|
|
118
|
-
// ==================== 工具渲染器相关 ====================
|
|
119
|
-
|
|
120
|
-
// 从 ai-chat-shared 重新导出(用于自定义工具渲染器)
|
|
121
|
-
export type {
|
|
122
|
-
ContentBlockType,
|
|
123
|
-
ContentBlock,
|
|
124
|
-
TextBlock as TextBlockType,
|
|
125
|
-
CodeBlock as CodeBlockType,
|
|
126
|
-
WeatherData,
|
|
127
|
-
SearchResultItem,
|
|
128
|
-
} from '@huyooo/ai-chat-shared'
|
|
129
|
-
export { parseContent, highlightCode, getLanguageDisplayName, renderMarkdown } from '@huyooo/ai-chat-shared'
|
|
130
|
-
|
|
131
|
-
/**
|
|
132
|
-
* 使用说明:
|
|
133
|
-
*
|
|
134
|
-
* 1. 导入样式:
|
|
135
|
-
* import '@huyooo/ai-chat-frontend-react/style.css'
|
|
136
|
-
*
|
|
137
|
-
* 2. 基本使用:
|
|
138
|
-
* import { ChatPanel } from '@huyooo/ai-chat-frontend-react'
|
|
139
|
-
* import { createElectronAdapter } from '@huyooo/ai-chat-bridge-electron/renderer'
|
|
140
|
-
* const adapter = createElectronAdapter()
|
|
141
|
-
* <ChatPanel adapter={adapter} cwd="/path/to/dir" />
|
|
142
|
-
*
|
|
143
|
-
* 3. 自定义 Part 渲染器(新架构):
|
|
144
|
-
* import WeatherCard from './WeatherCard'
|
|
145
|
-
* const partRenderers = { weather: WeatherCard }
|
|
146
|
-
* <ChatPanel adapter={adapter} partRenderers={partRenderers} />
|
|
147
|
-
*
|
|
148
|
-
* 4. 使用 useChat hook 自定义 UI:
|
|
149
|
-
* import { useChat } from '@huyooo/ai-chat-frontend-react'
|
|
150
|
-
* const { messages, sendMessage, ... } = useChat({ adapter })
|
|
151
|
-
*/
|