@contentgrowth/llm-service 0.9.99 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ui/react/components/index.cjs +133 -498
- package/dist/ui/react/components/index.cjs.map +1 -1
- package/dist/ui/react/components/index.d.cts +2 -13
- package/dist/ui/react/components/index.d.ts +2 -13
- package/dist/ui/react/components/index.js +128 -492
- package/dist/ui/react/components/index.js.map +1 -1
- package/package.json +1 -1
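
Reading the hunks below, the consumer-visible change in 1.0.1 is that useSpeechRecognition is dropped from the components export map, and voice capture in ChatInputArea and TapToTalk now always goes through useAudioRecorder (MediaRecorder) with transcription delegated to voiceConfig.onAudioCapture. A minimal sketch of how a consumer might wire voice input after this change; the import path and the callback shape are assumptions inferred from the diff, not documented API:

    // Sketch only - how a consumer that previously used useSpeechRecognition
    // might wire voice input after 1.0.1. Import path and onAudioCapture shape
    // are assumptions inferred from this diff, not confirmed package API.
    import { useAudioRecorder } from "@contentgrowth/llm-service"; // assumed entry point

    export function useVoiceInput(
      onResult: (text: string) => void,
      onAudioCapture?: (blob: Blob) => Promise<string> // e.g. a Whisper-backed transcriber
    ) {
      // useAudioRecorder(onStop) hands the recorded Blob to onStop, mirroring TapToTalk below.
      const recorder = useAudioRecorder(async (blob: Blob) => {
        if (!onAudioCapture) return;
        const text = await onAudioCapture(blob);
        if (text) onResult(text);
      });
      // recorder.start() / recorder.stop() / recorder.isRecording drive the button state.
      return recorder;
    }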
@@ -42,8 +42,7 @@ __export(components_exports, {
  transcribeAudio: () => transcribeAudio,
  useAudioRecorder: () => useAudioRecorder,
  useChatConfig: () => useChatConfig,
- useProactiveResize: () => useProactiveResize,
- useSpeechRecognition: () => useSpeechRecognition
+ useProactiveResize: () => useProactiveResize
  });
  module.exports = __toCommonJS(components_exports);

@@ -190,303 +189,19 @@ function ChatHeader({
  }

  // src/ui/react/components/ChatInputArea.tsx
- var
+ var import_react4 = require("react");
  var import_outline = require("@heroicons/react/24/outline");

- // src/ui/react/hooks/useSpeechRecognition.ts
- var import_react2 = require("react");
- var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
- const [isListening, setIsListening] = (0, import_react2.useState)(false);
- const [transcript, setTranscript] = (0, import_react2.useState)("");
- const [error, setError] = (0, import_react2.useState)(null);
- const [isSupported, setIsSupported] = (0, import_react2.useState)(false);
- const recognitionRef = (0, import_react2.useRef)(null);
- const isSimulatingRef = (0, import_react2.useRef)(false);
- const simulationTimeoutRef = (0, import_react2.useRef)(null);
- const languageRef = (0, import_react2.useRef)(language);
- const instanceIdRef = (0, import_react2.useRef)(Math.random().toString(36).slice(2));
- const lastStartAtRef = (0, import_react2.useRef)(null);
- const lastStopAtRef = (0, import_react2.useRef)(null);
- const onResultRef = (0, import_react2.useRef)(onResult);
- const onEndRef = (0, import_react2.useRef)(onEnd);
- (0, import_react2.useEffect)(() => {
- onResultRef.current = onResult;
- onEndRef.current = onEnd;
- }, [onResult, onEnd]);
- (0, import_react2.useEffect)(() => {
- languageRef.current = language;
- if (recognitionRef.current) {
- console.log("[useSpeechRecognition] Updating language to:", language);
- recognitionRef.current.lang = language;
- }
- }, [language]);
- const isStartingRef = (0, import_react2.useRef)(false);
- (0, import_react2.useEffect)(() => {
- var _a;
- if (typeof window !== "undefined") {
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
- console.log("[useSpeechRecognition] Env - isSecureContext:", window.isSecureContext, "protocol:", (_a = window.location) == null ? void 0 : _a.protocol);
- const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
- console.log("[useSpeechRecognition] Init check - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
- setIsSupported(!!SpeechRecognition);
- }
- return () => {
- console.log("[useSpeechRecognition] Effect cleanup - stopping recognition");
- if (isSimulatingRef.current && simulationTimeoutRef.current) {
- clearTimeout(simulationTimeoutRef.current);
- simulationTimeoutRef.current = null;
- }
- if (recognitionRef.current) {
- try {
- recognitionRef.current.stop();
- } catch (e) {
- }
- recognitionRef.current = null;
- }
- if (typeof window !== "undefined") {
- const w = window;
- if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
- console.log("[useSpeechRecognition] Cleanup clearing global active instance lock. instanceId:", instanceIdRef.current);
- w.__llmSpeechRecognitionActiveInstanceId = null;
- }
- }
- };
- }, []);
- const createRecognitionInstance = (0, import_react2.useCallback)(() => {
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
- if (!SpeechRecognition) {
- console.error("[useSpeechRecognition] SpeechRecognition not available");
- return null;
- }
- console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
- const recognition = new SpeechRecognition();
- const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
- recognition.continuous = true;
- recognition.interimResults = true;
- recognition.lang = languageRef.current;
- console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
- recognition.onaudiostart = () => {
- console.log("[useSpeechRecognition] Native onaudiostart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onaudioend = () => {
- console.log("[useSpeechRecognition] Native onaudioend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onsoundstart = () => {
- console.log("[useSpeechRecognition] Native onsoundstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onsoundend = () => {
- console.log("[useSpeechRecognition] Native onsoundend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onspeechstart = () => {
- console.log("[useSpeechRecognition] Native onspeechstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onspeechend = () => {
- console.log("[useSpeechRecognition] Native onspeechend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onnomatch = () => {
- console.log("[useSpeechRecognition] Native onnomatch. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
- };
- recognition.onstart = () => {
- console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
- isStartingRef.current = false;
- setIsListening(true);
- setError(null);
- if (typeof window !== "undefined") {
- const w = window;
- w.__llmSpeechRecognitionActiveInstanceId = instanceIdRef.current;
- console.log("[useSpeechRecognition] Set global active instance lock. instanceId:", instanceIdRef.current);
- }
- };
- recognition.onend = () => {
- console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
- isStartingRef.current = false;
- if (recognitionRef.current === recognition) {
- console.log("[useSpeechRecognition] Nullifying recognitionRef (onend)");
- recognitionRef.current = null;
- }
- if (isSimulatingRef.current) {
- console.log("[useSpeechRecognition] onend ignored - simulating");
- return;
- }
- setIsListening(false);
- if (onEndRef.current) onEndRef.current();
- if (typeof window !== "undefined") {
- const w = window;
- if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
- w.__llmSpeechRecognitionActiveInstanceId = null;
- console.log("[useSpeechRecognition] Cleared global active instance lock. instanceId:", instanceIdRef.current);
- }
- }
- };
- recognition.onresult = (event) => {
- console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
- let interimTranscript = "";
- let finalTranscript = "";
- for (let i = event.results.length - 1; i < event.results.length; ++i) {
- const result = event.results[i];
- if (result.isFinal) {
- finalTranscript += result[0].transcript;
- console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
- if (onResultRef.current) onResultRef.current(finalTranscript, true);
- } else {
- interimTranscript += result[0].transcript;
- console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
- if (onResultRef.current) onResultRef.current(interimTranscript, false);
- }
- }
- setTranscript((prev) => prev + finalTranscript);
- };
- recognition.onerror = (event) => {
- console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
- console.error("[useSpeechRecognition] Error context - lastStartAt:", lastStartAtRef.current, "lastStopAt:", lastStopAtRef.current, "instanceId:", instanceIdRef.current);
- console.error("[useSpeechRecognition] Error details - This could be caused by:");
- if (event.error === "aborted") {
- console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
- } else if (event.error === "not-allowed") {
- console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
- } else if (event.error === "no-speech") {
- console.error("[useSpeechRecognition] - no-speech: No speech detected");
- } else if (event.error === "network") {
- console.error("[useSpeechRecognition] - network: Network error during recognition");
- }
- if (recognitionRef.current === recognition) {
- console.log("[useSpeechRecognition] Nullifying recognitionRef (onerror)");
- recognitionRef.current = null;
- }
- isStartingRef.current = false;
- if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
- console.warn("Speech recognition blocked. Simulating input for development...");
- isSimulatingRef.current = true;
- setError(null);
- setIsListening(true);
- simulationTimeoutRef.current = setTimeout(() => {
- const mockText = "This is a simulated voice input for testing.";
- setTranscript((prev) => prev + (prev ? " " : "") + mockText);
- if (onResultRef.current) onResultRef.current(mockText, true);
- isSimulatingRef.current = false;
- setIsListening(false);
- if (onEndRef.current) onEndRef.current();
- simulationTimeoutRef.current = null;
- }, 3e3);
- return;
- }
- console.error("Speech recognition error", event.error);
- setError(event.error);
- setIsListening(false);
- if (typeof window !== "undefined") {
- const w = window;
- if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
- w.__llmSpeechRecognitionActiveInstanceId = null;
- console.log("[useSpeechRecognition] Cleared global active instance lock after error. instanceId:", instanceIdRef.current);
- }
- }
- };
- return recognition;
- }, []);
- const start = (0, import_react2.useCallback)(() => {
- var _a;
- const startTimestamp = Date.now();
- console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
- console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasExistingInstance:", !!recognitionRef.current);
- if (typeof document !== "undefined") {
- console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
- }
- if (isSimulatingRef.current) {
- console.log("[useSpeechRecognition] isSimulating, ignoring start");
- return;
- }
- if (isStartingRef.current) {
- console.warn("[useSpeechRecognition] Already starting - ignoring duplicate call");
- return;
- }
- if (isListening) {
- console.warn("[useSpeechRecognition] App state says already listening - ignoring");
- return;
- }
- if (typeof window !== "undefined") {
- const w = window;
- if (w.__llmSpeechRecognitionActiveInstanceId && w.__llmSpeechRecognitionActiveInstanceId !== instanceIdRef.current) {
- console.error("[useSpeechRecognition] Another recognition instance appears active. activeInstanceId:", w.__llmSpeechRecognitionActiveInstanceId, "thisInstanceId:", instanceIdRef.current);
- }
- }
- try {
- if (recognitionRef.current) {
- console.log("[useSpeechRecognition] Stopping existing instance before creating new one");
- try {
- recognitionRef.current.onend = null;
- recognitionRef.current.onerror = null;
- recognitionRef.current.stop();
- } catch (e) {
- }
- recognitionRef.current = null;
- }
- const recognition = createRecognitionInstance();
- if (!recognition) {
- console.error("[useSpeechRecognition] Failed to create recognition instance");
- setError("Speech recognition not available");
- return;
- }
- recognitionRef.current = recognition;
- setTranscript("");
- isStartingRef.current = true;
- lastStartAtRef.current = Date.now();
- console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
- recognitionRef.current.start();
- console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
- } catch (error2) {
- isStartingRef.current = false;
- console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
- if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
- console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
- }
- setError((error2 == null ? void 0 : error2.message) || "Failed to start speech recognition");
- }
- }, [isListening, createRecognitionInstance]);
- const stop = (0, import_react2.useCallback)(() => {
- console.log("[useSpeechRecognition] stop() called");
- lastStopAtRef.current = Date.now();
- if (isSimulatingRef.current) {
- if (simulationTimeoutRef.current) {
- clearTimeout(simulationTimeoutRef.current);
- simulationTimeoutRef.current = null;
- }
- const mockText = "This is a simulated voice input for testing.";
- setTranscript((prev) => prev + (prev ? " " : "") + mockText);
- if (onResultRef.current) onResultRef.current(mockText, true);
- isSimulatingRef.current = false;
- setIsListening(false);
- if (onEndRef.current) onEndRef.current();
- return;
- }
- if (recognitionRef.current) {
- recognitionRef.current.stop();
- console.log("[useSpeechRecognition] recognition.stop() executed");
- }
- }, []);
- const resetTranscript = (0, import_react2.useCallback)(() => {
- setTranscript("");
- }, []);
- return {
- isListening,
- transcript,
- start,
- stop,
- resetTranscript,
- isSupported,
- error
- };
- };
-
  // src/ui/react/hooks/useAudioRecorder.ts
- var
+ var import_react2 = require("react");
  var useAudioRecorder = (onStop) => {
- const [isRecording, setIsRecording] = (0,
- const [isSimulated, setIsSimulated] = (0,
- const [blob, setBlob] = (0,
- const [error, setError] = (0,
- const mediaRecorderRef = (0,
- const chunksRef = (0,
- const start = (0,
+ const [isRecording, setIsRecording] = (0, import_react2.useState)(false);
+ const [isSimulated, setIsSimulated] = (0, import_react2.useState)(false);
+ const [blob, setBlob] = (0, import_react2.useState)(null);
+ const [error, setError] = (0, import_react2.useState)(null);
+ const mediaRecorderRef = (0, import_react2.useRef)(null);
+ const chunksRef = (0, import_react2.useRef)([]);
+ const start = (0, import_react2.useCallback)(async () => {
  try {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
  if (process.env.NODE_ENV === "development") {
@@ -498,7 +213,12 @@ var useAudioRecorder = (onStop) => {
  }
  throw new Error("Media devices not available. Ensure you are using HTTPS or localhost.");
  }
- const stream = await navigator.mediaDevices.getUserMedia({
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: {
+ noiseSuppression: true,
+ echoCancellation: true
+ }
+ });
  const mediaRecorder = new MediaRecorder(stream);
  mediaRecorderRef.current = mediaRecorder;
  chunksRef.current = [];
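
The functional change in this hunk is the getUserMedia constraint object: 1.0.1 requests the audio track with noise suppression and echo cancellation enabled (the 0.9.99 argument is truncated in this view). A standalone sketch of the same browser call, separate from the package code:

    // Standalone sketch of the constraint object added above (plain Web API usage).
    async function openMicrophone(): Promise<MediaStream> {
      // Ask the browser to clean up the track before MediaRecorder consumes it.
      return navigator.mediaDevices.getUserMedia({
        audio: {
          noiseSuppression: true,
          echoCancellation: true
        }
      });
    }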
@@ -524,7 +244,7 @@ var useAudioRecorder = (onStop) => {
  setError(e.message || "Microphone access denied");
  }
  }, [onStop]);
- const stop = (0,
+ const stop = (0, import_react2.useCallback)(() => {
  if (isSimulated) {
  setIsRecording(false);
  setIsSimulated(false);
@@ -548,9 +268,9 @@ var useAudioRecorder = (onStop) => {
  };

  // src/ui/react/hooks/useProactiveResize.ts
- var
+ var import_react3 = require("react");
  function useProactiveResize(textareaRef, measurementRef, value, disabled) {
- (0,
+ (0, import_react3.useEffect)(() => {
  if (!textareaRef.current || !measurementRef.current) return;
  const styles = window.getComputedStyle(textareaRef.current);
  measurementRef.current.style.width = styles.width;
@@ -570,7 +290,7 @@ function useProactiveResize(textareaRef, measurementRef, value, disabled) {

  // src/ui/react/components/ChatInputArea.tsx
  var import_jsx_runtime5 = require("react/jsx-runtime");
- var ChatInputArea = (0,
+ var ChatInputArea = (0, import_react4.forwardRef)(({
  onSubmit,
  isSending,
  showInputForm,
@@ -586,22 +306,18 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  defaultInputMode = "text"
  }, ref) => {
  var _a, _b, _c, _d;
- const [internalMessage, setInternalMessage] = (0,
- const [voiceTrigger, setVoiceTrigger] = (0,
- const [isTranscribing, setIsTranscribing] = (0,
- const [voiceError, setVoiceError] = (0,
- const [isFocused, setIsFocused] = (0,
- const [showDebug, setShowDebug] = (0,
- const [logs, setLogs] = (0,
- const tapCountRef = (0,
- (0,
+ const [internalMessage, setInternalMessage] = (0, import_react4.useState)("");
+ const [voiceTrigger, setVoiceTrigger] = (0, import_react4.useState)(null);
+ const [isTranscribing, setIsTranscribing] = (0, import_react4.useState)(false);
+ const [voiceError, setVoiceError] = (0, import_react4.useState)(null);
+ const [isFocused, setIsFocused] = (0, import_react4.useState)(false);
+ const [showDebug, setShowDebug] = (0, import_react4.useState)(false);
+ const [logs, setLogs] = (0, import_react4.useState)([]);
+ const tapCountRef = (0, import_react4.useRef)({ count: 0, lastTap: 0 });
+ (0, import_react4.useEffect)(() => {
  const originalLog = console.log;
  const originalWarn = console.warn;
  const originalError = console.error;
- const formatTime = () => {
- const now = /* @__PURE__ */ new Date();
- return `${now.getHours().toString().padStart(2, "0")}:${now.getMinutes().toString().padStart(2, "0")}:${now.getSeconds().toString().padStart(2, "0")}.${now.getMilliseconds().toString().padStart(3, "0")}`;
- };
  const addLog = (type, args) => {
  try {
  const msg = args.map((arg) => {
@@ -609,7 +325,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  if (typeof arg === "object") return JSON.stringify(arg);
  return String(arg);
  }).join(" ");
- setLogs((prev) => [
+ setLogs((prev) => [`[${type}] ${msg}`, ...prev].slice(0, 50));
  } catch (e) {
  }
  };
@@ -631,17 +347,17 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  console.error = originalError;
  };
  }, []);
- const copyLogs = (0,
+ const copyLogs = (0, import_react4.useCallback)(() => {
  navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
  }, [logs]);
- const textareaRef = (0,
- const measurementRef = (0,
- const pendingSelectionRef = (0,
+ const textareaRef = (0, import_react4.useRef)(null);
+ const measurementRef = (0, import_react4.useRef)(null);
+ const pendingSelectionRef = (0, import_react4.useRef)(null);
  const isControlled = value !== void 0;
  const message = isControlled ? value : internalMessage;
- const messageRef = (0,
+ const messageRef = (0, import_react4.useRef)(message);
  messageRef.current = message;
- (0,
+ (0, import_react4.useLayoutEffect)(() => {
  if (pendingSelectionRef.current && textareaRef.current) {
  const { start, end } = pendingSelectionRef.current;
  textareaRef.current.focus();
@@ -649,18 +365,18 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  pendingSelectionRef.current = null;
  }
  }, [message]);
- const onChangeRef = (0,
- (0,
+ const onChangeRef = (0, import_react4.useRef)(onChange);
+ (0, import_react4.useEffect)(() => {
  onChangeRef.current = onChange;
  }, [onChange]);
  const { voice: globalVoice } = useChatConfig();
  const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
  const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
- const voiceConfigRef = (0,
- (0,
+ const voiceConfigRef = (0, import_react4.useRef)(voiceConfig);
+ (0, import_react4.useEffect)(() => {
  voiceConfigRef.current = voiceConfig;
  }, [voiceConfig]);
- const triggerChange = (0,
+ const triggerChange = (0, import_react4.useCallback)((newValue) => {
  setVoiceError(null);
  if (isControlled && onChangeRef.current) {
  const syntheticEvent = {
@@ -674,7 +390,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  }, [isControlled]);
  const isInputDisabled = (currentTask == null ? void 0 : currentTask.complete) || (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (((_b = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _b.function) === "form" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted) || ((_c = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _c.function) === "confirm" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted));
  useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger);
- const insertTextAtCursor = (0,
+ const insertTextAtCursor = (0, import_react4.useCallback)((text) => {
  const textarea = textareaRef.current;
  const currentVal = messageRef.current || "";
  if (!textarea) {
@@ -692,23 +408,16 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  pendingSelectionRef.current = { start: selectionStart, end: selectionEnd };
  triggerChange(newText);
  }, [triggerChange]);
- const handleVoiceResult = (0,
+ const handleVoiceResult = (0, import_react4.useCallback)((text, isFinal) => {
  if (isFinal) {
  insertTextAtCursor(text);
  }
  }, [insertTextAtCursor]);
- const handleVoiceEnd = (0,
+ const handleVoiceEnd = (0, import_react4.useCallback)(() => {
  var _a2, _b2;
  setVoiceTrigger(null);
  (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
  }, []);
- const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
- (0, import_react5.useEffect)(() => {
- if (nativeSpeech.error) {
- setVoiceError(nativeSpeech.error);
- console.error("[ChatInputArea] Native Speech Error:", nativeSpeech.error);
- }
- }, [nativeSpeech.error]);
  const customRecorder = useAudioRecorder(async (blob) => {
  var _a2, _b2, _c2;
  setVoiceTrigger(null);
@@ -736,7 +445,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  setIsTranscribing(false);
  }
  });
- (0,
+ (0, import_react4.useImperativeHandle)(ref, () => ({
  focus: () => {
  var _a2;
  (_a2 = textareaRef.current) == null ? void 0 : _a2.focus();
@@ -768,65 +477,27 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  handleSubmit();
  }
  };
- const isMobile = (0, import_react5.useCallback)(() => {
- if (typeof window === "undefined") return false;
- return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
- }, []);
  const startRecording = async (trigger) => {
- var _a2
-
- console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
- if (voiceTrigger || isTranscribing) {
- console.log("[ChatInputArea] startRecording ignored - already active");
- return;
- }
+ var _a2;
+ if (voiceTrigger || isTranscribing) return;
  setVoiceTrigger(trigger);
  setVoiceError(null);
-
-
- if (!nativeSpeech.isSupported) {
- console.error("[ChatInputArea] Native speech not supported");
- alert("Speech recognition is not supported in this browser.");
- setVoiceTrigger(null);
- return;
- }
- console.log("[ChatInputArea] Calling nativeSpeech.start()...");
- nativeSpeech.start();
- console.log("[ChatInputArea] nativeSpeech.start() called");
- console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
- try {
- (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
- console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
- } catch (e) {
- console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
- }
- } else {
- console.log("[ChatInputArea] Using custom recorder");
- console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists (custom mode)...");
- try {
- (_b2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _b2.call(voiceConfig);
- console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
- } catch (e) {
- console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
- }
+ (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
+ try {
  await customRecorder.start();
-
-
-
-
- setTimeout(() => {
- var _a3;
- return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
- }, 0);
+ } catch (e) {
+ console.error("[ChatInputArea] Failed to start recorder:", e);
+ setVoiceError("Mic access denied");
+ setVoiceTrigger(null);
  }
+ setTimeout(() => {
+ var _a3;
+ return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+ }, 0);
  };
  const stopRecording = () => {
  if (!voiceTrigger) return;
-
- nativeSpeech.stop();
- } else {
- customRecorder.stop();
- }
+ customRecorder.stop();
  };
  const getPlaceholder = () => {
  if (placeholder) return placeholder;
@@ -1005,7 +676,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
  ChatInputArea.displayName = "ChatInputArea";

  // src/ui/react/components/TapToTalk.tsx
- var
+ var import_react5 = __toESM(require("react"), 1);
  var import_outline2 = require("@heroicons/react/24/outline");
  var import_jsx_runtime6 = require("react/jsx-runtime");
  var TapToTalk = ({
@@ -1020,19 +691,16 @@ var TapToTalk = ({
  var _a;
  const globalConfig = useChatConfig();
  const voiceConfig = propVoiceConfig || ((_a = globalConfig.voice) == null ? void 0 : _a.config);
- const [isTranscribing, setIsTranscribing] = (0,
- const [
- const [
- const [
- const
-
+ const [isTranscribing, setIsTranscribing] = (0, import_react5.useState)(false);
+ const [voiceTrigger, setVoiceTrigger] = (0, import_react5.useState)(null);
+ const [errorMsg, setErrorMsg] = (0, import_react5.useState)(null);
+ const [showDebug, setShowDebug] = (0, import_react5.useState)(false);
+ const [logs, setLogs] = (0, import_react5.useState)([]);
+ const tapCountRef = (0, import_react5.useRef)({ count: 0, lastTap: 0 });
+ import_react5.default.useEffect(() => {
  const originalLog = console.log;
  const originalWarn = console.warn;
  const originalError = console.error;
- const formatTime = () => {
- const now = /* @__PURE__ */ new Date();
- return `${now.getHours().toString().padStart(2, "0")}:${now.getMinutes().toString().padStart(2, "0")}:${now.getSeconds().toString().padStart(2, "0")}.${now.getMilliseconds().toString().padStart(3, "0")}`;
- };
  const addLog = (type, args) => {
  try {
  const msg = args.map((arg) => {
@@ -1040,7 +708,7 @@ var TapToTalk = ({
  if (typeof arg === "object") return JSON.stringify(arg);
  return String(arg);
  }).join(" ");
- setLogs((prev) => [
+ setLogs((prev) => [`[${type}] ${msg}`, ...prev].slice(0, 50));
  } catch (e) {
  }
  };
@@ -1062,27 +730,15 @@ var TapToTalk = ({
  console.error = originalError;
  };
  }, []);
- const copyLogs = (0,
+ const copyLogs = (0, import_react5.useCallback)(() => {
  navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
  }, [logs]);
- const
-
- if (isFinal) {
- onResult(text);
- setErrorMsg(null);
- }
- }, [onResult]);
- const handleVoiceEnd = (0, import_react6.useCallback)(() => {
- console.log("[TapToTalk] Native speech ended");
+ const handleVoiceEnd = (0, import_react5.useCallback)(() => {
+ setVoiceTrigger(null);
  }, []);
- const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
- import_react6.default.useEffect(() => {
- if (nativeSpeech.error) {
- setErrorMsg(nativeSpeech.error);
- console.error("[TapToTalk] Native Speech Error:", nativeSpeech.error);
- }
- }, [nativeSpeech.error]);
  const customRecorder = useAudioRecorder(async (blob) => {
+ var _a2;
+ setVoiceTrigger(null);
  setIsTranscribing(true);
  setErrorMsg(null);
  if (blob.type === "audio/simulated") {
@@ -1092,7 +748,7 @@ var TapToTalk = ({
  setIsTranscribing(false);
  return;
  }
- if (
+ if (voiceConfig == null ? void 0 : voiceConfig.onAudioCapture) {
  try {
  const text = await voiceConfig.onAudioCapture(blob);
  if (text) onResult(text);
@@ -1105,77 +761,59 @@ var TapToTalk = ({
  } else {
  setIsTranscribing(false);
  }
+ (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
  });
- const isListening =
+ const isListening = !!voiceTrigger || customRecorder.isRecording;
  const isActive = isListening || isTranscribing;
- const processingRef = (0,
- const
-
- return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
- }, []);
- const toggleVoice = async (e) => {
- const now = Date.now();
- if (now - tapCountRef.current.lastTap < 500) {
- tapCountRef.current.count++;
- } else {
- tapCountRef.current.count = 1;
- }
- tapCountRef.current.lastTap = now;
- if (tapCountRef.current.count >= 5) {
- setShowDebug((prev) => !prev);
- tapCountRef.current.count = 0;
- if (isActive) {
- console.log("[TapToTalk] Debug trigger force-stop");
- if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
- else customRecorder.stop();
- }
- return;
- }
- console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
+ const processingRef = (0, import_react5.useRef)(false);
+ const toggleVoice = async () => {
+ var _a2, _b, _c;
  if (processingRef.current) {
  console.log("[TapToTalk] toggleVoice ignored - processing");
  return;
  }
  processingRef.current = true;
- console.log("[TapToTalk] toggleVoice called. isActive:", isActive
+ console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
  try {
+ const now = Date.now();
+ if (now - tapCountRef.current.lastTap < 500) {
+ tapCountRef.current.count++;
+ } else {
+ tapCountRef.current.count = 1;
+ }
+ tapCountRef.current.lastTap = now;
+ if (tapCountRef.current.count >= 5) {
+ setShowDebug((prev) => !prev);
+ tapCountRef.current.count = 0;
+ if (isActive) {
+ console.log("[TapToTalk] Debug trigger force-stop");
+ customRecorder.stop();
+ setVoiceTrigger(null);
+ }
+ return;
+ }
  if (isActive) {
  if (isTranscribing && !isListening) {
  console.log("[TapToTalk] Ignoring click during transcription");
  return;
  }
  console.log("[TapToTalk] Stopping voice...");
-
-
-
- customRecorder.stop();
- }
+ customRecorder.stop();
+ setVoiceTrigger(null);
+ (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
  } else {
- console.log("[TapToTalk] Starting voice...
+ console.log("[TapToTalk] Starting voice...");
  setErrorMsg(null);
-
-
-
-
-
-
-
-
-
-
- console.error("[TapToTalk] Custom recorder failed:", e2);
- setErrorMsg("Mic access denied");
- }
- } else {
- console.log("[TapToTalk] Starting native speech recognition...");
- if (!nativeSpeech.isSupported) {
- console.error("[TapToTalk] Native speech not supported");
- setErrorMsg("Speech not supported");
- return;
- }
- console.log("[TapToTalk] Calling nativeSpeech.start()...");
- nativeSpeech.start();
- console.log("[TapToTalk] nativeSpeech.start() called");
+ onFocusTarget == null ? void 0 : onFocusTarget();
+ setVoiceTrigger("click");
+ (_b = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _b.call(voiceConfig);
+ try {
+ await customRecorder.start();
+ } catch (e) {
+ console.error("[TapToTalk] Custom recorder failed:", e);
+ setErrorMsg("Mic access denied");
+ setVoiceTrigger(null);
+ (_c = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _c.call(voiceConfig);
  }
  }
  } finally {
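
With this hunk, TapToTalk no longer branches between native SpeechRecognition and the custom recorder: toggleVoice always drives useAudioRecorder, tracks the listening state in voiceTrigger, and signals the host through voiceConfig.onVoiceStart/onVoiceEnd. A reduced sketch of that control flow, assuming the component's own state setters and callbacks are passed in (the wrapper function itself is hypothetical, names taken from the diff):

    // Hypothetical, self-contained sketch of the 1.0.1 toggle flow shown above.
    type Recorder = { start: () => Promise<void>; stop: () => void };
    type VoiceConfig = { onVoiceStart?: () => void; onVoiceEnd?: () => void };

    async function toggleVoiceSketch(
      isActive: boolean,
      recorder: Recorder,
      voiceConfig: VoiceConfig | undefined,
      setVoiceTrigger: (v: "click" | null) => void,
      setErrorMsg: (msg: string | null) => void
    ): Promise<void> {
      if (isActive) {
        // Stop path: halt the recorder, clear the trigger, notify the host app.
        recorder.stop();
        setVoiceTrigger(null);
        voiceConfig?.onVoiceEnd?.();
        return;
      }
      // Start path: set the trigger first so the listening state flips immediately, then open the mic.
      setErrorMsg(null);
      setVoiceTrigger("click");
      voiceConfig?.onVoiceStart?.();
      try {
        await recorder.start();
      } catch (e) {
        setErrorMsg("Mic access denied");
        setVoiceTrigger(null);
        voiceConfig?.onVoiceEnd?.();
      }
    }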
@@ -1221,9 +859,7 @@ var TapToTalk = ({
  /* @__PURE__ */ (0, import_jsx_runtime6.jsxs)(
  "button",
  {
- type: "button",
  onClick: toggleVoice,
- style: { touchAction: "manipulation" },
  disabled: disabled || isTranscribing && !isListening,
  className: `flex items-center justify-center gap-3 px-6 py-3 rounded-xl transition-all duration-300 w-full font-medium shadow-md active:scale-[0.98]
  ${bgColor} text-white
@@ -1231,9 +867,9 @@ var TapToTalk = ({
  ${className}`,
  title: label,
  children: [
- /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("div", { className: "flex items-center justify-center shrink-0
- /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "truncate
- errorMsg && /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1
+ /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("div", { className: "flex items-center justify-center shrink-0", children: Icon }),
+ /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "truncate", children: label }),
+ errorMsg && /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1", children: errorMsg })
  ]
  }
  )
@@ -1241,17 +877,17 @@ var TapToTalk = ({
  };

  // src/ui/react/components/ChatMessageList.tsx
- var
+ var import_react9 = require("react");

  // src/ui/react/components/interactive/ConfirmInteraction.tsx
- var
+ var import_react6 = require("react");
  var import_jsx_runtime7 = require("react/jsx-runtime");
  var ConfirmInteraction = ({
  parameters,
  onResponse,
  isResponseSubmitted
  }) => {
- const [selectedOption, setSelectedOption] = (0,
+ const [selectedOption, setSelectedOption] = (0, import_react6.useState)(null);
  const params = parameters;
  const { yesPrompt, noPrompt } = params;
  console.log("[ConfirmInteraction] Parameters:", params);
@@ -1284,7 +920,7 @@ var ConfirmInteraction = ({
  var ConfirmInteraction_default = ConfirmInteraction;

  // src/ui/react/components/interactive/SelectInteraction.tsx
- var
+ var import_react7 = require("react");
  var import_jsx_runtime8 = require("react/jsx-runtime");
  var SelectInteraction = ({
  parameters,
@@ -1292,11 +928,11 @@ var SelectInteraction = ({
  isResponseSubmitted,
  message
  }) => {
- const [selectedOption, setSelectedOption] = (0,
- const [customOption, setCustomOption] = (0,
+ const [selectedOption, setSelectedOption] = (0, import_react7.useState)(null);
+ const [customOption, setCustomOption] = (0, import_react7.useState)(null);
  const params = parameters;
  const { question, options, placeholder } = params;
- (0,
+ (0, import_react7.useEffect)(() => {
  if (isResponseSubmitted && (message == null ? void 0 : message.responseValue)) {
  const responseValueStr = String(message.responseValue);
  setSelectedOption(responseValueStr);
@@ -1335,7 +971,7 @@ var SelectInteraction = ({
  var SelectInteraction_default = SelectInteraction;

  // src/ui/react/components/interactive/FormInteraction.tsx
- var
+ var import_react8 = require("react");
  var import_lucide_react = require("lucide-react");
  var import_jsx_runtime9 = require("react/jsx-runtime");
  var FormInteraction = ({
@@ -1344,11 +980,11 @@ var FormInteraction = ({
  isResponseSubmitted,
  submittedValues
  }) => {
- const [isModalOpen, setIsModalOpen] = (0,
- const [formValues, setFormValues] = (0,
- const [isExpanded, setIsExpanded] = (0,
- const [parsedFields, setParsedFields] = (0,
- const formButtonsRef = (0,
+ const [isModalOpen, setIsModalOpen] = (0, import_react8.useState)(false);
+ const [formValues, setFormValues] = (0, import_react8.useState)({});
+ const [isExpanded, setIsExpanded] = (0, import_react8.useState)(false);
+ const [parsedFields, setParsedFields] = (0, import_react8.useState)([]);
+ const formButtonsRef = (0, import_react8.useRef)(null);
  const parseParameters = () => {
  const { prompt, description, submitText = "Submit", cancelText = "Cancel" } = parameters;
  let fieldsArray = [];
@@ -1409,7 +1045,7 @@ var FormInteraction = ({
  };
  };
  const params = parseParameters();
- (0,
+ (0, import_react8.useEffect)(() => {
  const processedParams = parseParameters();
  setParsedFields(processedParams.fields);
  const initialValues = {};
@@ -1427,7 +1063,7 @@ var FormInteraction = ({
  setIsModalOpen(true);
  }
  }, [parameters, isResponseSubmitted]);
- (0,
+ (0, import_react8.useEffect)(() => {
  if (isModalOpen && formButtonsRef.current) {
  setTimeout(() => {
  var _a;
@@ -1735,10 +1371,10 @@ var ChatMessageList = ({
  getContextExample,
  onInteractiveResponse
  }) => {
- const chatContainerRef = (0,
- const lastMessageRef = (0,
- const processingIndicatorRef = (0,
- (0,
+ const chatContainerRef = (0, import_react9.useRef)(null);
+ const lastMessageRef = (0, import_react9.useRef)(null);
+ const processingIndicatorRef = (0, import_react9.useRef)(null);
+ (0, import_react9.useEffect)(() => {
  if (isProcessing && processingIndicatorRef.current) {
  processingIndicatorRef.current.scrollIntoView({ behavior: "smooth" });
  return;
@@ -1983,7 +1619,6 @@ function createWhisperVoiceConfig(config, callbacks) {
  transcribeAudio,
  useAudioRecorder,
  useChatConfig,
- useProactiveResize,
- useSpeechRecognition
+ useProactiveResize
  });
  //# sourceMappingURL=index.cjs.map