@contentgrowth/llm-service 0.8.6 → 0.8.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -441,20 +441,29 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
441  441        const message = isControlled ? value : internalMessage;
442  442        const messageRef = (0, import_react5.useRef)(message);
443  443        messageRef.current = message;
     444   +    const onChangeRef = (0, import_react5.useRef)(onChange);
     445   +    (0, import_react5.useEffect)(() => {
     446   +      onChangeRef.current = onChange;
     447   +    }, [onChange]);
444  448        const { voice: globalVoice } = useChatConfig();
445  449        const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
446  450        const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
447        -    const
448        -
     451   +    const voiceConfigRef = (0, import_react5.useRef)(voiceConfig);
     452   +    (0, import_react5.useEffect)(() => {
     453   +      voiceConfigRef.current = voiceConfig;
     454   +    }, [voiceConfig]);
     455   +    const triggerChange = (0, import_react5.useCallback)((newValue) => {
     456   +      console.log("[ChatInputArea] triggerChange called:", { newValue, isControlled, hasOnChange: !!onChangeRef.current, hasTextarea: !!textareaRef.current });
     457   +      if (isControlled && onChangeRef.current) {
449  458          const syntheticEvent = {
450  459            target: { value: newValue },
451  460            currentTarget: { value: newValue }
452  461          };
453        -
     462   +        onChangeRef.current(syntheticEvent);
454  463        } else {
455  464          setInternalMessage(newValue);
456  465        }
457        -    };
     466   +    }, [isControlled]);
458  467        const isInputDisabled = (currentTask == null ? void 0 : currentTask.complete) || (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (((_b = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _b.function) === "form" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted) || ((_c = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _c.function) === "confirm" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted));
459  468        useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger || inputMode === "voice");
460  469        const handleVoiceKeyDown = (e) => {

@@ -484,20 +493,20 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
484  493          triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
485  494        }, []);
486  495        const handleVoiceEnd = (0, import_react5.useCallback)(() => {
487        -      var _a2;
     496   +      var _a2, _b2;
488  497          console.log("[ChatInputArea] nativeSpeech onEnd triggered");
489  498          setVoiceTrigger(null);
490        -      (_a2 =
491        -    }, [
     499   +      (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
     500   +    }, []);
492  501        const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
493  502        const customRecorder = useAudioRecorder(async (blob) => {
494        -      var _a2;
     503   +      var _a2, _b2, _c2;
495  504          console.log("[ChatInputArea] customRecorder onStop triggered");
496  505          setVoiceTrigger(null);
497        -      (_a2 =
498        -      if (
     506   +      (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
     507   +      if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
499  508          try {
500        -        const text = await
     509   +          const text = await voiceConfigRef.current.onAudioCapture(blob);
501  510            if (text) triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
502  511          } catch (e) {
503  512            console.error("[ChatInputArea] Audio capture failed", e);