@contentgrowth/llm-service 0.8.8 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ui/react/components/index.cjs +103 -167
- package/dist/ui/react/components/index.cjs.map +1 -1
- package/dist/ui/react/components/index.d.cts +1 -0
- package/dist/ui/react/components/index.d.ts +1 -0
- package/dist/ui/react/components/index.js +106 -170
- package/dist/ui/react/components/index.js.map +1 -1
- package/package.json +1 -1
@@ -212,14 +212,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     recognition.interimResults = true;
     recognition.lang = language;
     recognition.onstart = () => {
-      console.log("[useSpeechRecognition] Native onstart event fired");
       setIsListening(true);
       setError(null);
     };
     recognition.onend = () => {
-      console.log("[useSpeechRecognition] Native onend event fired");
       if (isSimulatingRef.current) {
-        console.log("[useSpeechRecognition] Ignoring onend due to simulation");
         return;
       }
       setIsListening(false);
@@ -274,23 +271,18 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     };
   }, [onResult, onEnd, language]);
   const start = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
-        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
         console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
     } else {
-      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] stop() called");
     if (isSimulatingRef.current) {
-      console.log("[useSpeechRecognition] Stopping simulation");
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
@@ -304,10 +296,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current && isListening) {
-      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
     } else {
-      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
     }
   }, [isListening, onResult, onEnd]);
   const resetTranscript = (0, import_react2.useCallback)(() => {
@@ -328,37 +318,41 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
 var import_react3 = require("react");
 var useAudioRecorder = (onStop) => {
   const [isRecording, setIsRecording] = (0, import_react3.useState)(false);
+  const [isSimulated, setIsSimulated] = (0, import_react3.useState)(false);
   const [blob, setBlob] = (0, import_react3.useState)(null);
   const [error, setError] = (0, import_react3.useState)(null);
   const mediaRecorderRef = (0, import_react3.useRef)(null);
   const chunksRef = (0, import_react3.useRef)([]);
   const start = (0, import_react3.useCallback)(async () => {
-    console.log("[useAudioRecorder] start() called");
     try {
+      if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
+        if (process.env.NODE_ENV === "development") {
+          console.warn("[useAudioRecorder] MediaDevices not available. Entering simulation mode...");
+          setIsRecording(true);
+          setIsSimulated(true);
+          setError(null);
+          return;
+        }
+        throw new Error("Media devices not available. Ensure you are using HTTPS or localhost.");
+      }
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
-        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
          chunksRef.current.push(e.data);
        }
      };
      mediaRecorder.onstop = () => {
-        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
        const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
-        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
        setBlob(audioBlob);
        setIsRecording(false);
        if (onStop) onStop(audioBlob);
        stream.getTracks().forEach((track) => {
-          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
          track.stop();
        });
      };
-      console.log("[useAudioRecorder] Starting MediaRecorder...");
      mediaRecorder.start();
      setIsRecording(true);
      setError(null);
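The guard added to start() above lets useAudioRecorder degrade gracefully where getUserMedia is unavailable (plain HTTP, some webviews, jsdom tests): development builds drop into a simulation mode instead of throwing. A minimal sketch of the same decision in isolation; canRecord and acquireStream are illustrative names, not package exports:

    // Sketch, assuming a browser-like environment with TypeScript DOM types.
    function canRecord(): boolean {
      return typeof navigator !== "undefined" &&
        !!navigator.mediaDevices &&
        typeof navigator.mediaDevices.getUserMedia === "function";
    }

    async function acquireStream(isDev: boolean): Promise<MediaStream | "simulated"> {
      if (!canRecord()) {
        if (isDev) return "simulated"; // dev-only fallback, mirrors the hook's behavior
        throw new Error("Media devices not available. Ensure you are using HTTPS or localhost.");
      }
      return navigator.mediaDevices.getUserMedia({ audio: true });
    }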
@@ -368,16 +362,21 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = (0, import_react3.useCallback)(() => {
-
+    if (isSimulated) {
+      setIsRecording(false);
+      setIsSimulated(false);
+      const simulatedBlob = new Blob(["simulated speech"], { type: "audio/simulated" });
+      setBlob(simulatedBlob);
+      if (onStop) onStop(simulatedBlob);
+      return;
+    }
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
-      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
-    } else {
-      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
-  }, []);
+  }, [isSimulated, onStop]);
   return {
     isRecording,
+    isSimulated,
     start,
     stop,
     blob,
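When the recorder was started in simulation mode, the new stop() path short-circuits: it emits a sentinel Blob of type "audio/simulated" instead of real "audio/webm" data, and still invokes onStop. Downstream code that forwards audio to a transcription backend can branch on that type. A hedged sketch, where onAudioStopped and transcribe are assumed app-side names:

    // Sketch: distinguishing simulated capture from real audio in an onStop handler.
    async function onAudioStopped(blob: Blob, transcribe: (b: Blob) => Promise<string>): Promise<string> {
      if (blob.type === "audio/simulated") {
        // Same sentinel check ChatInputArea performs later in this diff.
        return "This is a simulated transcription for development testing.";
      }
      return transcribe(blob); // e.g. POST to a speech-to-text endpoint
    }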
@@ -426,21 +425,22 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   var _a, _b, _c, _d;
   const [internalMessage, setInternalMessage] = (0, import_react5.useState)("");
   const [voiceTrigger, setVoiceTrigger] = (0, import_react5.useState)(null);
-  const [inputMode, setInputMode] = (0, import_react5.useState)(defaultInputMode);
   const [isFocused, setIsFocused] = (0, import_react5.useState)(false);
   const textareaRef = (0, import_react5.useRef)(null);
   const measurementRef = (0, import_react5.useRef)(null);
-  const
-  (0, import_react5.useEffect)(() => {
-    var _a2;
-    if (inputMode === "voice") {
-      (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
-    }
-  }, [inputMode]);
+  const pendingSelectionRef = (0, import_react5.useRef)(null);
   const isControlled = value !== void 0;
   const message = isControlled ? value : internalMessage;
   const messageRef = (0, import_react5.useRef)(message);
   messageRef.current = message;
+  (0, import_react5.useLayoutEffect)(() => {
+    if (pendingSelectionRef.current && textareaRef.current) {
+      const { start, end } = pendingSelectionRef.current;
+      textareaRef.current.focus();
+      textareaRef.current.setSelectionRange(start, end);
+      pendingSelectionRef.current = null;
+    }
+  }, [message]);
   const onChangeRef = (0, import_react5.useRef)(onChange);
   (0, import_react5.useEffect)(() => {
     onChangeRef.current = onChange;
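The pendingSelectionRef/useLayoutEffect pair replaces the old voice-mode focus effect. Because the textarea value is (optionally) controlled, the caret can only be positioned after React commits the new value, so the desired range is stashed in a ref and applied in a layout effect keyed on message. A standalone sketch of the pattern, with illustrative component and handler names:

    import { useLayoutEffect, useRef, useState } from "react";

    // Sketch of the deferred caret-restore pattern used by ChatInputArea.
    export function CaretRestoreDemo() {
      const [value, setValue] = useState("");
      const taRef = useRef<HTMLTextAreaElement>(null);
      const pendingSelection = useRef<{ start: number; end: number } | null>(null);

      // Runs synchronously after the DOM reflects the committed `value`.
      useLayoutEffect(() => {
        if (pendingSelection.current && taRef.current) {
          const { start, end } = pendingSelection.current;
          taRef.current.focus();
          taRef.current.setSelectionRange(start, end);
          pendingSelection.current = null;
        }
      }, [value]);

      const append = (text: string) => {
        pendingSelection.current = { start: value.length, end: value.length + text.length };
        setValue(value + text); // selection is applied after this update commits
      };

      return (
        <div>
          <textarea ref={taRef} value={value} onChange={(e) => setValue(e.target.value)} />
          <button onClick={() => append(" dictated text")}>Insert</button>
        </div>
      );
    }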
@@ -453,7 +453,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     voiceConfigRef.current = voiceConfig;
   }, [voiceConfig]);
   const triggerChange = (0, import_react5.useCallback)((newValue) => {
-    console.log("[ChatInputArea] triggerChange called:", { newValue, isControlled, hasOnChange: !!onChangeRef.current, hasTextarea: !!textareaRef.current });
     if (isControlled && onChangeRef.current) {
       const syntheticEvent = {
         target: { value: newValue },
@@ -465,49 +464,49 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     }
   }, [isControlled]);
   const isInputDisabled = (currentTask == null ? void 0 : currentTask.complete) || (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (((_b = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _b.function) === "form" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted) || ((_c = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _c.function) === "confirm" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted));
-  useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger
-  const
-
-
-
-
-
-      e.preventDefault();
-      e.stopPropagation();
-      if (voiceTrigger === "click") return;
-      if (!e.repeat && !voiceTrigger) {
-        startRecording("space");
+  useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger);
+  const insertTextAtCursor = (0, import_react5.useCallback)((text) => {
+    const textarea = textareaRef.current;
+    const currentVal = messageRef.current || "";
+    if (!textarea) {
+      triggerChange(currentVal + (currentVal ? " " : "") + text);
+      return;
     }
-
-
-
-
-
-
-
-
+    const start = textarea.selectionStart;
+    const end = textarea.selectionEnd;
+    const before = currentVal.substring(0, start);
+    const after = currentVal.substring(end);
+    const prefix = start > 0 && !/\s$/.test(before) ? " " : "";
+    const newText = before + prefix + text + after;
+    const selectionStart = start + prefix.length;
+    const selectionEnd = selectionStart + text.length;
+    pendingSelectionRef.current = { start: selectionStart, end: selectionEnd };
+    triggerChange(newText);
+  }, [triggerChange]);
+  const handleVoiceResult = (0, import_react5.useCallback)((text, isFinal) => {
+    if (isFinal) {
+      insertTextAtCursor(text);
     }
-  };
-  const handleVoiceResult = (0, import_react5.useCallback)((text) => {
-    console.log("[ChatInputArea] nativeSpeech result:", text);
-    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
-  }, []);
+  }, [insertTextAtCursor]);
   const handleVoiceEnd = (0, import_react5.useCallback)(() => {
     var _a2, _b2;
-    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
   }, []);
   const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2, _b2, _c2;
-    console.log("[ChatInputArea] customRecorder onStop triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
+    if (blob.type === "audio/simulated") {
+      console.log("[ChatInputArea] Handling simulated audio capture");
+      insertTextAtCursor("This is a simulated transcription for development testing.");
+      return;
+    }
     if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
       try {
         const text = await voiceConfigRef.current.onAudioCapture(blob);
-        if (text)
+        if (text) insertTextAtCursor(text);
       } catch (e) {
         console.error("[ChatInputArea] Audio capture failed", e);
       }
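insertTextAtCursor splices recognized text at the current selection, prepending a single space when the text before the caret does not already end in whitespace, then records the inserted range for the layout effect above to re-select. The string arithmetic is easy to verify in isolation; spliceAtCursor is an illustrative name for the same logic:

    // Sketch of the cursor-splice arithmetic from insertTextAtCursor.
    function spliceAtCursor(current: string, start: number, end: number, text: string) {
      const before = current.substring(0, start);
      const after = current.substring(end);
      const prefix = start > 0 && !/\s$/.test(before) ? " " : "";
      const selectionStart = start + prefix.length;
      return {
        next: before + prefix + text + after,
        selectionStart,
        selectionEnd: selectionStart + text.length
      };
    }

    // spliceAtCursor("hello", 5, 5, "world")
    //   -> { next: "hello world", selectionStart: 6, selectionEnd: 11 }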
@@ -515,12 +514,8 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   });
   (0, import_react5.useImperativeHandle)(ref, () => ({
     focus: () => {
-      var _a2
-
-      (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
-      } else {
-      (_b2 = textareaRef.current) == null ? void 0 : _b2.focus();
-      }
+      var _a2;
+      (_a2 = textareaRef.current) == null ? void 0 : _a2.focus();
     },
     setValue: (newValue) => {
       triggerChange(newValue);
@@ -551,8 +546,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   };
   const startRecording = async (trigger) => {
     var _a2;
-    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
-    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
     if (voiceTrigger) return;
     setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -562,15 +555,16 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         setVoiceTrigger(null);
         return;
       }
-      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
-      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
+    setTimeout(() => {
+      var _a3;
+      return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+    }, 0);
   };
   const stopRecording = () => {
-    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
     if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();
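startRecording now hands focus back to the textarea on the next tick. Deferring with setTimeout(..., 0) lets the state update from setVoiceTrigger, which flips the textarea to readOnly and swaps the placeholder, commit before the refocus runs. The same idea reduced to a helper; focusAfterCommit is an illustrative name:

    // Sketch: defer focus one macrotask so pending React state updates render first.
    function focusAfterCommit(getEl: () => HTMLElement | null): void {
      setTimeout(() => getEl()?.focus(), 0);
    }

    // usage: focusAfterCommit(() => textareaRef.current);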
@@ -581,9 +575,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   const getPlaceholder = () => {
     if (placeholder) return placeholder;
     if (voiceTrigger) return "Listening...";
-    if (currentTask == null ? void 0 : currentTask.complete)
-      return "Task completed!";
-    }
+    if (currentTask == null ? void 0 : currentTask.complete) return "Task completed!";
     if ((lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted)) {
       const interactiveType = lastInteractiveMessage.interactiveData.function;
       switch (interactiveType) {
@@ -612,85 +604,54 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         {
           type: "button",
           onClick: () => {
-            if (
+            if (voiceTrigger) {
               stopRecording();
+            } else {
+              startRecording("click");
             }
-            setInputMode((prev) => prev === "text" ? "voice" : "text");
           },
-          className:
-          title:
-          children:
-          // Voice Icon (Waveform)
-          /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("svg", { xmlns: "http://www.w3.org/2000/svg", viewBox: "0 0 24 24", fill: "currentColor", className: "w-5 h-5 text-gray-600", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("path", { d: "M11.25 4.532A.75.75 0 0 1 12 5.25v13.5a.75.75 0 0 1-1.5 0V5.25a.75.75 0 0 1 .75-.718ZM7.5 8.25a.75.75 0 0 1 .75.75v5.25a.75.75 0 0 1-1.5 0V9a.75.75 0 0 1 .75-.75Zm9 0a.75.75 0 0 1 .75.75v5.25a.75.75 0 0 1-1.5 0V9a.75.75 0 0 1 .75-.75ZM3.75 10.5a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-1.5 0v-1.5a.75.75 0 0 1 .75-.75Zm16.5 0a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-1.5 0v-1.5a.75.75 0 0 1 .75-.75Z" }) })
-          ) : (
-          // Keyboard Icon (Filled)
-          /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("svg", { xmlns: "http://www.w3.org/2000/svg", viewBox: "0 0 24 24", fill: "currentColor", className: "w-5 h-5 text-gray-600", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("path", { fillRule: "evenodd", d: "M3 6a3 3 0 0 1 3-3h12a3 3 0 0 1 3 3v12a3 3 0 0 1-3 3H6a3 3 0 0 1-3-3V6Zm4.5 3a.75.75 0 0 1 .75-.75h1.5a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-.75.75h-1.5a.75.75 0 0 1-.75-.75V9Zm6 0a.75.75 0 0 1 .75-.75h1.5a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-.75.75h-1.5a.75.75 0 0 1-.75-.75V9Zm6 0a.75.75 0 0 1 .75-.75h1.5a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-.75.75h-1.5a.75.75 0 0 1-.75-.75V9Zm-12 4.5a.75.75 0 0 1 .75-.75h1.5a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-.75.75h-1.5a.75.75 0 0 1-.75-.75v-1.5Zm6 0a.75.75 0 0 1 .75-.75h6.75a.75.75 0 0 1 .75.75v1.5a.75.75 0 0 1-.75.75h-6.75a.75.75 0 0 1-.75-.75v-1.5Z", clipRule: "evenodd" }) })
-          )
+          className: `mb-1 p-2 rounded-full transition-all duration-300 flex-shrink-0 border ${voiceTrigger ? "text-white border-orange-400 bg-orange-500 scale-110 shadow-lg animate-pulse" : "text-gray-500 border-gray-300 bg-white hover:text-gray-700 hover:bg-gray-100"}`,
+          title: voiceTrigger ? "Stop Recording" : "Start Voice Input",
+          children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.MicrophoneIcon, { className: "w-5 h-5" })
         }
       ),
       /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(
         "div",
         {
-
-
-          onKeyDown: handleVoiceKeyDown,
-          onKeyUp: handleVoiceKeyUp,
-          onFocus: () => setIsFocused(true),
-          onBlur: () => setIsFocused(false),
-          className: "flex-1 flex items-center border border-gray-300 rounded-lg overflow-hidden outline-none focus-within:ring-2 focus-within:ring-blue-500 focus-within:border-blue-500 bg-white min-h-[42px] mb-1",
+          tabIndex: -1,
+          className: `flex-1 flex items-center border border-gray-300 rounded-lg overflow-hidden outline-none bg-white min-h-[42px] mb-1 transition-all ${voiceTrigger ? "ring-2 ring-orange-100 border-orange-300" : "focus-within:ring-2 focus-within:ring-blue-500 focus-within:border-blue-500"}`,
           children: [
-
-
-
-
-
-
-
-
-
-
-            "textarea",
-            {
-              ref: textareaRef,
-              value: message,
-              onChange: (e) => {
-                if (isControlled && onChange) {
-                  onChange(e);
-                } else {
-                  setInternalMessage(e.target.value);
-                }
-              },
-              onKeyDown: handleKeyDown,
-              placeholder: getPlaceholder(),
-              disabled: isInputDisabled || !!voiceTrigger,
-              rows: 1,
-              className: `flex-grow px-4 py-2 outline-none text-gray-700 placeholder-gray-500 disabled:bg-gray-100 resize-none leading-6 w-full ${isInputDisabled ? "cursor-not-allowed" : ""}`
-            }
-          )
-          ] }),
-          inputMode === "voice" && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "flex-grow flex flex-col justify-center items-center p-1 relative", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
-            "button",
+          /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+            "span",
+            {
+              ref: measurementRef,
+              className: "absolute invisible whitespace-pre-wrap p-0 m-0 text-gray-700 leading-6",
+              style: { fontSize: "1rem" }
+            }
+          ),
+          /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+            "textarea",
             {
-
-
-
-
-
-
+              ref: textareaRef,
+              value: message,
+              onChange: (e) => {
+                if (isControlled && onChange) {
+                  onChange(e);
+                } else {
+                  setInternalMessage(e.target.value);
                 }
               },
-
-
-
-
-
-
-
-
-          ] }) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("span", { children: "Tap to Talk" })
+              onKeyDown: handleKeyDown,
+              onFocus: () => setIsFocused(true),
+              onBlur: () => setIsFocused(false),
+              placeholder: getPlaceholder(),
+              disabled: isInputDisabled,
+              readOnly: !!voiceTrigger,
+              rows: 1,
+              className: `flex-grow px-4 py-2 outline-none text-gray-700 placeholder-gray-500 resize-none leading-6 w-full ${isInputDisabled ? "bg-gray-100 cursor-not-allowed" : "bg-transparent"} ${voiceTrigger ? "cursor-default" : ""}`
             }
-          )
-
+          ),
+          /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)("div", { className: "relative mx-2 flex-shrink-0", children: [
           isSending && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "absolute -inset-1", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(
             "svg",
             {
@@ -733,24 +694,10 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
               handleSubmit();
             }
           },
-          disabled: (currentTask == null ? void 0 : currentTask.complete) || isSending && !onStop || isInputDisabled
+          disabled: (currentTask == null ? void 0 : currentTask.complete) || isSending && !onStop || isInputDisabled,
           className: `relative z-10 text-white rounded-full p-2 transition-colors duration-200 disabled:bg-gray-400 disabled:cursor-not-allowed ${isSending && onStop ? "bg-red-500 hover:bg-red-600" : "bg-blue-600 hover:bg-blue-700"}`,
           title: isSending && onStop ? "Stop generating" : "Send message",
-          children: isSending ? onStop ? /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.StopIcon, { className: "h-5 w-5" }) : (
-          // AND we show the overlay spinner outside?
-          // Actually `ChatInput.tsx` lines 117-140 are `isLoading && (...)`. It is always shown when loading.
-          // So we have a spinner ring AROUND the button (absolute -inset-1).
-          // AND potentially a spinner INSIDE the button if no onStop?
-          // In my case, I will stick to:
-          // If onStop: Show StopIcon. Button is Red.
-          // If !onStop: Show Spinner inside? Or just let the outer ring do the work?
-          // Legacy `Spinner` component usage inside button suggests double spinner if we are not careful.
-          // But usually `onStop` is provided for streaming.
-          // If I look at the screenshot, it shows a RED button (with stop icon) and a BLUE ring around it.
-          // That matches: Red button (bg-red-500) + Blue Spinner Ring (text-blue-500).
-          // So I will replicate that structure.
-          /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.StopIcon, { className: "h-5 w-5" })
-          ) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.PaperAirplaneIcon, { className: "h-5 w-5" })
+          children: isSending ? onStop ? /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.StopIcon, { className: "h-5 w-5" }) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "w-5 h-5" }) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.PaperAirplaneIcon, { className: "h-5 w-5" })
           }
         )
       ] })
@@ -759,18 +706,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         )
       ] }),
       inputHint && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "text-sm text-red-500 bg-red-50 py-1 px-4 rounded-lg mt-1", children: inputHint }),
-
-      inputMode === "voice" && !voiceTrigger && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
-        "p",
-        {
-          className: "text-[10px] text-gray-400 font-medium ml-12 text-center -mt-1 mb-1 cursor-pointer hover:text-gray-600 transition-colors",
-          onClick: () => {
-            var _a2;
-            return (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
-          },
-          children: isFocused ? "Click to talk or hold space to talk" : "Tap to talk or click here to focus and push space to talk"
-        }
-      )
+      /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "mb-2 mt-0.5 min-h-[0.75rem]", style: { marginLeft: "48px" }, children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("p", { className: `text-[10px] leading-tight transition-colors duration-200 ${voiceTrigger ? "text-orange-600 font-medium" : "text-gray-400"}`, children: voiceTrigger ? "Listening... tap mic icon again to stop" : hintText || (voiceConfig ? "Type in text or tap mic icon to talk" : "Type your message...") }) })
   ] });
 });
 ChatInputArea.displayName = "ChatInputArea";