@contentgrowth/llm-service 0.8.4 → 0.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ui/react/components/index.cjs +263 -135
- package/dist/ui/react/components/index.cjs.map +1 -1
- package/dist/ui/react/components/index.d.cts +1 -0
- package/dist/ui/react/components/index.d.ts +1 -0
- package/dist/ui/react/components/index.js +264 -136
- package/dist/ui/react/components/index.js.map +1 -1
- package/package.json +1 -1
@@ -200,6 +200,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   const [error, setError] = (0, import_react2.useState)(null);
   const [isSupported, setIsSupported] = (0, import_react2.useState)(false);
   const recognitionRef = (0, import_react2.useRef)(null);
+  const isSimulatingRef = (0, import_react2.useRef)(false);
+  const simulationTimeoutRef = (0, import_react2.useRef)(null);
   (0, import_react2.useEffect)(() => {
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
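Both refs added here serve the dev-mode simulation wired up in the hunks below: isSimulatingRef is read synchronously inside the recognizer's event handlers, and simulationTimeoutRef keeps the pending timer handle so it can be cancelled. Refs rather than state fit because neither value should trigger a re-render and both must stay visible to callbacks created once. A minimal sketch of the same pattern, with illustrative names that are not from the package:

```js
import { useCallback, useRef, useState } from "react";

// Sketch: an "in progress" flag kept in a ref so handlers created once can
// read the current value, plus a stored timer id so the work is cancellable.
function useCancellableDelay(onDone, delayMs = 3000) {
  const [running, setRunning] = useState(false); // drives the UI
  const runningRef = useRef(false);              // drives the event handlers
  const timerRef = useRef(null);

  const start = useCallback(() => {
    runningRef.current = true;
    setRunning(true);
    timerRef.current = setTimeout(() => {
      runningRef.current = false;
      timerRef.current = null;
      setRunning(false);
      if (onDone) onDone();
    }, delayMs);
  }, [onDone, delayMs]);

  const cancel = useCallback(() => {
    if (timerRef.current) clearTimeout(timerRef.current);
    timerRef.current = null;
    runningRef.current = false;
    setRunning(false);
  }, []);

  return { running, runningRef, start, cancel };
}
```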
@@ -210,10 +212,16 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       recognition.interimResults = true;
       recognition.lang = language;
       recognition.onstart = () => {
+        console.log("[useSpeechRecognition] Native onstart event fired");
         setIsListening(true);
         setError(null);
       };
       recognition.onend = () => {
+        console.log("[useSpeechRecognition] Native onend event fired");
+        if (isSimulatingRef.current) {
+          console.log("[useSpeechRecognition] Ignoring onend due to simulation");
+          return;
+        }
         setIsListening(false);
         if (onEnd) onEnd();
       };
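The new guard in onend exists because of event ordering: when the microphone is blocked, the recognizer fires onerror (which, in development, now starts a simulated session, see the next hunks) and then fires onend immediately afterwards, which would otherwise reset isListening and invoke onEnd while the simulation is still pending. A self-contained sketch of that interaction, with stand-in parameter names:

```js
// Sketch of the ordering the guard protects against: on a blocked mic the
// recognizer fires onerror first and onend right after, so the error handler
// must set the ref before onend gets a chance to tear the session down.
function wireRecognizer(recognition, isSimulatingRef, setIsListening, beginSimulation) {
  recognition.onerror = (event) => {
    if (event.error === "not-allowed") {
      isSimulatingRef.current = true; // claim the session before onend fires
      beginSimulation();
    }
  };
  recognition.onend = () => {
    if (isSimulatingRef.current) return; // let the simulation finish instead
    setIsListening(false);
  };
}
```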
@@ -233,17 +241,21 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
         setTranscript((prev) => prev + finalTranscript);
       };
       recognition.onerror = (event) => {
+        console.error("[useSpeechRecognition] Native onerror event:", event.error);
         if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
           console.warn("Speech recognition blocked. Simulating input for development...");
+          isSimulatingRef.current = true;
           setError(null);
           setIsListening(true);
-          setTimeout(() => {
+          simulationTimeoutRef.current = setTimeout(() => {
             const mockText = "This is a simulated voice input for testing.";
             setTranscript((prev) => prev + (prev ? " " : "") + mockText);
             if (onResult) onResult(mockText, true);
+            isSimulatingRef.current = false;
             setIsListening(false);
             if (onEnd) onEnd();
-          … (old line 246 truncated)
+            simulationTimeoutRef.current = null;
+          }, 3e3);
           return;
         }
         console.error("Speech recognition error", event.error);
@@ -254,26 +266,50 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       }
     }
     return () => {
-      if (
-      … (old line 258 truncated)
+      if (isSimulatingRef.current && simulationTimeoutRef.current) {
+        clearTimeout(simulationTimeoutRef.current);
+        simulationTimeoutRef.current = null;
       }
+      recognitionRef.current.stop();
     };
-  }, [onResult, onEnd]);
+  }, [onResult, onEnd, language]);
   const start = (0, import_react2.useCallback)(() => {
+    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
+        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
-        console.error("Failed to start speech recognition:", e);
+        console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
+    } else {
+      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
+    console.log("[useSpeechRecognition] stop() called");
+    if (isSimulatingRef.current) {
+      console.log("[useSpeechRecognition] Stopping simulation");
+      if (simulationTimeoutRef.current) {
+        clearTimeout(simulationTimeoutRef.current);
+        simulationTimeoutRef.current = null;
+      }
+      const mockText = "This is a simulated voice input for testing.";
+      setTranscript((prev) => prev + (prev ? " " : "") + mockText);
+      if (onResult) onResult(mockText, true);
+      isSimulatingRef.current = false;
+      setIsListening(false);
+      if (onEnd) onEnd();
+      return;
+    }
     if (recognitionRef.current && isListening) {
+      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
+    } else {
+      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
    }
-  }, [isListening]);
+  }, [isListening, onResult, onEnd]);
   const resetTranscript = (0, import_react2.useCallback)(() => {
     setTranscript("");
   }, []);
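Beyond logging, two behavioral changes land in this hunk: the effect's dependency list gains language, so switching languages now tears down and rebuilds the recognizer instead of leaving a stale recognition.lang; and the cleanup cancels a still-pending simulation timeout before stopping recognition. Note that the rebuilt cleanup calls recognitionRef.current.stop() unconditionally, which appears to assume the ref was populated, i.e. a browser with SpeechRecognition support. The rebuild-on-dependency pattern in isolation, sketched under that assumption with a guard added:

```js
import { useEffect, useRef } from "react";

// Sketch: rebuild a stateful browser object whenever `language` changes,
// cancelling pending work in the cleanup. `timeoutRef` stands in for the
// simulation timer ref; the recognizer setup details are elided.
function useRecognizer(language, timeoutRef) {
  const recognitionRef = useRef(null);
  useEffect(() => {
    const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SR) return; // unsupported browser: nothing created, nothing to clean up
    const recognition = new SR();
    recognition.lang = language;
    recognitionRef.current = recognition;
    return () => {
      if (timeoutRef.current) clearTimeout(timeoutRef.current); // pending simulation
      recognition.stop();
    };
  }, [language, timeoutRef]);
  return recognitionRef;
}
```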
@@ -297,23 +333,32 @@ var useAudioRecorder = (onStop) => {
   const mediaRecorderRef = (0, import_react3.useRef)(null);
   const chunksRef = (0, import_react3.useRef)([]);
   const start = (0, import_react3.useCallback)(async () => {
+    console.log("[useAudioRecorder] start() called");
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
+        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
           chunksRef.current.push(e.data);
         }
       };
       mediaRecorder.onstop = () => {
+        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
         const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
+        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
         setBlob(audioBlob);
         setIsRecording(false);
         if (onStop) onStop(audioBlob);
-        stream.getTracks().forEach((track) =>
+        stream.getTracks().forEach((track) => {
+          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
+          track.stop();
+        });
       };
+      console.log("[useAudioRecorder] Starting MediaRecorder...");
       mediaRecorder.start();
       setIsRecording(true);
       setError(null);
@@ -323,8 +368,12 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = (0, import_react3.useCallback)(() => {
+    console.log("[useAudioRecorder] stop() called");
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
+      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
+    } else {
+      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
   }, []);
   return {
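The added logging traces the full MediaRecorder lifecycle the hook wraps: acquire a stream, buffer ondataavailable chunks, assemble a Blob in onstop, then stop every track; the track.stop() calls are what actually release the microphone and the browser's recording indicator. The same lifecycle as a standalone, promise-shaped sketch:

```js
// Sketch: one-shot audio capture with the lifecycle useAudioRecorder wraps.
async function recordOnce(ms = 2000) {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = (e) => {
    if (e.data.size > 0) chunks.push(e.data);
  };
  const done = new Promise((resolve) => {
    recorder.onstop = () => {
      stream.getTracks().forEach((track) => track.stop()); // release the mic
      resolve(new Blob(chunks, { type: "audio/webm" }));
    };
  });
  recorder.start();
  setTimeout(() => recorder.stop(), ms);
  return done;
}
```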
@@ -371,16 +420,27 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   hintText,
   placeholder,
   value,
-  onChange
+  onChange,
+  defaultInputMode = "text"
 }, ref) => {
   var _a, _b, _c, _d;
   const [internalMessage, setInternalMessage] = (0, import_react5.useState)("");
-  const [
-  const [inputMode, setInputMode] = (0, import_react5.useState)(
+  const [voiceTrigger, setVoiceTrigger] = (0, import_react5.useState)(null);
+  const [inputMode, setInputMode] = (0, import_react5.useState)(defaultInputMode);
+  const [isFocused, setIsFocused] = (0, import_react5.useState)(false);
   const textareaRef = (0, import_react5.useRef)(null);
   const measurementRef = (0, import_react5.useRef)(null);
+  const voiceContainerRef = (0, import_react5.useRef)(null);
+  (0, import_react5.useEffect)(() => {
+    var _a2;
+    if (inputMode === "voice") {
+      (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
+    }
+  }, [inputMode]);
   const isControlled = value !== void 0;
   const message = isControlled ? value : internalMessage;
+  const messageRef = (0, import_react5.useRef)(message);
+  messageRef.current = message;
   const { voice: globalVoice } = useChatConfig();
   const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
   const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
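messageRef is reassigned on every render, so handleVoiceResult (declared below with an empty dependency array, which keeps its identity stable and avoids re-wiring the speech hook on each keystroke) can append dictated text to the current draft rather than to the draft captured when the callback was created. The pattern in isolation, with illustrative names:

```js
import { useCallback, useRef, useState } from "react";

// Sketch: mirror state into a ref each render so a stable callback can read
// the latest value without listing it as a dependency.
function useAppendLatest() {
  const [text, setText] = useState("");
  const textRef = useRef(text);
  textRef.current = text; // plain assignment during render, no effect needed

  const append = useCallback((chunk) => {
    const prev = textRef.current;
    setText(prev + (prev ? " " : "") + chunk); // space-join, as in the diff
  }, []); // stable identity: safe to hand to a hook that memoizes callbacks

  return { text, append };
}
```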
@@ -396,31 +456,62 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     }
   };
   const isInputDisabled = (currentTask == null ? void 0 : currentTask.complete) || (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (((_b = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _b.function) === "form" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted) || ((_c = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _c.function) === "confirm" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted));
-  useProactiveResize(textareaRef, measurementRef, message, isInputDisabled ||
-  const
-  … (old lines 401-402 truncated)
+  useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger || inputMode === "voice");
+  const handleVoiceKeyDown = (e) => {
+    if (inputMode !== "voice" || isInputDisabled) return;
+    if (e.code !== "Space") return;
+    const activeElement = document.activeElement;
+    const isInputActive = activeElement && (activeElement.tagName === "INPUT" || activeElement.tagName === "TEXTAREA" || activeElement instanceof HTMLElement && activeElement.isContentEditable);
+    if (isInputActive) return;
+    e.preventDefault();
+    e.stopPropagation();
+    if (voiceTrigger === "click") return;
+    if (!e.repeat && !voiceTrigger) {
+      startRecording("space");
+    }
+  };
+  const handleVoiceKeyUp = (e) => {
+    if (inputMode !== "voice" || isInputDisabled) return;
+    if (e.code === "Space") {
+      if (voiceTrigger === "space") {
+        e.preventDefault();
+        stopRecording();
+      }
+    }
+  };
+  const handleVoiceResult = (0, import_react5.useCallback)((text) => {
+    console.log("[ChatInputArea] nativeSpeech result:", text);
+    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+  }, []);
+  const handleVoiceEnd = (0, import_react5.useCallback)(() => {
     var _a2;
-    … (old line 404 truncated)
+    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
+    setVoiceTrigger(null);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
-  }, voiceConfig
+  }, [voiceConfig]);
+  const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2;
-    … (old line 409 truncated)
+    console.log("[ChatInputArea] customRecorder onStop triggered");
+    setVoiceTrigger(null);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
     if (voiceConfig == null ? void 0 : voiceConfig.onAudioCapture) {
       try {
         const text = await voiceConfig.onAudioCapture(blob);
-        if (text) triggerChange(
+        if (text) triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
       } catch (e) {
-        console.error("Audio capture failed", e);
+        console.error("[ChatInputArea] Audio capture failed", e);
       }
     }
   });
   (0, import_react5.useImperativeHandle)(ref, () => ({
     focus: () => {
-      var _a2;
-      (
+      var _a2, _b2;
+      if (inputMode === "voice") {
+        (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
+      } else {
+        (_b2 = textareaRef.current) == null ? void 0 : _b2.focus();
+      }
     },
     setValue: (newValue) => {
       triggerChange(newValue);
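The two key handlers implement hold-Space push-to-talk on the focusable voice container: the first non-repeat Space keydown starts a "space" session, keyup ends it, auto-repeat is filtered via e.repeat, typing contexts (inputs, textareas, contentEditable) are exempt, and a session begun by clicking cannot be ended from the keyboard. The contract reduced to a sketch:

```js
// Sketch of the push-to-talk contract: first non-repeat Space down starts a
// "space" session, Space up ends it; auto-repeat and click sessions are ignored.
function pushToTalkHandlers(getTrigger, startRecording, stopRecording) {
  return {
    onKeyDown(e) {
      if (e.code !== "Space") return;
      e.preventDefault();
      if (e.repeat) return;     // a held key keeps firing keydown
      if (getTrigger()) return; // click session active, or already recording
      startRecording("space");
    },
    onKeyUp(e) {
      if (e.code !== "Space") return;
      if (getTrigger() === "space") {
        e.preventDefault();
        stopRecording();        // release-to-send
      }
    },
  };
}
```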
@@ -449,24 +540,29 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       handleSubmit();
     }
   };
-  const startRecording = async () => {
+  const startRecording = async (trigger) => {
     var _a2;
-    … (old lines 454-455 truncated)
+    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
+    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
+    if (voiceTrigger) return;
+    setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       if (!nativeSpeech.isSupported) {
         alert("Speech recognition is not supported in this browser.");
-        … (old line 460 truncated)
+        setVoiceTrigger(null);
         return;
       }
+      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
+      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
   };
   const stopRecording = () => {
-    … (old line 469 truncated)
+    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
+    if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();
     } else {
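startRecording now records how it was invoked, and both functions guard on voiceTrigger, which makes the pair re-entrancy safe: starting while a session is live is a no-op, as is stopping when nothing started, and the unsupported-browser path resets the trigger it had just set. The guard shape on its own, as a framework-free sketch:

```js
// Sketch: a start/stop pair where `trigger` records how the session began
// ("click" or "space") and doubles as the "session active" flag.
function makeSession(begin, end) {
  let trigger = null;
  return {
    start(source) {
      if (trigger) return; // already recording: ignore the second start
      trigger = source;
      begin(source);
    },
    stop() {
      if (!trigger) return; // nothing to stop
      end(trigger);
      trigger = null;
    },
    get trigger() {
      return trigger;
    },
  };
}
```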
@@ -475,7 +571,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   };
   const getPlaceholder = () => {
     if (placeholder) return placeholder;
-    if (
+    if (voiceTrigger) return "Listening...";
     if (currentTask == null ? void 0 : currentTask.complete) {
       return "Task completed!";
     }
@@ -507,7 +603,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   {
     type: "button",
     onClick: () => {
-      if (inputMode === "voice" &&
+      if (inputMode === "voice" && voiceTrigger) {
         stopRecording();
       }
       setInputMode((prev) => prev === "text" ? "voice" : "text");
@@ -523,117 +619,149 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         )
       }
     ),
-    /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(
-    … (old lines 527-536 truncated)
-    "
-    … (old lines 538-543 truncated)
-                  } else {
-                    setInternalMessage(e.target.value);
+    /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(
+      "div",
+      {
+        ref: voiceContainerRef,
+        tabIndex: inputMode === "voice" ? 0 : -1,
+        onKeyDown: handleVoiceKeyDown,
+        onKeyUp: handleVoiceKeyUp,
+        onFocus: () => setIsFocused(true),
+        onBlur: () => setIsFocused(false),
+        className: "flex-1 flex items-center border border-gray-300 rounded-lg overflow-hidden outline-none focus-within:ring-2 focus-within:ring-blue-500 focus-within:border-blue-500 bg-white min-h-[42px] mb-1",
+        children: [
+          inputMode === "text" && /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(import_jsx_runtime5.Fragment, { children: [
+            /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+              "span",
+              {
+                ref: measurementRef,
+                className: "absolute invisible whitespace-pre-wrap p-0 m-0 text-gray-700 leading-6",
+                style: { fontSize: "1rem" }
               }
-    … (old lines 547-576 truncated)
-                children: [
-                  /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
-                    "circle",
-                    {
-                      className: "opacity-25",
-                      cx: "12",
-                      cy: "12",
-                      r: "10",
-                      stroke: "currentColor",
-                      strokeWidth: "4"
-                    }
-                  ),
-                  /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
-                    "path",
-                    {
-                      className: "opacity-75",
-                      fill: "currentColor",
-                      d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"
+            ),
+            /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+              "textarea",
+              {
+                ref: textareaRef,
+                value: message,
+                onChange: (e) => {
+                  if (isControlled && onChange) {
+                    onChange(e);
+                  } else {
+                    setInternalMessage(e.target.value);
+                  }
+                },
+                onKeyDown: handleKeyDown,
+                placeholder: getPlaceholder(),
+                disabled: isInputDisabled || !!voiceTrigger,
+                rows: 1,
+                className: `flex-grow px-4 py-2 outline-none text-gray-700 placeholder-gray-500 disabled:bg-gray-100 resize-none leading-6 w-full ${isInputDisabled ? "cursor-not-allowed" : ""}`
+              }
+            )
+          ] }),
+          inputMode === "voice" && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "flex-grow flex flex-col justify-center items-center p-1 relative", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+            "button",
+            {
+              type: "button",
+              onClick: () => {
+                if (voiceTrigger === "click") {
+                  stopRecording();
+                } else if (!voiceTrigger) {
+                  startRecording("click");
                 }
-    … (old lines 596-609 truncated)
+              },
+              disabled: isInputDisabled || voiceTrigger === "space",
+              className: `w-full py-2 text-center font-medium rounded-md transition-all select-none flex items-center justify-center gap-2 ${voiceTrigger ? "bg-red-50 text-red-600 animate-pulse border border-red-200" : "bg-gray-50 text-gray-700 hover:bg-gray-100"} ${voiceTrigger === "space" ? "opacity-90 cursor-default" : ""}`,
+              children: voiceTrigger ? /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(import_jsx_runtime5.Fragment, { children: [
+                /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "w-2 h-2 rounded-full bg-red-500 animate-ping mr-2" }),
+                /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)("span", { children: [
+                  "Listening... ",
+                  voiceTrigger === "space" ? "(Release Space to send)" : "Tap to send"
+                ] })
+              ] }) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("span", { children: "Tap to Talk" })
+            }
+          ) }),
+          (inputMode === "text" || isSending) && /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)("div", { className: "relative mx-2 flex-shrink-0", children: [
+            isSending && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "absolute -inset-1", children: /* @__PURE__ */ (0, import_jsx_runtime5.jsxs)(
+              "svg",
+              {
+                className: "animate-spin h-full w-full text-blue-500 opacity-75",
+                xmlns: "http://www.w3.org/2000/svg",
+                fill: "none",
+                viewBox: "0 0 24 24",
+                children: [
+                  /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+                    "circle",
+                    {
+                      className: "opacity-25",
+                      cx: "12",
+                      cy: "12",
+                      r: "10",
+                      stroke: "currentColor",
+                      strokeWidth: "4"
+                    }
+                  ),
+                  /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+                    "path",
+                    {
+                      className: "opacity-75",
+                      fill: "currentColor",
+                      d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"
+                    }
+                  )
+                ]
               }
-            },
-    … (old lines 612-633 truncated)
+            ) }),
+            /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+              "button",
+              {
+                type: "button",
+                onClick: (e) => {
+                  if (isSending && onStop) {
+                    e.preventDefault();
+                    onStop();
+                  } else {
+                    handleSubmit();
+                  }
+                },
+                disabled: (currentTask == null ? void 0 : currentTask.complete) || isSending && !onStop || isInputDisabled || !!voiceTrigger,
+                className: `relative z-10 text-white rounded-full p-2 transition-colors duration-200 disabled:bg-gray-400 disabled:cursor-not-allowed ${isSending && onStop ? "bg-red-500 hover:bg-red-600" : "bg-blue-600 hover:bg-blue-700"}`,
+                title: isSending && onStop ? "Stop generating" : "Send message",
+                children: isSending ? onStop ? /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.StopIcon, { className: "h-5 w-5" }) : (
+                  // AND we show the overlay spinner outside?
+                  // Actually `ChatInput.tsx` lines 117-140 are `isLoading && (...)`. It is always shown when loading.
+                  // So we have a spinner ring AROUND the button (absolute -inset-1).
+                  // AND potentially a spinner INSIDE the button if no onStop?
+                  // In my case, I will stick to:
+                  // If onStop: Show StopIcon. Button is Red.
+                  // If !onStop: Show Spinner inside? Or just let the outer ring do the work?
+                  // Legacy `Spinner` component usage inside button suggests double spinner if we are not careful.
+                  // But usually `onStop` is provided for streaming.
+                  // If I look at the screenshot, it shows a RED button (with stop icon) and a BLUE ring around it.
+                  // That matches: Red button (bg-red-500) + Blue Spinner Ring (text-blue-500).
+                  // So I will replicate that structure.
+                  /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.StopIcon, { className: "h-5 w-5" })
+                ) : /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(import_outline.PaperAirplaneIcon, { className: "h-5 w-5" })
+              }
+            )
+          ] })
+        ]
+      }
+    )
   ] }),
   inputHint && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("div", { className: "text-sm text-red-500 bg-red-50 py-1 px-4 rounded-lg mt-1", children: inputHint }),
-  hintText && inputMode === "text" && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("p", { className: "text-xs text-gray-500 ml-12 mb-2 mt-1", children: hintText })
+  hintText && inputMode === "text" && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)("p", { className: "text-xs text-gray-500 ml-12 mb-2 mt-1", children: hintText }),
+  inputMode === "voice" && !voiceTrigger && /* @__PURE__ */ (0, import_jsx_runtime5.jsx)(
+    "p",
+    {
+      className: "text-[10px] text-gray-400 font-medium ml-12 text-center -mt-1 mb-1 cursor-pointer hover:text-gray-600 transition-colors",
+      onClick: () => {
+        var _a2;
+        return (_a2 = voiceContainerRef.current) == null ? void 0 : _a2.focus();
+      },
+      children: isFocused ? "Click to talk or hold space to talk" : "Tap to talk or click here to focus and push space to talk"
+    }
+  )
 ] });
 });
 ChatInputArea.displayName = "ChatInputArea";