@contentgrowth/llm-service 0.9.9 → 0.9.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -211,45 +211,62 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
 }, [onResult, onEnd]);
 const isStartingRef = (0, import_react2.useRef)(false);
 (0, import_react2.useEffect)(() => {
+const isMobile = typeof window !== "undefined" && (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0);
 if (typeof window !== "undefined") {
 const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+console.log("[useSpeechRecognition] Init - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile);
 if (SpeechRecognition) {
 setIsSupported(true);
 const recognition = new SpeechRecognition();
 recognition.continuous = true;
 recognition.interimResults = true;
+console.log("[useSpeechRecognition] Created recognition instance. continuous:", recognition.continuous, "interimResults:", recognition.interimResults);
 recognition.onstart = () => {
-console.log("[useSpeechRecognition] Native onstart event fired");
+console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
 isStartingRef.current = false;
 setIsListening(true);
 setError(null);
 };
 recognition.onend = () => {
-console.log("[useSpeechRecognition] Native onend event fired");
+console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
 isStartingRef.current = false;
 if (isSimulatingRef.current) {
+console.log("[useSpeechRecognition] onend ignored - simulating");
 return;
 }
 setIsListening(false);
 if (onEndRef.current) onEndRef.current();
 };
 recognition.onresult = (event) => {
+console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
 let interimTranscript = "";
 let finalTranscript = "";
 for (let i = event.results.length - 1; i < event.results.length; ++i) {
 const result = event.results[i];
 if (result.isFinal) {
 finalTranscript += result[0].transcript;
+console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
 if (onResultRef.current) onResultRef.current(finalTranscript, true);
 } else {
 interimTranscript += result[0].transcript;
+console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
 if (onResultRef.current) onResultRef.current(interimTranscript, false);
 }
 }
 setTranscript((prev) => prev + finalTranscript);
 };
 recognition.onerror = (event) => {
-console.error("[useSpeechRecognition] Native onerror event:", event.error);
+console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
+console.error("[useSpeechRecognition] Error details - This could be caused by:");
+if (event.error === "aborted") {
+console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
+} else if (event.error === "not-allowed") {
+console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
+} else if (event.error === "no-speech") {
+console.error("[useSpeechRecognition] - no-speech: No speech detected");
+} else if (event.error === "network") {
+console.error("[useSpeechRecognition] - network: Network error during recognition");
+}
 isStartingRef.current = false;
 if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
 console.warn("Speech recognition blocked. Simulating input for development...");
@@ -292,8 +309,17 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
 }
 }, [language]);
 const start = (0, import_react2.useCallback)(() => {
-
-
+var _a;
+const startTimestamp = Date.now();
+console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
+console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasInstance:", !!recognitionRef.current);
+if (typeof document !== "undefined") {
+console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
+}
+if (isSimulatingRef.current) {
+console.log("[useSpeechRecognition] isSimulating, ignoring start");
+return;
+}
 if (!recognitionRef.current) {
 console.error("[useSpeechRecognition] Recognition instance missing");
 return;
@@ -303,7 +329,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
 return;
 }
 if (recognitionRef.current.isListening) {
-console.warn("[useSpeechRecognition] Already listening - ignoring");
+console.warn("[useSpeechRecognition] Already listening (native prop) - ignoring");
 }
 if (isListening) {
 console.warn("[useSpeechRecognition] App state says already listening - ignoring");
@@ -312,11 +338,15 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
 try {
 setTranscript("");
 isStartingRef.current = true;
+console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
 recognitionRef.current.start();
-console.log("[useSpeechRecognition] recognition.start() executed");
+console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
 } catch (error2) {
 isStartingRef.current = false;
-console.error("[useSpeechRecognition] Failed to start recognition:", error2);
+console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
+if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
+console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
+}
 }
 }, [isListening]);
 const stop = (0, import_react2.useCallback)(() => {
@@ -640,26 +670,47 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
 handleSubmit();
 }
 };
+const isMobile = (0, import_react5.useCallback)(() => {
+if (typeof window === "undefined") return false;
+return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+}, []);
 const startRecording = async (trigger) => {
 var _a2;
-
+console.log("[ChatInputArea] startRecording called. trigger:", trigger, "isMobile:", isMobile());
+console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
+if (voiceTrigger || isTranscribing) {
+console.log("[ChatInputArea] startRecording ignored - already active");
+return;
+}
 setVoiceTrigger(trigger);
 setVoiceError(null);
+console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
 (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
 if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
+console.log("[ChatInputArea] Using native speech recognition");
 if (!nativeSpeech.isSupported) {
+console.error("[ChatInputArea] Native speech not supported");
 alert("Speech recognition is not supported in this browser.");
 setVoiceTrigger(null);
 return;
 }
+console.log("[ChatInputArea] Calling nativeSpeech.start()...");
 nativeSpeech.start();
+console.log("[ChatInputArea] nativeSpeech.start() called");
 } else {
+console.log("[ChatInputArea] Using custom recorder");
 await customRecorder.start();
+console.log("[ChatInputArea] Custom recorder started");
+}
+if (!isMobile()) {
+console.log("[ChatInputArea] Re-focusing textarea (desktop only)");
+setTimeout(() => {
+var _a3;
+return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+}, 0);
+} else {
+console.log("[ChatInputArea] SKIPPING textarea focus on mobile to prevent keyboard conflict");
 }
-setTimeout(() => {
-var _a3;
-return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
-}, 0);
 };
 const stopRecording = () => {
 if (!voiceTrigger) return;
@@ -948,13 +999,22 @@ var TapToTalk = ({
 const isListening = !!voiceTrigger || nativeSpeech.isListening || customRecorder.isRecording;
 const isActive = isListening || isTranscribing;
 const processingRef = (0, import_react6.useRef)(false);
-const
+const isMobile = (0, import_react6.useCallback)(() => {
+if (typeof window === "undefined") return false;
+return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+}, []);
+const toggleVoice = async (e) => {
+if (e) {
+e.preventDefault();
+e.stopPropagation();
+}
+console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
 if (processingRef.current) {
 console.log("[TapToTalk] toggleVoice ignored - processing");
 return;
 }
 processingRef.current = true;
-console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
+console.log("[TapToTalk] toggleVoice called. isActive:", isActive, "isListening:", isListening, "isTranscribing:", isTranscribing);
 try {
 const now = Date.now();
 if (now - tapCountRef.current.lastTap < 500) {
@@ -987,24 +1047,39 @@ var TapToTalk = ({
 }
 setVoiceTrigger(null);
 } else {
-console.log("[TapToTalk] Starting voice...");
+console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
 setErrorMsg(null);
-onFocusTarget
+if (onFocusTarget && !isMobile()) {
+console.log("[TapToTalk] calling onFocusTarget() (desktop only)");
+onFocusTarget();
+} else if (onFocusTarget) {
+console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
+} else {
+console.log("[TapToTalk] onFocusTarget is undefined");
+}
 setVoiceTrigger("click");
+console.log("[TapToTalk] voiceTrigger set to click");
 if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "custom") {
+console.log("[TapToTalk] Starting custom recorder...");
 try {
 await customRecorder.start();
-
+console.log("[TapToTalk] Custom recorder started successfully");
+} catch (e2) {
+console.error("[TapToTalk] Custom recorder failed:", e2);
 setErrorMsg("Mic access denied");
 setVoiceTrigger(null);
 }
 } else {
+console.log("[TapToTalk] Starting native speech recognition...");
 if (!nativeSpeech.isSupported) {
+console.error("[TapToTalk] Native speech not supported");
 setErrorMsg("Speech not supported");
 setVoiceTrigger(null);
 return;
 }
+console.log("[TapToTalk] Calling nativeSpeech.start()...");
 nativeSpeech.start();
+console.log("[TapToTalk] nativeSpeech.start() called");
 }
 }
 } finally {