@contentgrowth/llm-service 0.9.9 → 0.9.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -169,45 +169,62 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   }, [onResult, onEnd]);
   const isStartingRef = useRef(false);
   useEffect(() => {
+    const isMobile = typeof window !== "undefined" && (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0);
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+      console.log("[useSpeechRecognition] Init - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile);
       if (SpeechRecognition) {
         setIsSupported(true);
         const recognition = new SpeechRecognition();
         recognition.continuous = true;
         recognition.interimResults = true;
+        console.log("[useSpeechRecognition] Created recognition instance. continuous:", recognition.continuous, "interimResults:", recognition.interimResults);
         recognition.onstart = () => {
-          console.log("[useSpeechRecognition] Native onstart event fired");
+          console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
           isStartingRef.current = false;
           setIsListening(true);
           setError(null);
         };
         recognition.onend = () => {
-          console.log("[useSpeechRecognition] Native onend event fired");
+          console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
           isStartingRef.current = false;
           if (isSimulatingRef.current) {
+            console.log("[useSpeechRecognition] onend ignored - simulating");
             return;
           }
           setIsListening(false);
           if (onEndRef.current) onEndRef.current();
         };
         recognition.onresult = (event) => {
+          console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
           let interimTranscript = "";
           let finalTranscript = "";
           for (let i = event.results.length - 1; i < event.results.length; ++i) {
             const result = event.results[i];
             if (result.isFinal) {
               finalTranscript += result[0].transcript;
+              console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
               if (onResultRef.current) onResultRef.current(finalTranscript, true);
             } else {
               interimTranscript += result[0].transcript;
+              console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
               if (onResultRef.current) onResultRef.current(interimTranscript, false);
             }
           }
           setTranscript((prev) => prev + finalTranscript);
         };
         recognition.onerror = (event) => {
-          console.error("[useSpeechRecognition] Native onerror event:", event.error);
+          console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
+          console.error("[useSpeechRecognition] Error details - This could be caused by:");
+          if (event.error === "aborted") {
+            console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
+          } else if (event.error === "not-allowed") {
+            console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
+          } else if (event.error === "no-speech") {
+            console.error("[useSpeechRecognition] - no-speech: No speech detected");
+          } else if (event.error === "network") {
+            console.error("[useSpeechRecognition] - network: Network error during recognition");
+          }
           isStartingRef.current = false;
           if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
             console.warn("Speech recognition blocked. Simulating input for development...");
@@ -250,8 +267,17 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     }
   }, [language]);
   const start = useCallback(() => {
-
-
+    var _a;
+    const startTimestamp = Date.now();
+    console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
+    console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasInstance:", !!recognitionRef.current);
+    if (typeof document !== "undefined") {
+      console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
+    }
+    if (isSimulatingRef.current) {
+      console.log("[useSpeechRecognition] isSimulating, ignoring start");
+      return;
+    }
     if (!recognitionRef.current) {
       console.error("[useSpeechRecognition] Recognition instance missing");
       return;
@@ -261,7 +287,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current.isListening) {
-      console.warn("[useSpeechRecognition] Already listening - ignoring");
+      console.warn("[useSpeechRecognition] Already listening (native prop) - ignoring");
     }
     if (isListening) {
       console.warn("[useSpeechRecognition] App state says already listening - ignoring");
@@ -270,11 +296,15 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     try {
       setTranscript("");
       isStartingRef.current = true;
+      console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
       recognitionRef.current.start();
-      console.log("[useSpeechRecognition] recognition.start() executed");
+      console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
     } catch (error2) {
       isStartingRef.current = false;
-      console.error("[useSpeechRecognition] Failed to start recognition:", error2);
+      console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
+      if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
+        console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
+      }
     }
   }, [isListening]);
   const stop = useCallback(() => {
@@ -598,26 +628,47 @@ var ChatInputArea = forwardRef(({
       handleSubmit();
     }
   };
+  const isMobile = useCallback3(() => {
+    if (typeof window === "undefined") return false;
+    return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+  }, []);
   const startRecording = async (trigger) => {
     var _a2;
-
+    console.log("[ChatInputArea] startRecording called. trigger:", trigger, "isMobile:", isMobile());
+    console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
+    if (voiceTrigger || isTranscribing) {
+      console.log("[ChatInputArea] startRecording ignored - already active");
+      return;
+    }
     setVoiceTrigger(trigger);
     setVoiceError(null);
+    console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
+      console.log("[ChatInputArea] Using native speech recognition");
       if (!nativeSpeech.isSupported) {
+        console.error("[ChatInputArea] Native speech not supported");
        alert("Speech recognition is not supported in this browser.");
        setVoiceTrigger(null);
        return;
       }
+      console.log("[ChatInputArea] Calling nativeSpeech.start()...");
       nativeSpeech.start();
+      console.log("[ChatInputArea] nativeSpeech.start() called");
     } else {
+      console.log("[ChatInputArea] Using custom recorder");
       await customRecorder.start();
+      console.log("[ChatInputArea] Custom recorder started");
+    }
+    if (!isMobile()) {
+      console.log("[ChatInputArea] Re-focusing textarea (desktop only)");
+      setTimeout(() => {
+        var _a3;
+        return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+      }, 0);
+    } else {
+      console.log("[ChatInputArea] SKIPPING textarea focus on mobile to prevent keyboard conflict");
     }
-    setTimeout(() => {
-      var _a3;
-      return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
-    }, 0);
   };
   const stopRecording = () => {
     if (!voiceTrigger) return;
@@ -906,13 +957,22 @@ var TapToTalk = ({
   const isListening = !!voiceTrigger || nativeSpeech.isListening || customRecorder.isRecording;
   const isActive = isListening || isTranscribing;
   const processingRef = useRef4(false);
-  const
+  const isMobile = useCallback4(() => {
+    if (typeof window === "undefined") return false;
+    return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+  }, []);
+  const toggleVoice = async (e) => {
+    if (e) {
+      e.preventDefault();
+      e.stopPropagation();
+    }
+    console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
     if (processingRef.current) {
       console.log("[TapToTalk] toggleVoice ignored - processing");
       return;
     }
     processingRef.current = true;
-    console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
+    console.log("[TapToTalk] toggleVoice called. isActive:", isActive, "isListening:", isListening, "isTranscribing:", isTranscribing);
     try {
       const now = Date.now();
       if (now - tapCountRef.current.lastTap < 500) {
@@ -945,24 +1005,39 @@ var TapToTalk = ({
         }
         setVoiceTrigger(null);
       } else {
-        console.log("[TapToTalk] Starting voice...");
+        console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
         setErrorMsg(null);
-        onFocusTarget
+        if (onFocusTarget && !isMobile()) {
+          console.log("[TapToTalk] calling onFocusTarget() (desktop only)");
+          onFocusTarget();
+        } else if (onFocusTarget) {
+          console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
+        } else {
+          console.log("[TapToTalk] onFocusTarget is undefined");
+        }
         setVoiceTrigger("click");
+        console.log("[TapToTalk] voiceTrigger set to click");
         if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "custom") {
+          console.log("[TapToTalk] Starting custom recorder...");
          try {
            await customRecorder.start();
-
+            console.log("[TapToTalk] Custom recorder started successfully");
+          } catch (e2) {
+            console.error("[TapToTalk] Custom recorder failed:", e2);
            setErrorMsg("Mic access denied");
            setVoiceTrigger(null);
          }
        } else {
+          console.log("[TapToTalk] Starting native speech recognition...");
          if (!nativeSpeech.isSupported) {
+            console.error("[TapToTalk] Native speech not supported");
            setErrorMsg("Speech not supported");
            setVoiceTrigger(null);
            return;
          }
+          console.log("[TapToTalk] Calling nativeSpeech.start()...");
          nativeSpeech.start();
+          console.log("[TapToTalk] nativeSpeech.start() called");
        }
      }
    } finally {