@contentgrowth/llm-service 0.9.91 → 0.9.92

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -211,45 +211,62 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   }, [onResult, onEnd]);
   const isStartingRef = (0, import_react2.useRef)(false);
   (0, import_react2.useEffect)(() => {
+    const isMobile = typeof window !== "undefined" && (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0);
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+      console.log("[useSpeechRecognition] Init - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile);
       if (SpeechRecognition) {
         setIsSupported(true);
         const recognition = new SpeechRecognition();
         recognition.continuous = true;
         recognition.interimResults = true;
+        console.log("[useSpeechRecognition] Created recognition instance. continuous:", recognition.continuous, "interimResults:", recognition.interimResults);
         recognition.onstart = () => {
-          console.log("[useSpeechRecognition] Native onstart event fired");
+          console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
           isStartingRef.current = false;
           setIsListening(true);
           setError(null);
         };
         recognition.onend = () => {
-          console.log("[useSpeechRecognition] Native onend event fired");
+          console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
           isStartingRef.current = false;
           if (isSimulatingRef.current) {
+            console.log("[useSpeechRecognition] onend ignored - simulating");
             return;
           }
           setIsListening(false);
           if (onEndRef.current) onEndRef.current();
         };
         recognition.onresult = (event) => {
+          console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
           let interimTranscript = "";
           let finalTranscript = "";
           for (let i = event.results.length - 1; i < event.results.length; ++i) {
             const result = event.results[i];
             if (result.isFinal) {
               finalTranscript += result[0].transcript;
+              console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
               if (onResultRef.current) onResultRef.current(finalTranscript, true);
             } else {
               interimTranscript += result[0].transcript;
+              console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
               if (onResultRef.current) onResultRef.current(interimTranscript, false);
             }
           }
           setTranscript((prev) => prev + finalTranscript);
         };
         recognition.onerror = (event) => {
-          console.error("[useSpeechRecognition] Native onerror event:", event.error);
+          console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
+          console.error("[useSpeechRecognition] Error details - This could be caused by:");
+          if (event.error === "aborted") {
+            console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
+          } else if (event.error === "not-allowed") {
+            console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
+          } else if (event.error === "no-speech") {
+            console.error("[useSpeechRecognition] - no-speech: No speech detected");
+          } else if (event.error === "network") {
+            console.error("[useSpeechRecognition] - network: Network error during recognition");
+          }
           isStartingRef.current = false;
           if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
             console.warn("Speech recognition blocked. Simulating input for development...");
@@ -292,7 +309,13 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     }
   }, [language]);
   const start = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] start() called. isListening:", isListening, "isStarting:", isStartingRef.current, "hasInstance:", !!recognitionRef.current);
+    var _a;
+    const startTimestamp = Date.now();
+    console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
+    console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasInstance:", !!recognitionRef.current);
+    if (typeof document !== "undefined") {
+      console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
+    }
     if (isSimulatingRef.current) {
       console.log("[useSpeechRecognition] isSimulating, ignoring start");
       return;
@@ -315,11 +338,15 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     try {
       setTranscript("");
       isStartingRef.current = true;
+      console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
       recognitionRef.current.start();
-      console.log("[useSpeechRecognition] recognition.start() executed successfully");
+      console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
     } catch (error2) {
       isStartingRef.current = false;
-      console.error("[useSpeechRecognition] Failed to start recognition:", error2);
+      console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
+      if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
+        console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
+      }
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
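
The catch branch above special-cases InvalidStateError, which the Web Speech API throws when start() is called on a recognizer that is already running. A minimal sketch of that guard in isolation (the function name and warning text are illustrative):

    // SpeechRecognition.start() throws a DOMException named "InvalidStateError"
    // when recognition has already started, so a duplicate start() is downgraded
    // to a warning instead of crashing the handler. The recognizer's onstart
    // handler is expected to reset the flag once the engine confirms.
    function safeStart(recognition, isStartingRef) {
      if (isStartingRef.current) return; // a start is already in flight
      isStartingRef.current = true;
      try {
        recognition.start();
      } catch (err) {
        isStartingRef.current = false;
        if (err && err.name === "InvalidStateError") {
          console.warn("recognition already running; duplicate start() ignored");
        } else {
          throw err;
        }
      }
    }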
@@ -643,26 +670,47 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       handleSubmit();
     }
   };
+  const isMobile = (0, import_react5.useCallback)(() => {
+    if (typeof window === "undefined") return false;
+    return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+  }, []);
   const startRecording = async (trigger) => {
     var _a2;
-    if (voiceTrigger || isTranscribing) return;
+    console.log("[ChatInputArea] startRecording called. trigger:", trigger, "isMobile:", isMobile());
+    console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
+    if (voiceTrigger || isTranscribing) {
+      console.log("[ChatInputArea] startRecording ignored - already active");
+      return;
+    }
     setVoiceTrigger(trigger);
     setVoiceError(null);
+    console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
+      console.log("[ChatInputArea] Using native speech recognition");
       if (!nativeSpeech.isSupported) {
+        console.error("[ChatInputArea] Native speech not supported");
         alert("Speech recognition is not supported in this browser.");
         setVoiceTrigger(null);
         return;
       }
+      console.log("[ChatInputArea] Calling nativeSpeech.start()...");
       nativeSpeech.start();
+      console.log("[ChatInputArea] nativeSpeech.start() called");
     } else {
+      console.log("[ChatInputArea] Using custom recorder");
       await customRecorder.start();
+      console.log("[ChatInputArea] Custom recorder started");
+    }
+    if (!isMobile()) {
+      console.log("[ChatInputArea] Re-focusing textarea (desktop only)");
+      setTimeout(() => {
+        var _a3;
+        return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+      }, 0);
+    } else {
+      console.log("[ChatInputArea] SKIPPING textarea focus on mobile to prevent keyboard conflict");
     }
-    setTimeout(() => {
-      var _a3;
-      return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
-    }, 0);
   };
   const stopRecording = () => {
     if (!voiceTrigger) return;
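
The behavioral change in this hunk: the deferred textarea.focus() now runs only on non-mobile devices, because focusing an input on a touch device raises the soft keyboard, which steals focus and can abort an in-flight recognition (the "aborted" error enumerated in the first hunk). A sketch of the pattern, reusing the hypothetical isMobileDevice helper from above:

    // Restore focus to the input only on desktop. setTimeout(..., 0) defers
    // the focus until after the current click event has finished dispatching;
    // the mobile branch deliberately leaves the soft keyboard down.
    function refocusAfterVoiceStart(textarea) {
      if (isMobileDevice()) return; // keep keyboard hidden on touch devices
      setTimeout(() => textarea?.focus(), 0);
    }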
@@ -951,18 +999,22 @@ var TapToTalk = ({
   const isListening = !!voiceTrigger || nativeSpeech.isListening || customRecorder.isRecording;
   const isActive = isListening || isTranscribing;
   const processingRef = (0, import_react6.useRef)(false);
+  const isMobile = (0, import_react6.useCallback)(() => {
+    if (typeof window === "undefined") return false;
+    return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+  }, []);
   const toggleVoice = async (e) => {
     if (e) {
       e.preventDefault();
       e.stopPropagation();
     }
-    console.trace("[TapToTalk] toggleVoice called trace");
+    console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
     if (processingRef.current) {
       console.log("[TapToTalk] toggleVoice ignored - processing");
       return;
     }
     processingRef.current = true;
-    console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
+    console.log("[TapToTalk] toggleVoice called. isActive:", isActive, "isListening:", isListening, "isTranscribing:", isTranscribing);
     try {
       const now = Date.now();
       if (now - tapCountRef.current.lastTap < 500) {
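
toggleVoice layers two guards: processingRef rejects re-entrant calls while an async toggle is still running, and the 500 ms last-tap window swallows accidental double-taps. A simplified sketch of the combination (state shape and names are illustrative; the real code keeps lastTap on tapCountRef):

    const processingRef = { current: false };
    let lastTap = 0;

    async function guardedToggle(doToggle) {
      if (processingRef.current) return; // re-entrancy guard
      processingRef.current = true;
      try {
        const now = Date.now();
        if (now - lastTap < 500) return; // double-tap debounce window
        lastTap = now;
        await doToggle();
      } finally {
        processingRef.current = false; // always release the guard
      }
    }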
@@ -995,29 +1047,39 @@ var TapToTalk = ({
         }
         setVoiceTrigger(null);
       } else {
-        console.log("[TapToTalk] Starting voice...");
+        console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
         setErrorMsg(null);
-        if (onFocusTarget) {
-          console.log("[TapToTalk] calling onFocusTarget() - this might trigger keyboard");
+        if (onFocusTarget && !isMobile()) {
+          console.log("[TapToTalk] calling onFocusTarget() (desktop only)");
           onFocusTarget();
+        } else if (onFocusTarget) {
+          console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
         } else {
           console.log("[TapToTalk] onFocusTarget is undefined");
         }
         setVoiceTrigger("click");
+        console.log("[TapToTalk] voiceTrigger set to click");
         if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "custom") {
+          console.log("[TapToTalk] Starting custom recorder...");
           try {
             await customRecorder.start();
+            console.log("[TapToTalk] Custom recorder started successfully");
           } catch (e2) {
+            console.error("[TapToTalk] Custom recorder failed:", e2);
             setErrorMsg("Mic access denied");
             setVoiceTrigger(null);
           }
         } else {
+          console.log("[TapToTalk] Starting native speech recognition...");
           if (!nativeSpeech.isSupported) {
+            console.error("[TapToTalk] Native speech not supported");
             setErrorMsg("Speech not supported");
             setVoiceTrigger(null);
             return;
           }
+          console.log("[TapToTalk] Calling nativeSpeech.start()...");
           nativeSpeech.start();
+          console.log("[TapToTalk] nativeSpeech.start() called");
         }
       }
     } finally {