@contentgrowth/llm-service 0.9.97 → 0.9.99
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -261,7 +261,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
   const recognition = new SpeechRecognition();
   const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-  recognition.continuous =
+  recognition.continuous = true;
   recognition.interimResults = true;
   recognition.lang = languageRef.current;
   console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
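Note: the removed line is truncated by the diff viewer, but the isMobile check computed just above it suggests continuous was previously set per device; the new code pins it to true unconditionally, so the recognizer keeps listening across pauses instead of ending after the first final result. A minimal sketch of the relevant Web Speech API surface, not the package's code, assuming a browser with the webkit-prefixed fallback:

    var SR = window.SpeechRecognition || window.webkitSpeechRecognition;
    var rec = new SR();
    rec.continuous = true;      // keep the session open across pauses
    rec.interimResults = true;  // deliver partial transcripts while speaking
    rec.onresult = function (event) {
      var last = event.results[event.results.length - 1];
      console.log(last.isFinal ? "final:" : "interim:", last[0].transcript);
    };
    rec.start(); // most browsers require this to run inside a user gesture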
@@ -300,6 +300,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   recognition.onend = () => {
     console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
     isStartingRef.current = false;
+    if (recognitionRef.current === recognition) {
+      console.log("[useSpeechRecognition] Nullifying recognitionRef (onend)");
+      recognitionRef.current = null;
+    }
     if (isSimulatingRef.current) {
       console.log("[useSpeechRecognition] onend ignored - simulating");
       return;
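Note: onend can fire asynchronously, after the hook has already created a replacement instance; guarding on recognitionRef.current === recognition means only the instance that still owns the ref may null it, so a late onend from a superseded instance becomes a no-op. The pattern in isolation (names taken from the diff; the surrounding hook code is assumed):

    recognition.onend = function () {
      if (recognitionRef.current === recognition) {
        recognitionRef.current = null; // this instance still owns the ref
      }
      // otherwise a newer instance owns the ref; leave it alone
    };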
@@ -345,6 +349,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     } else if (event.error === "network") {
       console.error("[useSpeechRecognition] - network: Network error during recognition");
     }
+    if (recognitionRef.current === recognition) {
+      console.log("[useSpeechRecognition] Nullifying recognitionRef (onerror)");
+      recognitionRef.current = null;
+    }
     isStartingRef.current = false;
     if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
       console.warn("Speech recognition blocked. Simulating input for development...");
@@ -405,6 +413,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   if (recognitionRef.current) {
     console.log("[useSpeechRecognition] Stopping existing instance before creating new one");
     try {
+      recognitionRef.current.onend = null;
+      recognitionRef.current.onerror = null;
      recognitionRef.current.stop();
     } catch (e) {
     }
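Note: stop() makes the outgoing instance fire onend (and possibly onerror) later; detaching both handlers first keeps that instance's callbacks, including the ref-nulling added above, from racing against the new instance's setup. The teardown order, restated with comments (names from the diff):

    if (recognitionRef.current) {
      recognitionRef.current.onend = null;   // no late onend
      recognitionRef.current.onerror = null; // no late onerror
      try {
        recognitionRef.current.stop();
      } catch (e) {
        // defensive, mirroring the empty catch in the diff
      }
    }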
@@ -1011,7 +1021,6 @@ var TapToTalk = ({
   const globalConfig = useChatConfig();
   const voiceConfig = propVoiceConfig || ((_a = globalConfig.voice) == null ? void 0 : _a.config);
   const [isTranscribing, setIsTranscribing] = (0, import_react6.useState)(false);
-  const [voiceTrigger, setVoiceTrigger] = (0, import_react6.useState)(null);
   const [errorMsg, setErrorMsg] = (0, import_react6.useState)(null);
   const [showDebug, setShowDebug] = (0, import_react6.useState)(false);
   const [logs, setLogs] = (0, import_react6.useState)([]);
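Note: voiceTrigger appears to have been write-only state, set and cleared but never read; this hunk deletes the useState, and the remaining TapToTalk hunks below remove each setVoiceTrigger call site that the deletion orphans.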
@@ -1057,14 +1066,14 @@ var TapToTalk = ({
   navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
 }, [logs]);
 const handleVoiceResult = (0, import_react6.useCallback)((text, isFinal) => {
+  console.log("[TapToTalk] Native speech result:", text, isFinal);
   if (isFinal) {
     onResult(text);
     setErrorMsg(null);
-    setVoiceTrigger(null);
   }
 }, [onResult]);
 const handleVoiceEnd = (0, import_react6.useCallback)(() => {
-
+  console.log("[TapToTalk] Native speech ended");
 }, []);
 const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
 import_react6.default.useEffect(() => {
@@ -1074,7 +1083,6 @@ var TapToTalk = ({
   }
 }, [nativeSpeech.error]);
 const customRecorder = useAudioRecorder(async (blob) => {
-  setVoiceTrigger(null);
   setIsTranscribing(true);
   setErrorMsg(null);
   if (blob.type === "audio/simulated") {
@@ -1098,7 +1106,7 @@ var TapToTalk = ({
     setIsTranscribing(false);
   }
 });
-const isListening =
+const isListening = nativeSpeech.isListening || customRecorder.isRecording;
 const isActive = isListening || isTranscribing;
 const processingRef = (0, import_react6.useRef)(false);
 const isMobile = (0, import_react6.useCallback)(() => {
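Note: the removed line is truncated by the diff viewer; the replacement derives isListening on each render from the two underlying sources instead of mirroring it into state, so it can never go stale when either source changes. The derivation, restated with comments:

    // Derived during render, never stored: always consistent with its inputs.
    var isListening = nativeSpeech.isListening || customRecorder.isRecording;
    var isActive = isListening || isTranscribing; // listening or transcribing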
@@ -1106,10 +1114,6 @@ var TapToTalk = ({
   return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
 }, []);
 const toggleVoice = async (e) => {
-  if (e) {
-    e.preventDefault();
-    e.stopPropagation();
-  }
   const now = Date.now();
   if (now - tapCountRef.current.lastTap < 500) {
     tapCountRef.current.count++;
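Note: the imperative preventDefault/stopPropagation calls are dropped in favor of the declarative equivalents added to the button element further down, type: "button" and touchAction: "manipulation"; see the sketch after that hunk.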
@@ -1124,7 +1128,6 @@ var TapToTalk = ({
       console.log("[TapToTalk] Debug trigger force-stop");
       if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
       else customRecorder.stop();
-      setVoiceTrigger(null);
     }
     return;
   }
@@ -1147,12 +1150,9 @@ var TapToTalk = ({
     } else {
       customRecorder.stop();
     }
-    setVoiceTrigger(null);
   } else {
     console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
     setErrorMsg(null);
-    setVoiceTrigger("click");
-    console.log("[TapToTalk] voiceTrigger set to click");
     if (!isMobile() && onFocusTarget) {
       console.log("[TapToTalk] Desktop: calling onFocusTarget()");
       onFocusTarget();
@@ -1165,14 +1165,12 @@ var TapToTalk = ({
       } catch (e2) {
         console.error("[TapToTalk] Custom recorder failed:", e2);
         setErrorMsg("Mic access denied");
-        setVoiceTrigger(null);
       }
     } else {
       console.log("[TapToTalk] Starting native speech recognition...");
       if (!nativeSpeech.isSupported) {
         console.error("[TapToTalk] Native speech not supported");
         setErrorMsg("Speech not supported");
-        setVoiceTrigger(null);
         return;
       }
       console.log("[TapToTalk] Calling nativeSpeech.start()...");
@@ -1223,7 +1221,9 @@ var TapToTalk = ({
 /* @__PURE__ */ (0, import_jsx_runtime6.jsxs)(
   "button",
   {
+    type: "button",
     onClick: toggleVoice,
+    style: { touchAction: "manipulation" },
     disabled: disabled || isTranscribing && !isListening,
     className: `flex items-center justify-center gap-3 px-6 py-3 rounded-xl transition-all duration-300 w-full font-medium shadow-md active:scale-[0.98]
       ${bgColor} text-white
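Note: the two added props are presumably the declarative replacements for the preventDefault/stopPropagation calls removed above: type="button" stops the button from submitting an enclosing form, and touch-action: manipulation tells mobile browsers to skip double-tap-to-zoom handling so a tap produces a click without the legacy delay. In source JSX (the diff shows the compiled jsx-runtime form) the element presumably reads roughly:

    // Sketch in source JSX; props other than the two additions are elided.
    <button
      type="button"
      onClick={toggleVoice}
      style={{ touchAction: "manipulation" }}
    >
      {label}
    </button>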
@@ -1231,9 +1231,9 @@ var TapToTalk = ({
     ${className}`,
     title: label,
     children: [
-      /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("div", { className: "flex items-center justify-center shrink-0", children: Icon }),
-      /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "truncate", children: label }),
-      errorMsg && /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1", children: errorMsg })
+      /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("div", { className: "flex items-center justify-center shrink-0 pointer-events-none", children: Icon }),
+      /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "truncate pointer-events-none", children: label }),
+      errorMsg && /* @__PURE__ */ (0, import_jsx_runtime6.jsx)("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1 pointer-events-none", children: errorMsg })
     ]
   }
 )
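Note: with pointer-events-none (CSS pointer-events: none) on every child, a tap anywhere inside the control reports the button element itself as event.target rather than an inner div or span, which keeps target-based logic such as the double-tap bookkeeping in toggleVoice uniform. A small demo of the effect, independent of this package:

    // Children with pointer-events: none are hit-test transparent, so the
    // button, not the span or div inside it, becomes the event target.
    var btn = document.querySelector("button");
    btn.addEventListener("click", function (event) {
      console.log(event.target === btn); // true when children opt out of hits
    });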