@contentgrowth/llm-service 0.9.98 → 0.9.99
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -261,7 +261,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
   const recognition = new SpeechRecognition();
   const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-  recognition.continuous =
+  recognition.continuous = true;
   recognition.interimResults = true;
   recognition.lang = languageRef.current;
   console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
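For context on the `continuous` change: in the Web Speech API, `recognition.continuous = true` keeps the session alive across pauses so multiple result events can fire, rather than ending after the first final transcript. A minimal standalone sketch of the API this hunk configures (the vendor-prefixed lookup and the "en-US" default are assumptions, not taken from this package):

// Minimal sketch: continuous recognition with interim results.
const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognitionImpl();
recognition.continuous = true;      // keep listening across pauses
recognition.interimResults = true;  // emit partial transcripts while the user speaks
recognition.lang = "en-US";
recognition.onresult = (event) => {
  const result = event.results[event.results.length - 1];
  console.log(result.isFinal ? "final:" : "interim:", result[0].transcript);
};
recognition.start(); // mobile browsers require this inside a user gesture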
@@ -1021,7 +1021,6 @@ var TapToTalk = ({
   const globalConfig = useChatConfig();
   const voiceConfig = propVoiceConfig || ((_a = globalConfig.voice) == null ? void 0 : _a.config);
   const [isTranscribing, setIsTranscribing] = (0, import_react6.useState)(false);
-  const [voiceTrigger, setVoiceTrigger] = (0, import_react6.useState)(null);
   const [errorMsg, setErrorMsg] = (0, import_react6.useState)(null);
   const [showDebug, setShowDebug] = (0, import_react6.useState)(false);
   const [logs, setLogs] = (0, import_react6.useState)([]);
@@ -1067,14 +1066,14 @@ var TapToTalk = ({
     navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
   }, [logs]);
   const handleVoiceResult = (0, import_react6.useCallback)((text, isFinal) => {
+    console.log("[TapToTalk] Native speech result:", text, isFinal);
     if (isFinal) {
       onResult(text);
       setErrorMsg(null);
-      setVoiceTrigger(null);
     }
   }, [onResult]);
   const handleVoiceEnd = (0, import_react6.useCallback)(() => {
-
+    console.log("[TapToTalk] Native speech ended");
   }, []);
   const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   import_react6.default.useEffect(() => {
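The handler shape in this hunk is worth noting: `handleVoiceResult` now sees every interim result (logged only) but forwards just final transcripts via `onResult`, and both callbacks keep stable identities through `useCallback` so the `useSpeechRecognition` hook's inputs do not churn on every render. A reduced sketch of the same pattern in uncompiled React (names mirror the diff; the surrounding component is assumed):

// Sketch: forward only final transcripts; interim ones are informational.
const handleVoiceResult = React.useCallback((text, isFinal) => {
  console.log("speech result:", text, isFinal);
  if (isFinal) {
    onResult(text);    // hand the finished transcript to the parent
    setErrorMsg(null); // a successful result clears any stale error
  }
}, [onResult]);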
@@ -1084,7 +1083,6 @@ var TapToTalk = ({
     }
   }, [nativeSpeech.error]);
   const customRecorder = useAudioRecorder(async (blob) => {
-    setVoiceTrigger(null);
     setIsTranscribing(true);
     setErrorMsg(null);
     if (blob.type === "audio/simulated") {
@@ -1108,7 +1106,7 @@ var TapToTalk = ({
       setIsTranscribing(false);
     }
   });
-  const isListening =
+  const isListening = nativeSpeech.isListening || customRecorder.isRecording;
   const isActive = isListening || isTranscribing;
   const processingRef = (0, import_react6.useRef)(false);
   const isMobile = (0, import_react6.useCallback)(() => {
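Together with the state removal at the top of the component, this hunk replaces a stored flag with a value derived on each render from the two sources of truth, so `isListening` can never drift out of sync with what the speech and recorder hooks report. A sketch of the idiom (hook results mirror the diff; the surrounding component is assumed):

// Sketch: derive UI state instead of duplicating it in useState.
const isListening = nativeSpeech.isListening || customRecorder.isRecording;
const isActive = isListening || isTranscribing;
// Nothing to set or reset by hand: the setVoiceTrigger(null) calls removed
// elsewhere in this diff were maintaining a now-unnecessary copy of this state.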
@@ -1130,7 +1128,6 @@ var TapToTalk = ({
       console.log("[TapToTalk] Debug trigger force-stop");
       if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
       else customRecorder.stop();
-      setVoiceTrigger(null);
     }
     return;
   }
@@ -1153,12 +1150,9 @@ var TapToTalk = ({
     } else {
       customRecorder.stop();
     }
-    setVoiceTrigger(null);
   } else {
     console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
     setErrorMsg(null);
-    setVoiceTrigger("click");
-    console.log("[TapToTalk] voiceTrigger set to click");
     if (!isMobile() && onFocusTarget) {
       console.log("[TapToTalk] Desktop: calling onFocusTarget()");
       onFocusTarget();
@@ -1171,14 +1165,12 @@ var TapToTalk = ({
     } catch (e2) {
       console.error("[TapToTalk] Custom recorder failed:", e2);
       setErrorMsg("Mic access denied");
-      setVoiceTrigger(null);
     }
   } else {
     console.log("[TapToTalk] Starting native speech recognition...");
     if (!nativeSpeech.isSupported) {
       console.error("[TapToTalk] Native speech not supported");
       setErrorMsg("Speech not supported");
-      setVoiceTrigger(null);
       return;
     }
     console.log("[TapToTalk] Calling nativeSpeech.start()...");
@@ -1231,6 +1223,7 @@ var TapToTalk = ({
   {
     type: "button",
     onClick: toggleVoice,
+    style: { touchAction: "manipulation" },
     disabled: disabled || isTranscribing && !isListening,
     className: `flex items-center justify-center gap-3 px-6 py-3 rounded-xl transition-all duration-300 w-full font-medium shadow-md active:scale-[0.98]
   ${bgColor} text-white
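The added `style: { touchAction: "manipulation" }` maps to the CSS property `touch-action: manipulation`, which tells mobile browsers the element only needs pan and pinch-zoom gestures, letting them skip the double-tap-to-zoom heuristic and its click delay. That matters here because speech recognition must be started inside a fresh user gesture. In uncompiled JSX the addition looks like this (the button label is assumed):

// Sketch: the same prop in JSX form.
<button
  type="button"
  onClick={toggleVoice}
  style={{ touchAction: "manipulation" }} // no double-tap zoom, so clicks fire promptly
>
  Tap to talk
</button>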