@contentgrowth/llm-service 0.9.7 → 0.9.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -167,6 +167,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     onResultRef.current = onResult;
     onEndRef.current = onEnd;
   }, [onResult, onEnd]);
+  const isStartingRef = useRef(false);
   useEffect(() => {
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
@@ -176,10 +177,14 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       recognition.continuous = true;
       recognition.interimResults = true;
       recognition.onstart = () => {
+        console.log("[useSpeechRecognition] Native onstart event fired");
+        isStartingRef.current = false;
         setIsListening(true);
         setError(null);
       };
       recognition.onend = () => {
+        console.log("[useSpeechRecognition] Native onend event fired");
+        isStartingRef.current = false;
         if (isSimulatingRef.current) {
           return;
         }
@@ -203,6 +208,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       };
       recognition.onerror = (event) => {
         console.error("[useSpeechRecognition] Native onerror event:", event.error);
+        isStartingRef.current = false;
         if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
           console.warn("Speech recognition blocked. Simulating input for development...");
           isSimulatingRef.current = true;
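
The common thread in the hunks above: the new isStartingRef flag is cleared in every terminal callback of the native recognition object (onstart, onend, onerror), so a start attempt that fails or completes can never leave the guard stuck. A minimal sketch of that lifecycle, using the names from the diff; wireLifecycle is a hypothetical helper for illustration, not the package's actual code:

// Sketch of the flag lifecycle assumed by this diff. isStartingRef comes
// from the diff; wireLifecycle is illustrative only.
const isStartingRef = { current: false };

function wireLifecycle(recognition) {
  recognition.onstart = () => {
    isStartingRef.current = false; // start confirmed, guard released
  };
  recognition.onend = () => {
    isStartingRef.current = false; // session ended, next start() allowed
  };
  recognition.onerror = () => {
    isStartingRef.current = false; // failed start must not block retries
  };
}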
@@ -227,6 +233,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       }
     }
     return () => {
+      console.log("[useSpeechRecognition] Effect cleanup - stopping recognition");
       if (isSimulatingRef.current && simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
@@ -238,21 +245,40 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   }, []);
   useEffect(() => {
     if (recognitionRef.current) {
+      console.log("[useSpeechRecognition] Updating language to:", language);
       recognitionRef.current.lang = language;
     }
   }, [language]);
   const start = useCallback(() => {
-
-
-
-
-
-
-
-
+    console.log("[useSpeechRecognition] start() called");
+    if (isSimulatingRef.current) return;
+    if (!recognitionRef.current) {
+      console.error("[useSpeechRecognition] Recognition instance missing");
+      return;
+    }
+    if (isStartingRef.current) {
+      console.warn("[useSpeechRecognition] Already starting - ignoring duplicate call");
+      return;
+    }
+    if (recognitionRef.current.isListening) {
+      console.warn("[useSpeechRecognition] Already listening - ignoring");
+    }
+    if (isListening) {
+      console.warn("[useSpeechRecognition] App state says already listening - ignoring");
+      return;
+    }
+    try {
+      setTranscript("");
+      isStartingRef.current = true;
+      recognitionRef.current.start();
+      console.log("[useSpeechRecognition] recognition.start() executed");
+    } catch (error2) {
+      isStartingRef.current = false;
+      console.error("[useSpeechRecognition] Failed to start recognition:", error2);
     }
   }, [isListening]);
   const stop = useCallback(() => {
+    console.log("[useSpeechRecognition] stop() called");
     if (isSimulatingRef.current) {
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
@@ -266,11 +292,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       if (onEndRef.current) onEndRef.current();
       return;
     }
-    if (recognitionRef.current
+    if (recognitionRef.current) {
       recognitionRef.current.stop();
-
+      console.log("[useSpeechRecognition] recognition.stop() executed");
     }
-  }, [
+  }, []);
   const resetTranscript = useCallback(() => {
     setTranscript("");
   }, []);
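
The rewritten start() above appears aimed at the Web Speech API rule that calling start() on a recognition instance that is already started throws an InvalidStateError. A minimal hook-shaped sketch of the same guard, assuming the recognition instance already lives in a recognitionRef as in the hook above; useGuardedStart is a hypothetical name, not part of this package:

import { useCallback, useRef } from "react";

// Minimal sketch of the double-start guard. Assumes recognitionRef.current
// holds a SpeechRecognition instance; everything else is illustrative.
function useGuardedStart(recognitionRef) {
  const isStartingRef = useRef(false);

  return useCallback(() => {
    const recognition = recognitionRef.current;
    if (!recognition || isStartingRef.current) return; // missing, or already mid-start
    try {
      isStartingRef.current = true;
      recognition.start(); // throws InvalidStateError if already started
    } catch (err) {
      isStartingRef.current = false; // reset so a later call can retry
      console.error("start() failed:", err);
    }
  }, [recognitionRef]);
}

The flag lives in a ref rather than state so the check-and-set happens synchronously within one click handler, with no re-render in between.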
@@ -879,51 +905,70 @@ var TapToTalk = ({
   });
   const isListening = !!voiceTrigger || nativeSpeech.isListening || customRecorder.isRecording;
   const isActive = isListening || isTranscribing;
+  const processingRef = useRef4(false);
   const toggleVoice = async () => {
-
-
-      tapCountRef.current.count++;
-    } else {
-      tapCountRef.current.count = 1;
-    }
-    tapCountRef.current.lastTap = now;
-    if (tapCountRef.current.count >= 5) {
-      setShowDebug((prev) => !prev);
-      tapCountRef.current.count = 0;
-      if (isActive) {
-        if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
-        else customRecorder.stop();
-        setVoiceTrigger(null);
-      }
+    if (processingRef.current) {
+      console.log("[TapToTalk] toggleVoice ignored - processing");
       return;
     }
-
-
-
-
+    processingRef.current = true;
+    console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
+    try {
+      const now = Date.now();
+      if (now - tapCountRef.current.lastTap < 500) {
+        tapCountRef.current.count++;
       } else {
-
+        tapCountRef.current.count = 1;
       }
-
-
-
-
-
-
-
-
-    } catch (e) {
-      setErrorMsg("Mic access denied");
+      tapCountRef.current.lastTap = now;
+      if (tapCountRef.current.count >= 5) {
+        setShowDebug((prev) => !prev);
+        tapCountRef.current.count = 0;
+        if (isActive) {
+          console.log("[TapToTalk] Debug trigger force-stop");
+          if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
+          else customRecorder.stop();
           setVoiceTrigger(null);
         }
-
-
-
-
+        return;
+      }
+      if (isActive) {
+        if (isTranscribing && !isListening) {
+          console.log("[TapToTalk] Ignoring click during transcription");
          return;
        }
-
+        console.log("[TapToTalk] Stopping voice...");
+        if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
+          nativeSpeech.stop();
+        } else {
+          customRecorder.stop();
+        }
+        setVoiceTrigger(null);
+      } else {
+        console.log("[TapToTalk] Starting voice...");
+        setErrorMsg(null);
+        onFocusTarget == null ? void 0 : onFocusTarget();
+        setVoiceTrigger("click");
+        if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "custom") {
+          try {
+            await customRecorder.start();
+          } catch (e) {
+            setErrorMsg("Mic access denied");
+            setVoiceTrigger(null);
+          }
+        } else {
+          if (!nativeSpeech.isSupported) {
+            setErrorMsg("Speech not supported");
+            setVoiceTrigger(null);
+            return;
+          }
+          nativeSpeech.start();
+        }
      }
+    } finally {
+      setTimeout(() => {
+        processingRef.current = false;
+      }, 300);
    }
  };
  let bgColor = accentColor;
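
toggleVoice now runs under a ref-based re-entrancy lock: overlapping taps are dropped while the async handler is in flight, and the finally block holds the lock for a further 300 ms to swallow double-taps. A generic sketch of that pattern; useSingleFlight is a hypothetical name, not part of this package:

import { useRef } from "react";

// Generic version of the re-entrancy lock used by toggleVoice above:
// drop calls while the async handler runs, then keep the lock for a
// short cooldown to absorb double-taps. Illustrative only.
function useSingleFlight(handler, cooldownMs = 300) {
  const busyRef = useRef(false);

  return async (...args) => {
    if (busyRef.current) return; // drop overlapping calls
    busyRef.current = true;
    try {
      await handler(...args);
    } finally {
      setTimeout(() => {
        busyRef.current = false; // reopen after the cooldown window
      }, cooldownMs);
    }
  };
}

A ref rather than state keeps the lock update synchronous, so dropping a duplicate tap never forces a re-render.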