@contentgrowth/llm-service 0.9.91 → 0.9.93
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -161,76 +161,32 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  const recognitionRef = useRef(null);
  const isSimulatingRef = useRef(false);
  const simulationTimeoutRef = useRef(null);
+ const languageRef = useRef(language);
+ const instanceIdRef = useRef(Math.random().toString(36).slice(2));
+ const lastStartAtRef = useRef(null);
+ const lastStopAtRef = useRef(null);
  const onResultRef = useRef(onResult);
  const onEndRef = useRef(onEnd);
  useEffect(() => {
  onResultRef.current = onResult;
  onEndRef.current = onEnd;
  }, [onResult, onEnd]);
+ useEffect(() => {
+ languageRef.current = language;
+ if (recognitionRef.current) {
+ console.log("[useSpeechRecognition] Updating language to:", language);
+ recognitionRef.current.lang = language;
+ }
+ }, [language]);
  const isStartingRef = useRef(false);
  useEffect(() => {
+ var _a;
  if (typeof window !== "undefined") {
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
-
-
-
-
- recognition.interimResults = true;
- recognition.onstart = () => {
- console.log("[useSpeechRecognition] Native onstart event fired");
- isStartingRef.current = false;
- setIsListening(true);
- setError(null);
- };
- recognition.onend = () => {
- console.log("[useSpeechRecognition] Native onend event fired");
- isStartingRef.current = false;
- if (isSimulatingRef.current) {
- return;
- }
- setIsListening(false);
- if (onEndRef.current) onEndRef.current();
- };
- recognition.onresult = (event) => {
- let interimTranscript = "";
- let finalTranscript = "";
- for (let i = event.results.length - 1; i < event.results.length; ++i) {
- const result = event.results[i];
- if (result.isFinal) {
- finalTranscript += result[0].transcript;
- if (onResultRef.current) onResultRef.current(finalTranscript, true);
- } else {
- interimTranscript += result[0].transcript;
- if (onResultRef.current) onResultRef.current(interimTranscript, false);
- }
- }
- setTranscript((prev) => prev + finalTranscript);
- };
- recognition.onerror = (event) => {
- console.error("[useSpeechRecognition] Native onerror event:", event.error);
- isStartingRef.current = false;
- if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
- console.warn("Speech recognition blocked. Simulating input for development...");
- isSimulatingRef.current = true;
- setError(null);
- setIsListening(true);
- simulationTimeoutRef.current = setTimeout(() => {
- const mockText = "This is a simulated voice input for testing.";
- setTranscript((prev) => prev + (prev ? " " : "") + mockText);
- if (onResultRef.current) onResultRef.current(mockText, true);
- isSimulatingRef.current = false;
- setIsListening(false);
- if (onEndRef.current) onEndRef.current();
- simulationTimeoutRef.current = null;
- }, 3e3);
- return;
- }
- console.error("Speech recognition error", event.error);
- setError(event.error);
- setIsListening(false);
- };
- recognitionRef.current = recognition;
- }
+ console.log("[useSpeechRecognition] Env - isSecureContext:", window.isSecureContext, "protocol:", (_a = window.location) == null ? void 0 : _a.protocol);
+ const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+ console.log("[useSpeechRecognition] Init check - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
+ setIsSupported(!!SpeechRecognition);
  }
  return () => {
  console.log("[useSpeechRecognition] Effect cleanup - stopping recognition");
@@ -239,49 +195,204 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  simulationTimeoutRef.current = null;
  }
  if (recognitionRef.current) {
-
+ try {
+ recognitionRef.current.stop();
+ } catch (e) {
+ }
+ recognitionRef.current = null;
+ }
+ if (typeof window !== "undefined") {
+ const w = window;
+ if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+ console.log("[useSpeechRecognition] Cleanup clearing global active instance lock. instanceId:", instanceIdRef.current);
+ w.__llmSpeechRecognitionActiveInstanceId = null;
+ }
  }
  };
  }, []);
-
-
-
-
+ const createRecognitionInstance = useCallback(() => {
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+ if (!SpeechRecognition) {
+ console.error("[useSpeechRecognition] SpeechRecognition not available");
+ return null;
  }
-
+ console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
+ const recognition = new SpeechRecognition();
+ const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+ recognition.continuous = !isMobile;
+ recognition.interimResults = true;
+ recognition.lang = languageRef.current;
+ console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
+ recognition.onaudiostart = () => {
+ console.log("[useSpeechRecognition] Native onaudiostart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onaudioend = () => {
+ console.log("[useSpeechRecognition] Native onaudioend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onsoundstart = () => {
+ console.log("[useSpeechRecognition] Native onsoundstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onsoundend = () => {
+ console.log("[useSpeechRecognition] Native onsoundend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onspeechstart = () => {
+ console.log("[useSpeechRecognition] Native onspeechstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onspeechend = () => {
+ console.log("[useSpeechRecognition] Native onspeechend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onnomatch = () => {
+ console.log("[useSpeechRecognition] Native onnomatch. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+ };
+ recognition.onstart = () => {
+ console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
+ isStartingRef.current = false;
+ setIsListening(true);
+ setError(null);
+ if (typeof window !== "undefined") {
+ const w = window;
+ w.__llmSpeechRecognitionActiveInstanceId = instanceIdRef.current;
+ console.log("[useSpeechRecognition] Set global active instance lock. instanceId:", instanceIdRef.current);
+ }
+ };
+ recognition.onend = () => {
+ console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
+ isStartingRef.current = false;
+ if (isSimulatingRef.current) {
+ console.log("[useSpeechRecognition] onend ignored - simulating");
+ return;
+ }
+ setIsListening(false);
+ if (onEndRef.current) onEndRef.current();
+ if (typeof window !== "undefined") {
+ const w = window;
+ if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+ w.__llmSpeechRecognitionActiveInstanceId = null;
+ console.log("[useSpeechRecognition] Cleared global active instance lock. instanceId:", instanceIdRef.current);
+ }
+ }
+ };
+ recognition.onresult = (event) => {
+ console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
+ let interimTranscript = "";
+ let finalTranscript = "";
+ for (let i = event.results.length - 1; i < event.results.length; ++i) {
+ const result = event.results[i];
+ if (result.isFinal) {
+ finalTranscript += result[0].transcript;
+ console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
+ if (onResultRef.current) onResultRef.current(finalTranscript, true);
+ } else {
+ interimTranscript += result[0].transcript;
+ console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
+ if (onResultRef.current) onResultRef.current(interimTranscript, false);
+ }
+ }
+ setTranscript((prev) => prev + finalTranscript);
+ };
+ recognition.onerror = (event) => {
+ console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
+ console.error("[useSpeechRecognition] Error context - lastStartAt:", lastStartAtRef.current, "lastStopAt:", lastStopAtRef.current, "instanceId:", instanceIdRef.current);
+ console.error("[useSpeechRecognition] Error details - This could be caused by:");
+ if (event.error === "aborted") {
+ console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
+ } else if (event.error === "not-allowed") {
+ console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
+ } else if (event.error === "no-speech") {
+ console.error("[useSpeechRecognition] - no-speech: No speech detected");
+ } else if (event.error === "network") {
+ console.error("[useSpeechRecognition] - network: Network error during recognition");
+ }
+ isStartingRef.current = false;
+ if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
+ console.warn("Speech recognition blocked. Simulating input for development...");
+ isSimulatingRef.current = true;
+ setError(null);
+ setIsListening(true);
+ simulationTimeoutRef.current = setTimeout(() => {
+ const mockText = "This is a simulated voice input for testing.";
+ setTranscript((prev) => prev + (prev ? " " : "") + mockText);
+ if (onResultRef.current) onResultRef.current(mockText, true);
+ isSimulatingRef.current = false;
+ setIsListening(false);
+ if (onEndRef.current) onEndRef.current();
+ simulationTimeoutRef.current = null;
+ }, 3e3);
+ return;
+ }
+ console.error("Speech recognition error", event.error);
+ setError(event.error);
+ setIsListening(false);
+ if (typeof window !== "undefined") {
+ const w = window;
+ if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+ w.__llmSpeechRecognitionActiveInstanceId = null;
+ console.log("[useSpeechRecognition] Cleared global active instance lock after error. instanceId:", instanceIdRef.current);
+ }
+ }
+ };
+ return recognition;
+ }, []);
  const start = useCallback(() => {
-
+ var _a;
+ const startTimestamp = Date.now();
+ console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
+ console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasExistingInstance:", !!recognitionRef.current);
+ if (typeof document !== "undefined") {
+ console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
+ }
  if (isSimulatingRef.current) {
  console.log("[useSpeechRecognition] isSimulating, ignoring start");
  return;
  }
- if (!recognitionRef.current) {
- console.error("[useSpeechRecognition] Recognition instance missing");
- return;
- }
  if (isStartingRef.current) {
  console.warn("[useSpeechRecognition] Already starting - ignoring duplicate call");
  return;
  }
- if (recognitionRef.current.isListening) {
- console.warn("[useSpeechRecognition] Already listening (native prop) - ignoring");
- }
  if (isListening) {
  console.warn("[useSpeechRecognition] App state says already listening - ignoring");
  return;
  }
+ if (typeof window !== "undefined") {
+ const w = window;
+ if (w.__llmSpeechRecognitionActiveInstanceId && w.__llmSpeechRecognitionActiveInstanceId !== instanceIdRef.current) {
+ console.error("[useSpeechRecognition] Another recognition instance appears active. activeInstanceId:", w.__llmSpeechRecognitionActiveInstanceId, "thisInstanceId:", instanceIdRef.current);
+ }
+ }
  try {
+ if (recognitionRef.current) {
+ console.log("[useSpeechRecognition] Stopping existing instance before creating new one");
+ try {
+ recognitionRef.current.stop();
+ } catch (e) {
+ }
+ recognitionRef.current = null;
+ }
+ const recognition = createRecognitionInstance();
+ if (!recognition) {
+ console.error("[useSpeechRecognition] Failed to create recognition instance");
+ setError("Speech recognition not available");
+ return;
+ }
+ recognitionRef.current = recognition;
  setTranscript("");
  isStartingRef.current = true;
+ lastStartAtRef.current = Date.now();
+ console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
  recognitionRef.current.start();
- console.log("[useSpeechRecognition] recognition.start() executed successfully");
+ console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
  } catch (error2) {
  isStartingRef.current = false;
- console.error("[useSpeechRecognition] Failed to start recognition:", error2);
+ console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
+ if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
+ console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
+ }
+ setError((error2 == null ? void 0 : error2.message) || "Failed to start speech recognition");
  }
- }, [isListening]);
+ }, [isListening, createRecognitionInstance]);
  const stop = useCallback(() => {
  console.log("[useSpeechRecognition] stop() called");
+ lastStopAtRef.current = Date.now();
  if (isSimulatingRef.current) {
  if (simulationTimeoutRef.current) {
  clearTimeout(simulationTimeoutRef.current);
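The ChatInputArea and TapToTalk hunks below consume this hook through a `nativeSpeech` object (`nativeSpeech.isSupported`, `nativeSpeech.isListening`, `nativeSpeech.start()`), and the hunk headers above show the hook's signature (`onResult`, `onEnd`, `language`). The following is a minimal consumption sketch based only on those visible calls; the import path and the exact return shape of the hook are assumptions, not confirmed by this diff.

import { useCallback, useState } from "react";
// Assumed import path; this diff does not show the package's export map.
import { useSpeechRecognition } from "@contentgrowth/llm-service";

function DictationButton() {
  const [text, setText] = useState("");
  // onResult receives (transcript, isFinal); only final chunks are appended here.
  const onResult = useCallback((transcript, isFinal) => {
    if (isFinal) setText((prev) => prev + transcript);
  }, []);
  const onEnd = useCallback(() => console.log("recognition ended"), []);
  // Assumed return shape: { isSupported, isListening, start, stop }.
  const speech = useSpeechRecognition(onResult, onEnd, "en-US");

  if (!speech.isSupported) return <p>Speech recognition is not supported.</p>;
  return (
    <div>
      <button onClick={() => (speech.isListening ? speech.stop() : speech.start())}>
        {speech.isListening ? "Stop" : "Talk"}
      </button>
      <p>{text}</p>
    </div>
  );
}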
@@ -601,26 +712,47 @@ var ChatInputArea = forwardRef(({
  handleSubmit();
  }
  };
+ const isMobile = useCallback3(() => {
+ if (typeof window === "undefined") return false;
+ return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+ }, []);
  const startRecording = async (trigger) => {
  var _a2;
-
+ console.log("[ChatInputArea] startRecording called. trigger:", trigger, "isMobile:", isMobile());
+ console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
+ if (voiceTrigger || isTranscribing) {
+ console.log("[ChatInputArea] startRecording ignored - already active");
+ return;
+ }
  setVoiceTrigger(trigger);
  setVoiceError(null);
+ console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
  (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
  if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
+ console.log("[ChatInputArea] Using native speech recognition");
  if (!nativeSpeech.isSupported) {
+ console.error("[ChatInputArea] Native speech not supported");
  alert("Speech recognition is not supported in this browser.");
  setVoiceTrigger(null);
  return;
  }
+ console.log("[ChatInputArea] Calling nativeSpeech.start()...");
  nativeSpeech.start();
+ console.log("[ChatInputArea] nativeSpeech.start() called");
  } else {
+ console.log("[ChatInputArea] Using custom recorder");
  await customRecorder.start();
+ console.log("[ChatInputArea] Custom recorder started");
+ }
+ if (!isMobile()) {
+ console.log("[ChatInputArea] Re-focusing textarea (desktop only)");
+ setTimeout(() => {
+ var _a3;
+ return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+ }, 0);
+ } else {
+ console.log("[ChatInputArea] SKIPPING textarea focus on mobile to prevent keyboard conflict");
  }
- setTimeout(() => {
- var _a3;
- return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
- }, 0);
  };
  const stopRecording = () => {
  if (!voiceTrigger) return;
@@ -909,18 +1041,22 @@ var TapToTalk = ({
  const isListening = !!voiceTrigger || nativeSpeech.isListening || customRecorder.isRecording;
  const isActive = isListening || isTranscribing;
  const processingRef = useRef4(false);
+ const isMobile = useCallback4(() => {
+ if (typeof window === "undefined") return false;
+ return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+ }, []);
  const toggleVoice = async (e) => {
  if (e) {
  e.preventDefault();
  e.stopPropagation();
  }
- console.
+ console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
  if (processingRef.current) {
  console.log("[TapToTalk] toggleVoice ignored - processing");
  return;
  }
  processingRef.current = true;
- console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
+ console.log("[TapToTalk] toggleVoice called. isActive:", isActive, "isListening:", isListening, "isTranscribing:", isTranscribing);
  try {
  const now = Date.now();
  if (now - tapCountRef.current.lastTap < 500) {
@@ -953,29 +1089,39 @@ var TapToTalk = ({
  }
  setVoiceTrigger(null);
  } else {
- console.log("[TapToTalk] Starting voice...");
+ console.log("[TapToTalk] Starting voice... mode:", voiceConfig == null ? void 0 : voiceConfig.mode);
  setErrorMsg(null);
- if (onFocusTarget) {
- console.log("[TapToTalk] calling onFocusTarget()
+ if (onFocusTarget && !isMobile()) {
+ console.log("[TapToTalk] calling onFocusTarget() (desktop only)");
  onFocusTarget();
+ } else if (onFocusTarget) {
+ console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
  } else {
  console.log("[TapToTalk] onFocusTarget is undefined");
  }
  setVoiceTrigger("click");
+ console.log("[TapToTalk] voiceTrigger set to click");
  if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "custom") {
+ console.log("[TapToTalk] Starting custom recorder...");
  try {
  await customRecorder.start();
+ console.log("[TapToTalk] Custom recorder started successfully");
  } catch (e2) {
+ console.error("[TapToTalk] Custom recorder failed:", e2);
  setErrorMsg("Mic access denied");
  setVoiceTrigger(null);
  }
  } else {
+ console.log("[TapToTalk] Starting native speech recognition...");
  if (!nativeSpeech.isSupported) {
+ console.error("[TapToTalk] Native speech not supported");
  setErrorMsg("Speech not supported");
  setVoiceTrigger(null);
  return;
  }
+ console.log("[TapToTalk] Calling nativeSpeech.start()...");
  nativeSpeech.start();
+ console.log("[TapToTalk] nativeSpeech.start() called");
  }
  }
  } finally {