@contentgrowth/llm-service 0.9.92 → 0.9.94
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -203,93 +203,32 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   const recognitionRef = (0, import_react2.useRef)(null);
   const isSimulatingRef = (0, import_react2.useRef)(false);
   const simulationTimeoutRef = (0, import_react2.useRef)(null);
+  const languageRef = (0, import_react2.useRef)(language);
+  const instanceIdRef = (0, import_react2.useRef)(Math.random().toString(36).slice(2));
+  const lastStartAtRef = (0, import_react2.useRef)(null);
+  const lastStopAtRef = (0, import_react2.useRef)(null);
   const onResultRef = (0, import_react2.useRef)(onResult);
   const onEndRef = (0, import_react2.useRef)(onEnd);
   (0, import_react2.useEffect)(() => {
     onResultRef.current = onResult;
     onEndRef.current = onEnd;
   }, [onResult, onEnd]);
+  (0, import_react2.useEffect)(() => {
+    languageRef.current = language;
+    if (recognitionRef.current) {
+      console.log("[useSpeechRecognition] Updating language to:", language);
+      recognitionRef.current.lang = language;
+    }
+  }, [language]);
   const isStartingRef = (0, import_react2.useRef)(false);
   (0, import_react2.useEffect)(() => {
-
+    var _a;
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
-      console.log("[useSpeechRecognition]
-
-
-
-        recognition.continuous = true;
-        recognition.interimResults = true;
-        console.log("[useSpeechRecognition] Created recognition instance. continuous:", recognition.continuous, "interimResults:", recognition.interimResults);
-        recognition.onstart = () => {
-          console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
-          isStartingRef.current = false;
-          setIsListening(true);
-          setError(null);
-        };
-        recognition.onend = () => {
-          console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
-          isStartingRef.current = false;
-          if (isSimulatingRef.current) {
-            console.log("[useSpeechRecognition] onend ignored - simulating");
-            return;
-          }
-          setIsListening(false);
-          if (onEndRef.current) onEndRef.current();
-        };
-        recognition.onresult = (event) => {
-          console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
-          let interimTranscript = "";
-          let finalTranscript = "";
-          for (let i = event.results.length - 1; i < event.results.length; ++i) {
-            const result = event.results[i];
-            if (result.isFinal) {
-              finalTranscript += result[0].transcript;
-              console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
-              if (onResultRef.current) onResultRef.current(finalTranscript, true);
-            } else {
-              interimTranscript += result[0].transcript;
-              console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
-              if (onResultRef.current) onResultRef.current(interimTranscript, false);
-            }
-          }
-          setTranscript((prev) => prev + finalTranscript);
-        };
-        recognition.onerror = (event) => {
-          console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
-          console.error("[useSpeechRecognition] Error details - This could be caused by:");
-          if (event.error === "aborted") {
-            console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
-          } else if (event.error === "not-allowed") {
-            console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
-          } else if (event.error === "no-speech") {
-            console.error("[useSpeechRecognition] - no-speech: No speech detected");
-          } else if (event.error === "network") {
-            console.error("[useSpeechRecognition] - network: Network error during recognition");
-          }
-          isStartingRef.current = false;
-          if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
-            console.warn("Speech recognition blocked. Simulating input for development...");
-            isSimulatingRef.current = true;
-            setError(null);
-            setIsListening(true);
-            simulationTimeoutRef.current = setTimeout(() => {
-              const mockText = "This is a simulated voice input for testing.";
-              setTranscript((prev) => prev + (prev ? " " : "") + mockText);
-              if (onResultRef.current) onResultRef.current(mockText, true);
-              isSimulatingRef.current = false;
-              setIsListening(false);
-              if (onEndRef.current) onEndRef.current();
-              simulationTimeoutRef.current = null;
-            }, 3e3);
-            return;
-          }
-          console.error("Speech recognition error", event.error);
-          setError(event.error);
-          setIsListening(false);
-        };
-        recognitionRef.current = recognition;
-      }
+      console.log("[useSpeechRecognition] Env - isSecureContext:", window.isSecureContext, "protocol:", (_a = window.location) == null ? void 0 : _a.protocol);
+      const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+      console.log("[useSpeechRecognition] Init check - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
+      setIsSupported(!!SpeechRecognition);
     }
     return () => {
       console.log("[useSpeechRecognition] Effect cleanup - stopping recognition");
@@ -298,21 +237,149 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
         simulationTimeoutRef.current = null;
       }
       if (recognitionRef.current) {
-
+        try {
+          recognitionRef.current.stop();
+        } catch (e) {
+        }
+        recognitionRef.current = null;
+      }
+      if (typeof window !== "undefined") {
+        const w = window;
+        if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+          console.log("[useSpeechRecognition] Cleanup clearing global active instance lock. instanceId:", instanceIdRef.current);
+          w.__llmSpeechRecognitionActiveInstanceId = null;
+        }
       }
     };
   }, []);
-  (0, import_react2.
-
-
-
+  const createRecognitionInstance = (0, import_react2.useCallback)(() => {
+    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+    if (!SpeechRecognition) {
+      console.error("[useSpeechRecognition] SpeechRecognition not available");
+      return null;
     }
-
+    console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
+    const recognition = new SpeechRecognition();
+    const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
+    recognition.continuous = !isMobile;
+    recognition.interimResults = true;
+    recognition.lang = languageRef.current;
+    console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
+    recognition.onaudiostart = () => {
+      console.log("[useSpeechRecognition] Native onaudiostart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onaudioend = () => {
+      console.log("[useSpeechRecognition] Native onaudioend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onsoundstart = () => {
+      console.log("[useSpeechRecognition] Native onsoundstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onsoundend = () => {
+      console.log("[useSpeechRecognition] Native onsoundend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onspeechstart = () => {
+      console.log("[useSpeechRecognition] Native onspeechstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onspeechend = () => {
+      console.log("[useSpeechRecognition] Native onspeechend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onnomatch = () => {
+      console.log("[useSpeechRecognition] Native onnomatch. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
+    };
+    recognition.onstart = () => {
+      console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
+      isStartingRef.current = false;
+      setIsListening(true);
+      setError(null);
+      if (typeof window !== "undefined") {
+        const w = window;
+        w.__llmSpeechRecognitionActiveInstanceId = instanceIdRef.current;
+        console.log("[useSpeechRecognition] Set global active instance lock. instanceId:", instanceIdRef.current);
+      }
+    };
+    recognition.onend = () => {
+      console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
+      isStartingRef.current = false;
+      if (isSimulatingRef.current) {
+        console.log("[useSpeechRecognition] onend ignored - simulating");
+        return;
+      }
+      setIsListening(false);
+      if (onEndRef.current) onEndRef.current();
+      if (typeof window !== "undefined") {
+        const w = window;
+        if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+          w.__llmSpeechRecognitionActiveInstanceId = null;
+          console.log("[useSpeechRecognition] Cleared global active instance lock. instanceId:", instanceIdRef.current);
+        }
+      }
+    };
+    recognition.onresult = (event) => {
+      console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
+      let interimTranscript = "";
+      let finalTranscript = "";
+      for (let i = event.results.length - 1; i < event.results.length; ++i) {
+        const result = event.results[i];
+        if (result.isFinal) {
+          finalTranscript += result[0].transcript;
+          console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
+          if (onResultRef.current) onResultRef.current(finalTranscript, true);
+        } else {
+          interimTranscript += result[0].transcript;
+          console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
+          if (onResultRef.current) onResultRef.current(interimTranscript, false);
+        }
+      }
+      setTranscript((prev) => prev + finalTranscript);
+    };
+    recognition.onerror = (event) => {
+      console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
+      console.error("[useSpeechRecognition] Error context - lastStartAt:", lastStartAtRef.current, "lastStopAt:", lastStopAtRef.current, "instanceId:", instanceIdRef.current);
+      console.error("[useSpeechRecognition] Error details - This could be caused by:");
+      if (event.error === "aborted") {
+        console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
+      } else if (event.error === "not-allowed") {
+        console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
+      } else if (event.error === "no-speech") {
+        console.error("[useSpeechRecognition] - no-speech: No speech detected");
+      } else if (event.error === "network") {
+        console.error("[useSpeechRecognition] - network: Network error during recognition");
+      }
+      isStartingRef.current = false;
+      if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
+        console.warn("Speech recognition blocked. Simulating input for development...");
+        isSimulatingRef.current = true;
+        setError(null);
+        setIsListening(true);
+        simulationTimeoutRef.current = setTimeout(() => {
+          const mockText = "This is a simulated voice input for testing.";
+          setTranscript((prev) => prev + (prev ? " " : "") + mockText);
+          if (onResultRef.current) onResultRef.current(mockText, true);
+          isSimulatingRef.current = false;
+          setIsListening(false);
+          if (onEndRef.current) onEndRef.current();
+          simulationTimeoutRef.current = null;
+        }, 3e3);
+        return;
+      }
+      console.error("Speech recognition error", event.error);
+      setError(event.error);
+      setIsListening(false);
+      if (typeof window !== "undefined") {
+        const w = window;
+        if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
+          w.__llmSpeechRecognitionActiveInstanceId = null;
+          console.log("[useSpeechRecognition] Cleared global active instance lock after error. instanceId:", instanceIdRef.current);
+        }
+      }
+    };
+    return recognition;
+  }, []);
   const start = (0, import_react2.useCallback)(() => {
     var _a;
     const startTimestamp = Date.now();
     console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
-    console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "
+    console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasExistingInstance:", !!recognitionRef.current);
     if (typeof document !== "undefined") {
       console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
     }
@@ -320,24 +387,39 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       console.log("[useSpeechRecognition] isSimulating, ignoring start");
       return;
     }
-    if (!recognitionRef.current) {
-      console.error("[useSpeechRecognition] Recognition instance missing");
-      return;
-    }
     if (isStartingRef.current) {
       console.warn("[useSpeechRecognition] Already starting - ignoring duplicate call");
       return;
     }
-    if (recognitionRef.current.isListening) {
-      console.warn("[useSpeechRecognition] Already listening (native prop) - ignoring");
-    }
     if (isListening) {
       console.warn("[useSpeechRecognition] App state says already listening - ignoring");
       return;
     }
+    if (typeof window !== "undefined") {
+      const w = window;
+      if (w.__llmSpeechRecognitionActiveInstanceId && w.__llmSpeechRecognitionActiveInstanceId !== instanceIdRef.current) {
+        console.error("[useSpeechRecognition] Another recognition instance appears active. activeInstanceId:", w.__llmSpeechRecognitionActiveInstanceId, "thisInstanceId:", instanceIdRef.current);
+      }
+    }
     try {
+      if (recognitionRef.current) {
+        console.log("[useSpeechRecognition] Stopping existing instance before creating new one");
+        try {
+          recognitionRef.current.stop();
+        } catch (e) {
+        }
+        recognitionRef.current = null;
+      }
+      const recognition = createRecognitionInstance();
+      if (!recognition) {
+        console.error("[useSpeechRecognition] Failed to create recognition instance");
+        setError("Speech recognition not available");
+        return;
+      }
+      recognitionRef.current = recognition;
       setTranscript("");
       isStartingRef.current = true;
+      lastStartAtRef.current = Date.now();
       console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
       recognitionRef.current.start();
       console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
@@ -347,10 +429,12 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
         console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
       }
+      setError((error2 == null ? void 0 : error2.message) || "Failed to start speech recognition");
     }
-  }, [isListening]);
+  }, [isListening, createRecognitionInstance]);
   const stop = (0, import_react2.useCallback)(() => {
     console.log("[useSpeechRecognition] stop() called");
+    lastStopAtRef.current = Date.now();
     if (isSimulatingRef.current) {
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
@@ -675,7 +759,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
   }, []);
   const startRecording = async (trigger) => {
-    var _a2;
+    var _a2, _b2;
     console.log("[ChatInputArea] startRecording called. trigger:", trigger, "isMobile:", isMobile());
     console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
     if (voiceTrigger || isTranscribing) {
@@ -684,8 +768,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     }
     setVoiceTrigger(trigger);
     setVoiceError(null);
-    console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
-    (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       console.log("[ChatInputArea] Using native speech recognition");
       if (!nativeSpeech.isSupported) {
@@ -697,8 +779,22 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       console.log("[ChatInputArea] Calling nativeSpeech.start()...");
       nativeSpeech.start();
       console.log("[ChatInputArea] nativeSpeech.start() called");
+      console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists (after nativeSpeech.start)...");
+      try {
+        (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
+        console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
+      } catch (e) {
+        console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
+      }
     } else {
       console.log("[ChatInputArea] Using custom recorder");
+      console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists (custom mode)...");
+      try {
+        (_b2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _b2.call(voiceConfig);
+        console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
+      } catch (e) {
+        console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
+      }
       await customRecorder.start();
       console.log("[ChatInputArea] Custom recorder started");
     }
@@ -710,6 +806,10 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       }, 0);
     } else {
       console.log("[ChatInputArea] SKIPPING textarea focus on mobile to prevent keyboard conflict");
+      if (document.activeElement instanceof HTMLElement) {
+        console.log("[ChatInputArea] Blur active element on mobile");
+        document.activeElement.blur();
+      }
     }
   };
   const stopRecording = () => {
@@ -1052,10 +1152,14 @@ var TapToTalk = ({
     if (onFocusTarget && !isMobile()) {
       console.log("[TapToTalk] calling onFocusTarget() (desktop only)");
       onFocusTarget();
-    } else if (onFocusTarget) {
-      console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
     } else {
-
+      if (onFocusTarget) {
+        console.log("[TapToTalk] SKIPPING onFocusTarget on mobile to prevent keyboard conflict");
+      }
+      if (isMobile() && document.activeElement instanceof HTMLElement) {
+        console.log("[TapToTalk] Blurring active element on mobile");
+        document.activeElement.blur();
+      }
     }
     setVoiceTrigger("click");
     console.log("[TapToTalk] voiceTrigger set to click");