@contentgrowth/llm-service 0.8.5 → 0.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -212,11 +212,16 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     recognition.interimResults = true;
     recognition.lang = language;
     recognition.onstart = () => {
+      console.log("[useSpeechRecognition] Native onstart event fired");
       setIsListening(true);
       setError(null);
     };
     recognition.onend = () => {
-
+      console.log("[useSpeechRecognition] Native onend event fired");
+      if (isSimulatingRef.current) {
+        console.log("[useSpeechRecognition] Ignoring onend due to simulation");
+        return;
+      }
       setIsListening(false);
       if (onEnd) onEnd();
     };
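The guard added to recognition.onend is worth calling out: the hook fakes speech input in development when the mic is blocked (see the not-allowed branch in the next hunk), and the ref lets the handler tell a real browser onend apart from one that fires mid-simulation. A minimal sketch of that ref-guard pattern, with illustrative names rather than the package's own exports:

    // Minimal sketch of the ref-guard pattern (names are illustrative).
    // A ref survives re-renders without causing them, so the native onend
    // handler can check whether a dev-mode simulation is running and bail
    // out instead of clearing the listening state.
    import { useRef, useState } from "react";

    function useEndGuard(onEnd?: () => void) {
      const isSimulatingRef = useRef(false);
      const [isListening, setIsListening] = useState(false);

      const handleNativeEnd = () => {
        if (isSimulatingRef.current) return; // simulated session still active
        setIsListening(false);
        onEnd?.();
      };

      return { isListening, setIsListening, isSimulatingRef, handleNativeEnd };
    }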
@@ -236,6 +241,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       setTranscript((prev) => prev + finalTranscript);
     };
     recognition.onerror = (event) => {
+      console.error("[useSpeechRecognition] Native onerror event:", event.error);
       if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
         console.warn("Speech recognition blocked. Simulating input for development...");
         isSimulatingRef.current = true;
@@ -268,17 +274,23 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     };
   }, [onResult, onEnd, language]);
   const start = (0, import_react2.useCallback)(() => {
+    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
+        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
-        console.error("Failed to start speech recognition:", e);
+        console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
+    } else {
+      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
+    console.log("[useSpeechRecognition] stop() called");
    if (isSimulatingRef.current) {
+      console.log("[useSpeechRecognition] Stopping simulation");
      if (simulationTimeoutRef.current) {
        clearTimeout(simulationTimeoutRef.current);
        simulationTimeoutRef.current = null;
@@ -292,7 +304,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current && isListening) {
+      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
+    } else {
+      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
     }
   }, [isListening, onResult, onEnd]);
   const resetTranscript = (0, import_react2.useCallback)(() => {
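Both start() and stop() now log why a call was ignored instead of failing silently. The guard itself predates this release: the Web Speech API's start() throws an InvalidStateError when the recognizer is already running, so the hook checks its own flag before touching the API. A reduced sketch, using a minimal structural type since the recognizer interface is vendor-prefixed in some browsers:

    // Guarded start for a Web Speech API recognizer. start() throws
    // InvalidStateError if recognition is already running, so the caller's
    // own flag is checked (and logged) before calling into the API.
    type Recognizer = { start(): void };

    function startSafely(rec: Recognizer | null, isListening: boolean): void {
      if (!rec || isListening) {
        console.log("start() ignored", { hasInstance: !!rec, isListening });
        return;
      }
      try {
        rec.start();
      } catch (e) {
        console.error("Failed to start speech recognition:", e);
      }
    }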
@@ -318,23 +333,32 @@ var useAudioRecorder = (onStop) => {
   const mediaRecorderRef = (0, import_react3.useRef)(null);
   const chunksRef = (0, import_react3.useRef)([]);
   const start = (0, import_react3.useCallback)(async () => {
+    console.log("[useAudioRecorder] start() called");
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
+        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
           chunksRef.current.push(e.data);
         }
       };
       mediaRecorder.onstop = () => {
+        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
         const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
+        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
         setBlob(audioBlob);
         setIsRecording(false);
         if (onStop) onStop(audioBlob);
-        stream.getTracks().forEach((track) => track.stop());
+        stream.getTracks().forEach((track) => {
+          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
+          track.stop();
+        });
       };
+      console.log("[useAudioRecorder] Starting MediaRecorder...");
       mediaRecorder.start();
       setIsRecording(true);
       setError(null);
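The one functional change in this hunk expands the old single-line track cleanup so each track is logged before it is stopped. Stopping every track on the captured stream is what actually releases the microphone (and the browser's recording indicator); stopping the MediaRecorder alone does not. A condensed sketch of the same lifecycle, assuming standard MediaRecorder support:

    // Condensed recorder lifecycle matching the hunk above. Stopping the
    // MediaRecorder fires onstop with the buffered chunks; stopping each
    // MediaStream track afterwards releases the microphone itself.
    async function recordOnce(onBlob: (blob: Blob) => void): Promise<MediaRecorder> {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const recorder = new MediaRecorder(stream);
      const chunks: Blob[] = [];
      recorder.ondataavailable = (e) => {
        if (e.data.size > 0) chunks.push(e.data);
      };
      recorder.onstop = () => {
        onBlob(new Blob(chunks, { type: "audio/webm" }));
        stream.getTracks().forEach((track) => track.stop()); // free the mic
      };
      recorder.start();
      return recorder; // caller invokes recorder.stop() when done
    }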
@@ -344,8 +368,12 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = (0, import_react3.useCallback)(() => {
+    console.log("[useAudioRecorder] stop() called");
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
+      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
+    } else {
+      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
   }, []);
   return {
@@ -411,6 +439,8 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   }, [inputMode]);
   const isControlled = value !== void 0;
   const message = isControlled ? value : internalMessage;
+  const messageRef = (0, import_react5.useRef)(message);
+  messageRef.current = message;
   const { voice: globalVoice } = useChatConfig();
   const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
   const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
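messageRef is the pivotal addition of this release: it mirrors message on every render, which lets the voice callbacks further down be memoized with stable identities while still reading the latest draft text. Without it, a callback created once would append to whatever message was when the closure captured it. A minimal sketch of this "latest value in a ref" pattern, with illustrative names:

    // "Latest value in a ref": the ref is rewritten on each render, so a
    // callback memoized with an empty dependency list reads the current
    // message instead of the one captured at mount. Names are illustrative.
    import { useCallback, useRef, useState } from "react";

    function useAppend() {
      const [message, setMessage] = useState("");
      const messageRef = useRef(message);
      messageRef.current = message; // refreshed every render

      const append = useCallback((text: string) => {
        const current = messageRef.current; // always the latest draft
        setMessage(current + (current ? " " : "") + text);
      }, []); // stable identity: safe to pass to long-lived hooks

      return { message, append };
    }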
@@ -449,23 +479,28 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       }
     }
   };
-  const nativeSpeech = useSpeechRecognition((text) => {
-    triggerChange(message + (message ? " " : "") + text);
-  }, () => {
+  const handleVoiceResult = (0, import_react5.useCallback)((text) => {
+    console.log("[ChatInputArea] nativeSpeech result:", text);
+    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+  }, []);
+  const handleVoiceEnd = (0, import_react5.useCallback)(() => {
     var _a2;
+    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
     setVoiceTrigger(null);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
-  }, voiceConfig == null ? void 0 : voiceConfig.language);
+  }, [voiceConfig]);
+  const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2;
+    console.log("[ChatInputArea] customRecorder onStop triggered");
     setVoiceTrigger(null);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
     if (voiceConfig == null ? void 0 : voiceConfig.onAudioCapture) {
       try {
         const text = await voiceConfig.onAudioCapture(blob);
-        if (text) triggerChange(message + (message ? " " : "") + text);
+        if (text) triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
       } catch (e) {
-        console.error("Audio capture failed", e);
+        console.error("[ChatInputArea] Audio capture failed", e);
       }
     }
   });
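This restructuring does more than add logs. Judging by the removed lines, the result and end callbacks handed to useSpeechRecognition were previously created inline on each render; since the hook lists onResult and onEnd in its effect dependencies (see the `}, [onResult, onEnd, language]);` line in an earlier hunk), every render tore down and rebuilt the recognizer. Hoisting them into useCallback gives the hook stable inputs. A reduced, hypothetical illustration of the failure mode:

    // Reduced illustration (hypothetical hook names) of why stable callbacks
    // matter: an effect keyed on the callback re-subscribes whenever its
    // identity changes, so an inline arrow recreated per render churns the
    // recognizer on every keystroke.
    import { useCallback, useEffect } from "react";

    function useRecognizer(onResult: (text: string) => void) {
      useEffect(() => {
        console.log("recognizer created");
        return () => console.log("recognizer torn down");
      }, [onResult]); // re-runs whenever onResult gets a new identity
    }

    function Caller() {
      // Memoized once: the effect above runs a single time.
      const onResult = useCallback((text: string) => console.log(text), []);
      useRecognizer(onResult);
      return null;
    }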
@@ -507,6 +542,8 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   };
   const startRecording = async (trigger) => {
     var _a2;
+    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
+    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
     if (voiceTrigger) return;
     setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -516,12 +553,15 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         setVoiceTrigger(null);
         return;
       }
+      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
+      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
   };
   const stopRecording = () => {
+    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
     if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();