@contentgrowth/llm-service 0.8.8 → 0.8.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -212,14 +212,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     recognition.interimResults = true;
     recognition.lang = language;
     recognition.onstart = () => {
-      console.log("[useSpeechRecognition] Native onstart event fired");
       setIsListening(true);
       setError(null);
     };
     recognition.onend = () => {
-      console.log("[useSpeechRecognition] Native onend event fired");
       if (isSimulatingRef.current) {
-        console.log("[useSpeechRecognition] Ignoring onend due to simulation");
         return;
       }
       setIsListening(false);
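For context, the handlers above attach to a recognizer built from the browser's Web Speech API. A minimal sketch of the construction site, which lies outside this hunk's context window (the vendor-prefixed fallback and `continuous` setting are assumptions, not shown in the diff):

// Sketch, assuming the standard Web Speech API; the construction code
// is not visible in this diff.
const SpeechRecognitionImpl =
  window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognitionImpl();
recognition.interimResults = true; // matches the context line above
recognition.lang = language;       // forwarded from the hook's parameter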
@@ -274,23 +271,18 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     };
   }, [onResult, onEnd, language]);
   const start = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
-        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
         console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
     } else {
-      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] stop() called");
     if (isSimulatingRef.current) {
-      console.log("[useSpeechRecognition] Stopping simulation");
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
@@ -304,10 +296,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current && isListening) {
-      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
     } else {
-      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
     }
   }, [isListening, onResult, onEnd]);
   const resetTranscript = (0, import_react2.useCallback)(() => {
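Taken together, `start` and `stop` give the hook a push-to-talk surface. A hypothetical consumer is sketched below; the exact return shape is inferred from the setters visible in these hunks, so `isListening` as an exported name is an assumption, and the `(text, isFinal)` callback shape is the new contract introduced in 0.8.9 (see the handleVoiceResult hunk further down).

// Hypothetical usage of useSpeechRecognition.
function DictationButton() {
  const { start, stop, isListening } = useSpeechRecognition(
    (text, isFinal) => { if (isFinal) console.log("final:", text); },
    () => console.log("recognition ended"),
    "en-US"
  );
  return (
    <button onClick={isListening ? stop : start}>
      {isListening ? "Stop" : "Dictate"}
    </button>
  );
}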
@@ -333,32 +323,25 @@ var useAudioRecorder = (onStop) => {
   const mediaRecorderRef = (0, import_react3.useRef)(null);
   const chunksRef = (0, import_react3.useRef)([]);
   const start = (0, import_react3.useCallback)(async () => {
-    console.log("[useAudioRecorder] start() called");
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
-        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
           chunksRef.current.push(e.data);
         }
       };
       mediaRecorder.onstop = () => {
-        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
         const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
-        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
         setBlob(audioBlob);
         setIsRecording(false);
         if (onStop) onStop(audioBlob);
         stream.getTracks().forEach((track) => {
-          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
           track.stop();
         });
       };
-      console.log("[useAudioRecorder] Starting MediaRecorder...");
       mediaRecorder.start();
       setIsRecording(true);
       setError(null);
@@ -368,12 +351,8 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = (0, import_react3.useCallback)(() => {
-    console.log("[useAudioRecorder] stop() called");
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
-      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
-    } else {
-      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
   }, []);
   return {
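The recorder's contract is visible in the two hunks above: `start()` acquires a microphone stream via `getUserMedia`, chunks arrive through `ondataavailable`, and `stop()` fires `onstop`, which assembles an `audio/webm` Blob and hands it to the `onStop` callback. A hypothetical consumer (the `/api/transcribe` endpoint is illustrative only):

// Hypothetical usage of useAudioRecorder.
const recorder = useAudioRecorder((blob) => {
  const form = new FormData();
  form.append("audio", blob, "capture.webm");
  fetch("/api/transcribe", { method: "POST", body: form }); // illustrative endpoint
});
// recorder.start() prompts for mic permission; recorder.stop() ends the
// MediaRecorder session, which then invokes the onStop callback above.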
@@ -453,7 +432,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     voiceConfigRef.current = voiceConfig;
   }, [voiceConfig]);
   const triggerChange = (0, import_react5.useCallback)((newValue) => {
-    console.log("[ChatInputArea] triggerChange called:", { newValue, isControlled, hasOnChange: !!onChangeRef.current, hasTextarea: !!textareaRef.current });
     if (isControlled && onChangeRef.current) {
       const syntheticEvent = {
         target: { value: newValue },
@@ -488,20 +466,19 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       }
     }
   };
-  const handleVoiceResult = (0, import_react5.useCallback)((text) => {
-    console.log("[ChatInputArea] nativeSpeech result:", text);
-    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+  const handleVoiceResult = (0, import_react5.useCallback)((text, isFinal) => {
+    if (isFinal) {
+      triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+    }
   }, []);
   const handleVoiceEnd = (0, import_react5.useCallback)(() => {
     var _a2, _b2;
-    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
   }, []);
   const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2, _b2, _c2;
-    console.log("[ChatInputArea] customRecorder onStop triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
     if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
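The `handleVoiceResult` rewrite above is the behavioral change in this release: the callback now receives an `isFinal` flag and only final transcript segments are appended to the message, so interim (still-revising) hypotheses are no longer committed into the input. A minimal sketch of how the hook's internal `onresult` handler could supply that flag, assuming the standard Web Speech API event shape (the handler itself sits outside this diff's context):

// Sketch, assuming a standard SpeechRecognitionEvent; the hook's actual
// onresult handler is not shown in this diff.
recognition.onresult = (event) => {
  for (let i = event.resultIndex; i < event.results.length; i++) {
    const result = event.results[i];
    // result.isFinal is false for interim hypotheses while the user is
    // still speaking, true once the segment is finalized.
    onResult(result[0].transcript, result.isFinal);
  }
};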
@@ -551,8 +528,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   };
   const startRecording = async (trigger) => {
     var _a2;
-    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
-    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
     if (voiceTrigger) return;
     setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -562,15 +537,12 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         setVoiceTrigger(null);
         return;
       }
-      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
-      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
   };
   const stopRecording = () => {
-    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
     if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();
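`startRecording` and `stopRecording` branch on `voiceConfig.mode`: `"native"` routes to `useSpeechRecognition`, anything else to `useAudioRecorder`. Collecting every `voiceConfig` field these hunks read into one hypothetical example (only the field names appear in the diff; the values and types are assumptions):

// Hypothetical voiceConfig assembled from the fields read in this diff.
const voiceConfig = {
  mode: "native",            // "native" -> nativeSpeech, otherwise customRecorder
  language: "en-US",         // passed through to useSpeechRecognition
  onVoiceStart: () => {},    // fired when recording begins
  onVoiceEnd: () => {},      // fired when recording ends
  onAudioCapture: async (blob) => {}, // custom-mode Blob handler
};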