@contentgrowth/llm-service 0.8.7 → 0.8.9

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
@@ -212,14 +212,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     recognition.interimResults = true;
     recognition.lang = language;
     recognition.onstart = () => {
-      console.log("[useSpeechRecognition] Native onstart event fired");
       setIsListening(true);
       setError(null);
     };
     recognition.onend = () => {
-      console.log("[useSpeechRecognition] Native onend event fired");
       if (isSimulatingRef.current) {
-        console.log("[useSpeechRecognition] Ignoring onend due to simulation");
         return;
       }
       setIsListening(false);
@@ -274,23 +271,18 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     };
   }, [onResult, onEnd, language]);
   const start = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
-        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
         console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
     } else {
-      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = (0, import_react2.useCallback)(() => {
-    console.log("[useSpeechRecognition] stop() called");
     if (isSimulatingRef.current) {
-      console.log("[useSpeechRecognition] Stopping simulation");
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
@@ -304,10 +296,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current && isListening) {
-      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
     } else {
-      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
     }
   }, [isListening, onResult, onEnd]);
   const resetTranscript = (0, import_react2.useCallback)(() => {
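
Note on the hunks above: the try/catch around `recognition.start()` survives the log cleanup, and for good reason — the Web Speech API throws an `InvalidStateError` when `start()` is called on a recognizer that is already running. A minimal standalone sketch of that guard, outside the package (the `safeStart` helper and the vendor-prefix fallback are illustrative assumptions, not package code):

```js
// SpeechRecognition is prefixed in Chromium-based browsers.
const SpeechRecognitionImpl =
  window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognitionImpl();
recognition.interimResults = true;
recognition.lang = "en-US";

function safeStart() {
  try {
    recognition.start();
  } catch (e) {
    // Reached when start() is called while a session is already active.
    console.error("Failed to start speech recognition:", e);
  }
}
```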
@@ -333,32 +323,25 @@ var useAudioRecorder = (onStop) => {
   const mediaRecorderRef = (0, import_react3.useRef)(null);
   const chunksRef = (0, import_react3.useRef)([]);
   const start = (0, import_react3.useCallback)(async () => {
-    console.log("[useAudioRecorder] start() called");
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
-        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
           chunksRef.current.push(e.data);
         }
       };
       mediaRecorder.onstop = () => {
-        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
         const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
-        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
         setBlob(audioBlob);
         setIsRecording(false);
         if (onStop) onStop(audioBlob);
         stream.getTracks().forEach((track) => {
-          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
           track.stop();
         });
       };
-      console.log("[useAudioRecorder] Starting MediaRecorder...");
       mediaRecorder.start();
       setIsRecording(true);
       setError(null);
@@ -368,12 +351,8 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = (0, import_react3.useCallback)(() => {
-    console.log("[useAudioRecorder] stop() called");
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
-      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
-    } else {
-      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
   }, []);
   return {
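
The `useAudioRecorder` hunks above show the hook's full capture lifecycle with its debug logging stripped: acquire a stream via `getUserMedia`, buffer `dataavailable` chunks, assemble an `audio/webm` Blob in `onstop`, and stop the tracks to release the microphone. A hedged sketch of the same flow outside React (`recordOnce` is a hypothetical helper, not part of the package):

```js
// Record one audio clip and deliver it as a Blob; returns a stop function.
async function recordOnce(onStop) {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = (e) => {
    if (e.data.size > 0) chunks.push(e.data);
  };
  recorder.onstop = () => {
    const blob = new Blob(chunks, { type: "audio/webm" });
    onStop(blob);
    // Stop every track so the browser's recording indicator turns off.
    stream.getTracks().forEach((track) => track.stop());
  };
  recorder.start();
  return () => {
    if (recorder.state !== "inactive") recorder.stop();
  };
}
```

A caller would do something like `const stop = await recordOnce(handleBlob)` and invoke `stop()` later; the same inactive-state check guards `stop()` in the hook above.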
@@ -453,7 +432,7 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
     voiceConfigRef.current = voiceConfig;
   }, [voiceConfig]);
   const triggerChange = (0, import_react5.useCallback)((newValue) => {
-    if (isControlled && onChangeRef.current && textareaRef.current) {
+    if (isControlled && onChangeRef.current) {
       const syntheticEvent = {
         target: { value: newValue },
         currentTarget: { value: newValue }
@@ -487,20 +466,19 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
       }
     }
   };
-  const handleVoiceResult = (0, import_react5.useCallback)((text) => {
-    console.log("[ChatInputArea] nativeSpeech result:", text);
-    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+  const handleVoiceResult = (0, import_react5.useCallback)((text, isFinal) => {
+    if (isFinal) {
+      triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+    }
   }, []);
   const handleVoiceEnd = (0, import_react5.useCallback)(() => {
     var _a2, _b2;
-    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
   }, []);
   const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2, _b2, _c2;
-    console.log("[ChatInputArea] customRecorder onStop triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
     if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
@@ -550,8 +528,6 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
   };
   const startRecording = async (trigger) => {
     var _a2;
-    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
-    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
     if (voiceTrigger) return;
     setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -561,15 +537,12 @@ var ChatInputArea = (0, import_react5.forwardRef)(({
         setVoiceTrigger(null);
         return;
       }
-      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
-      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
   };
   const stopRecording = () => {
-    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
     if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();
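
Beyond the log cleanup, the most significant behavioral change visible in this diff is the `handleVoiceResult` hunk: the `onResult` callback passed to `useSpeechRecognition` now appears to receive `(text, isFinal)` and only commits final transcripts to the input value. A hedged consumer-side sketch of that contract (`setMessage` and `setPreview` are hypothetical state setters, not package APIs):

```js
// Commit only finalized phrases; interim results can drive a live preview.
const handleVoiceResult = (text, isFinal) => {
  if (isFinal) {
    // Append the finalized phrase, inserting a space when text already exists.
    setMessage((prev) => prev + (prev ? " " : "") + text);
  } else {
    setPreview(text); // hypothetical interim-preview state, not in the package
  }
};
```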