@contentgrowth/llm-service 0.8.7 → 0.8.9

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -171,14 +171,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     recognition.interimResults = true;
     recognition.lang = language;
     recognition.onstart = () => {
-      console.log("[useSpeechRecognition] Native onstart event fired");
       setIsListening(true);
       setError(null);
     };
     recognition.onend = () => {
-      console.log("[useSpeechRecognition] Native onend event fired");
       if (isSimulatingRef.current) {
-        console.log("[useSpeechRecognition] Ignoring onend due to simulation");
         return;
       }
       setIsListening(false);
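
For context on the handlers in this hunk: the hook wraps the browser's Web Speech API. A minimal sketch of the setup the hunk assumes, where the vendor-prefixed constructor lookup and the onresult wiring are inferred from standard Web Speech API usage rather than shown in this diff:

    // Sketch: the kind of recognition instance the hook configures above.
    // The webkit fallback is an assumption based on common browser support.
    const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
    const recognition = new SpeechRecognitionImpl();
    recognition.interimResults = true; // emit partial transcripts while speaking
    recognition.lang = "en-US";
    recognition.onresult = (event) => {
      const result = event.results[event.results.length - 1];
      console.log(result[0].transcript, result.isFinal);
    };
    recognition.start(); // fires onstart, then onresult/onend as above
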
@@ -233,23 +230,18 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
     };
   }, [onResult, onEnd, language]);
   const start = useCallback(() => {
-    console.log("[useSpeechRecognition] start() called");
     if (recognitionRef.current && !isListening) {
       try {
         setTranscript("");
         recognitionRef.current.start();
-        console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
       } catch (e) {
         console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
       }
     } else {
-      console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
     }
   }, [isListening]);
   const stop = useCallback(() => {
-    console.log("[useSpeechRecognition] stop() called");
     if (isSimulatingRef.current) {
-      console.log("[useSpeechRecognition] Stopping simulation");
       if (simulationTimeoutRef.current) {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
@@ -263,10 +255,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       return;
     }
     if (recognitionRef.current && isListening) {
-      console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
       recognitionRef.current.stop();
     } else {
-      console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
     }
   }, [isListening, onResult, onEnd]);
   const resetTranscript = useCallback(() => {
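
These hunks leave start() and stop() as thin guards around the native recognition instance. A hypothetical consumer of the hook, assuming it also returns isListening alongside start, stop, and resetTranscript (the return statement falls outside this diff):

    // Hypothetical usage; the exact return shape is assumed, not shown here.
    function DictationButton() {
      const onResult = (text, isFinal) => { if (isFinal) console.log("final:", text); };
      const onEnd = () => console.log("recognition ended");
      const { start, stop, isListening } = useSpeechRecognition(onResult, onEnd, "en-US");
      return (
        <button onClick={isListening ? stop : start}>
          {isListening ? "Stop" : "Dictate"}
        </button>
      );
    }
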
@@ -292,32 +282,25 @@ var useAudioRecorder = (onStop) => {
   const mediaRecorderRef = useRef2(null);
   const chunksRef = useRef2([]);
   const start = useCallback2(async () => {
-    console.log("[useAudioRecorder] start() called");
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-      console.log("[useAudioRecorder] Stream acquired", stream.id);
       const mediaRecorder = new MediaRecorder(stream);
       mediaRecorderRef.current = mediaRecorder;
       chunksRef.current = [];
       mediaRecorder.ondataavailable = (e) => {
-        console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
         if (e.data.size > 0) {
           chunksRef.current.push(e.data);
         }
       };
       mediaRecorder.onstop = () => {
-        console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
         const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
-        console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
         setBlob(audioBlob);
         setIsRecording(false);
         if (onStop) onStop(audioBlob);
         stream.getTracks().forEach((track) => {
-          console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
           track.stop();
         });
       };
-      console.log("[useAudioRecorder] Starting MediaRecorder...");
       mediaRecorder.start();
       setIsRecording(true);
       setError(null);
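
The onstop handler in this hunk assembles the buffered chunks into a single audio/webm Blob and then releases the microphone tracks. A sketch of what a consumer might do with that blob once onStop delivers it; the transcription endpoint is hypothetical:

    // Hypothetical consumer of the blob produced by useAudioRecorder's onStop.
    async function handleRecordedAudio(audioBlob) {
      // Play it back locally.
      const url = URL.createObjectURL(audioBlob);
      new Audio(url).play();
      // Or post it to a server for transcription (endpoint is an assumption).
      const form = new FormData();
      form.append("audio", audioBlob, "capture.webm");
      await fetch("/api/transcribe", { method: "POST", body: form });
    }
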
@@ -327,12 +310,8 @@ var useAudioRecorder = (onStop) => {
     }
   }, [onStop]);
   const stop = useCallback2(() => {
-    console.log("[useAudioRecorder] stop() called");
     if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
-      console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
       mediaRecorderRef.current.stop();
-    } else {
-      console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
     }
   }, []);
   return {
@@ -412,7 +391,7 @@ var ChatInputArea = forwardRef(({
     voiceConfigRef.current = voiceConfig;
   }, [voiceConfig]);
   const triggerChange = useCallback3((newValue) => {
-    if (isControlled && onChangeRef.current && textareaRef.current) {
+    if (isControlled && onChangeRef.current) {
       const syntheticEvent = {
         target: { value: newValue },
         currentTarget: { value: newValue }
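
The synthetic event built here mimics the shape of a React change event, so a controlled parent receives programmatic updates, voice results included, through the same onChange as keyboard input. Dropping the textareaRef.current guard lets that event fire even before the textarea ref is attached. An illustrative controlled parent, with prop names assumed from the standard controlled-component pattern:

    // Illustrative: voice input flows through onChange because triggerChange
    // fabricates { target: { value } } just like a real change event.
    function Composer() {
      const [message, setMessage] = React.useState("");
      return (
        <ChatInputArea
          value={message}
          onChange={(e) => setMessage(e.target.value)}
        />
      );
    }
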
@@ -446,20 +425,19 @@ var ChatInputArea = forwardRef(({
       }
     }
   };
-  const handleVoiceResult = useCallback3((text) => {
-    console.log("[ChatInputArea] nativeSpeech result:", text);
-    triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+  const handleVoiceResult = useCallback3((text, isFinal) => {
+    if (isFinal) {
+      triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+    }
   }, []);
   const handleVoiceEnd = useCallback3(() => {
     var _a2, _b2;
-    console.log("[ChatInputArea] nativeSpeech onEnd triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
   }, []);
   const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
   const customRecorder = useAudioRecorder(async (blob) => {
     var _a2, _b2, _c2;
-    console.log("[ChatInputArea] customRecorder onStop triggered");
     setVoiceTrigger(null);
     (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
     if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
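
handleVoiceResult now receives an isFinal flag and only commits final transcripts, so interim results can be surfaced without being appended twice. A sketch of the onresult handler implied by the new (text, isFinal) signature, assuming standard Web Speech API events:

    // Sketch of the producing side implied by the new callback shape.
    recognition.onresult = (event) => {
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const res = event.results[i];
        onResult(res[0].transcript, res.isFinal); // (text, isFinal)
      }
    };
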
@@ -509,8 +487,6 @@ var ChatInputArea = forwardRef(({
   };
   const startRecording = async (trigger) => {
     var _a2;
-    console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
-    console.log("[ChatInputArea] voiceConfig:", voiceConfig);
     if (voiceTrigger) return;
     setVoiceTrigger(trigger);
     (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -520,15 +496,12 @@ var ChatInputArea = forwardRef(({
         setVoiceTrigger(null);
         return;
       }
-      console.log("[ChatInputArea] Starting nativeSpeech");
       nativeSpeech.start();
     } else {
-      console.log("[ChatInputArea] Starting customRecorder");
       await customRecorder.start();
     }
   };
   const stopRecording = () => {
-    console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
     if (!voiceTrigger) return;
     if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
       nativeSpeech.stop();
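
startRecording and stopRecording branch on voiceConfig.mode: "native" routes through the Web Speech hook, anything else through the MediaRecorder-based customRecorder. A hypothetical voiceConfig wiring both paths; every property name below appears at a call site in this diff:

    // Hypothetical config; keys match the call sites visible above.
    const voiceConfig = {
      mode: "native",            // any other value falls through to customRecorder
      language: "en-US",
      onVoiceStart: () => console.log("voice capture started"),
      onVoiceEnd: () => console.log("voice capture ended"),
      onAudioCapture: async (blob) => {
        // reached in custom-recorder mode when the MediaRecorder stops
      },
    };
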