@contentgrowth/llm-service 0.8.8 → 0.8.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
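At a glance: 0.8.9 strips the verbose console.log debugging left in the useSpeechRecognition and useAudioRecorder hooks and in ChatInputArea, and changes the native speech-result callback from (text) to (text, isFinal) so that only final transcripts are appended to the message.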
@@ -171,14 +171,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  recognition.interimResults = true;
  recognition.lang = language;
  recognition.onstart = () => {
- console.log("[useSpeechRecognition] Native onstart event fired");
  setIsListening(true);
  setError(null);
  };
  recognition.onend = () => {
- console.log("[useSpeechRecognition] Native onend event fired");
  if (isSimulatingRef.current) {
- console.log("[useSpeechRecognition] Ignoring onend due to simulation");
  return;
  }
  setIsListening(false);
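Context for the hunk above: with recognition.interimResults = true, the Web Speech API fires onresult for both partial and final hypotheses. A minimal sketch of the standard handler shape (not this package's code; the onResult call at the end mirrors the (text, isFinal) signature introduced in this release):

// Sketch of standard SpeechRecognition onresult handling (Web Speech API).
recognition.onresult = (event) => {
  for (let i = event.resultIndex; i < event.results.length; i++) {
    const result = event.results[i];
    const text = result[0].transcript;
    // result.isFinal distinguishes settled transcripts from interim guesses;
    // a hook like this one can forward that flag to its callback:
    onResult(text, result.isFinal);
  }
};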
@@ -233,23 +230,18 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  };
  }, [onResult, onEnd, language]);
  const start = useCallback(() => {
- console.log("[useSpeechRecognition] start() called");
  if (recognitionRef.current && !isListening) {
  try {
  setTranscript("");
  recognitionRef.current.start();
- console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
  } catch (e) {
  console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
  }
  } else {
- console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
  }
  }, [isListening]);
  const stop = useCallback(() => {
- console.log("[useSpeechRecognition] stop() called");
  if (isSimulatingRef.current) {
- console.log("[useSpeechRecognition] Stopping simulation");
  if (simulationTimeoutRef.current) {
  clearTimeout(simulationTimeoutRef.current);
  simulationTimeoutRef.current = null;
@@ -263,10 +255,8 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  return;
  }
  if (recognitionRef.current && isListening) {
- console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
  recognitionRef.current.stop();
  } else {
- console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
  }
  }, [isListening, onResult, onEnd]);
  const resetTranscript = useCallback(() => {
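Taken together, the three hunks above leave useSpeechRecognition log-free but otherwise unchanged. A hypothetical consumer accepting the new two-argument callback (the hook's return shape is inferred from the setIsListening/start/stop internals visible in this diff):

// Hypothetical usage; return shape inferred, not confirmed by this diff.
const { isListening, start, stop } = useSpeechRecognition(
  (text, isFinal) => {
    if (isFinal) appendToMessage(text); // appendToMessage is a placeholder
  },
  () => console.debug("speech session ended"),
  "en-US"
);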
@@ -292,32 +282,25 @@ var useAudioRecorder = (onStop) => {
  const mediaRecorderRef = useRef2(null);
  const chunksRef = useRef2([]);
  const start = useCallback2(async () => {
- console.log("[useAudioRecorder] start() called");
  try {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
- console.log("[useAudioRecorder] Stream acquired", stream.id);
  const mediaRecorder = new MediaRecorder(stream);
  mediaRecorderRef.current = mediaRecorder;
  chunksRef.current = [];
  mediaRecorder.ondataavailable = (e) => {
- console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
  if (e.data.size > 0) {
  chunksRef.current.push(e.data);
  }
  };
  mediaRecorder.onstop = () => {
- console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
  const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
- console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
  setBlob(audioBlob);
  setIsRecording(false);
  if (onStop) onStop(audioBlob);
  stream.getTracks().forEach((track) => {
- console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
  track.stop();
  });
  };
- console.log("[useAudioRecorder] Starting MediaRecorder...");
  mediaRecorder.start();
  setIsRecording(true);
  setError(null);
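One detail worth noting about the onstop handler in this hunk: per the MediaRecorder spec, calling stop() flushes any buffered data before the stop event fires, so assembling the Blob inside onstop sees every chunk.

// MediaRecorder event order after mediaRecorder.stop():
//   1. dataavailable  -> final buffered chunk pushed into chunksRef
//   2. stop           -> Blob assembled from chunksRef, tracks released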
@@ -327,12 +310,8 @@ var useAudioRecorder = (onStop) => {
  }
  }, [onStop]);
  const stop = useCallback2(() => {
- console.log("[useAudioRecorder] stop() called");
  if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
- console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
  mediaRecorderRef.current.stop();
- } else {
- console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
  }
  }, []);
  return {
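With the else-branch log gone, calling stop() on an inactive recorder is now a silent no-op. A hypothetical consumer of useAudioRecorder (the destructured names are inferred from the setters visible above):

// Hypothetical usage; { isRecording, start, stop } inferred, not confirmed.
const { isRecording, start, stop } = useAudioRecorder((blob) => {
  // blob is the finished "audio/webm" recording, e.g. for upload/transcription.
  const form = new FormData();
  form.append("audio", blob, "recording.webm");
  fetch("/api/transcribe", { method: "POST", body: form }); // endpoint is a placeholder
});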
@@ -412,7 +391,6 @@ var ChatInputArea = forwardRef(({
  voiceConfigRef.current = voiceConfig;
  }, [voiceConfig]);
  const triggerChange = useCallback3((newValue) => {
- console.log("[ChatInputArea] triggerChange called:", { newValue, isControlled, hasOnChange: !!onChangeRef.current, hasTextarea: !!textareaRef.current });
  if (isControlled && onChangeRef.current) {
  const syntheticEvent = {
  target: { value: newValue },
@@ -447,20 +425,19 @@ var ChatInputArea = forwardRef(({
  }
  }
  };
- const handleVoiceResult = useCallback3((text) => {
- console.log("[ChatInputArea] nativeSpeech result:", text);
- triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+ const handleVoiceResult = useCallback3((text, isFinal) => {
+ if (isFinal) {
+ triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+ }
  }, []);
  const handleVoiceEnd = useCallback3(() => {
  var _a2, _b2;
- console.log("[ChatInputArea] nativeSpeech onEnd triggered");
  setVoiceTrigger(null);
  (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
  }, []);
  const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
  const customRecorder = useAudioRecorder(async (blob) => {
  var _a2, _b2, _c2;
- console.log("[ChatInputArea] customRecorder onStop triggered");
  setVoiceTrigger(null);
  (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
  if ((_c2 = voiceConfigRef.current) == null ? void 0 : _c2.onAudioCapture) {
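This hunk carries the one behavioral change of the release: handleVoiceResult gains an isFinal flag and appends to the message only when the result is final. Illustrative trace, assuming the hook forwards interim results (which the new guard suggests) and the message starts as "hello":

// 0.8.8: every callback appended, interim results included.
//   handleVoiceResult("wor")         -> "hello wor"
//   handleVoiceResult("world")       -> "hello wor world"   (duplicated)
// 0.8.9: only final results are appended.
//   handleVoiceResult("wor", false)  -> "hello"             (ignored)
//   handleVoiceResult("world", true) -> "hello world"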
@@ -510,8 +487,6 @@ var ChatInputArea = forwardRef(({
  };
  const startRecording = async (trigger) => {
  var _a2;
- console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
- console.log("[ChatInputArea] voiceConfig:", voiceConfig);
  if (voiceTrigger) return;
  setVoiceTrigger(trigger);
  (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -521,15 +496,12 @@ var ChatInputArea = forwardRef(({
  setVoiceTrigger(null);
  return;
  }
- console.log("[ChatInputArea] Starting nativeSpeech");
  nativeSpeech.start();
  } else {
- console.log("[ChatInputArea] Starting customRecorder");
  await customRecorder.start();
  }
  };
  const stopRecording = () => {
- console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
  if (!voiceTrigger) return;
  if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
  nativeSpeech.stop();
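startRecording/stopRecording branch on voiceConfig.mode: "native" takes the useSpeechRecognition path, anything else the MediaRecorder path. A hypothetical voiceConfig, with field names taken from the identifiers visible in this diff:

// Hypothetical config; fields (mode, language, onVoiceStart, onVoiceEnd,
// onAudioCapture) are the ones referenced in the hunks above.
const voiceConfig = {
  mode: "native",        // "native" -> Web Speech API; otherwise -> MediaRecorder
  language: "en-US",
  onVoiceStart: () => setRecordingIndicator(true),  // setRecordingIndicator is a placeholder
  onVoiceEnd: () => setRecordingIndicator(false),
  onAudioCapture: async (blob) => uploadForTranscription(blob), // placeholder helper
};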