@contentgrowth/llm-service 0.8.5 → 0.8.6

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -148,7 +148,7 @@ function ChatHeader({
  }

  // src/ui/react/components/ChatInputArea.tsx
- import { useState as useState3, useRef as useRef3, useImperativeHandle, forwardRef, useEffect as useEffect3 } from "react";
+ import { useState as useState3, useRef as useRef3, useImperativeHandle, forwardRef, useEffect as useEffect3, useCallback as useCallback3 } from "react";
  import { StopIcon, PaperAirplaneIcon } from "@heroicons/react/24/outline";

  // src/ui/react/hooks/useSpeechRecognition.ts
@@ -171,11 +171,16 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  recognition.interimResults = true;
  recognition.lang = language;
  recognition.onstart = () => {
+ console.log("[useSpeechRecognition] Native onstart event fired");
  setIsListening(true);
  setError(null);
  };
  recognition.onend = () => {
- if (isSimulatingRef.current) return;
+ console.log("[useSpeechRecognition] Native onend event fired");
+ if (isSimulatingRef.current) {
+ console.log("[useSpeechRecognition] Ignoring onend due to simulation");
+ return;
+ }
  setIsListening(false);
  if (onEnd) onEnd();
  };
@@ -195,6 +200,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  setTranscript((prev) => prev + finalTranscript);
  };
  recognition.onerror = (event) => {
+ console.error("[useSpeechRecognition] Native onerror event:", event.error);
  if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
  console.warn("Speech recognition blocked. Simulating input for development...");
  isSimulatingRef.current = true;
@@ -227,17 +233,23 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  };
  }, [onResult, onEnd, language]);
  const start = useCallback(() => {
+ console.log("[useSpeechRecognition] start() called");
  if (recognitionRef.current && !isListening) {
  try {
  setTranscript("");
  recognitionRef.current.start();
+ console.log("[useSpeechRecognition] recognitionRef.current.start() executed");
  } catch (e) {
- console.error("Failed to start speech recognition:", e);
+ console.error("[useSpeechRecognition] Failed to start speech recognition:", e);
  }
+ } else {
+ console.log("[useSpeechRecognition] start() ignored: already listening or no recognition instance", { hasInstance: !!recognitionRef.current, isListening });
  }
  }, [isListening]);
  const stop = useCallback(() => {
+ console.log("[useSpeechRecognition] stop() called");
  if (isSimulatingRef.current) {
+ console.log("[useSpeechRecognition] Stopping simulation");
  if (simulationTimeoutRef.current) {
  clearTimeout(simulationTimeoutRef.current);
  simulationTimeoutRef.current = null;
@@ -251,7 +263,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
  return;
  }
  if (recognitionRef.current && isListening) {
+ console.log("[useSpeechRecognition] recognitionRef.current.stop() executed");
  recognitionRef.current.stop();
+ } else {
+ console.log("[useSpeechRecognition] stop() ignored: not listening", { isListening });
  }
  }, [isListening, onResult, onEnd]);
  const resetTranscript = useCallback(() => {
@@ -277,23 +292,32 @@ var useAudioRecorder = (onStop) => {
  const mediaRecorderRef = useRef2(null);
  const chunksRef = useRef2([]);
  const start = useCallback2(async () => {
+ console.log("[useAudioRecorder] start() called");
  try {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+ console.log("[useAudioRecorder] Stream acquired", stream.id);
  const mediaRecorder = new MediaRecorder(stream);
  mediaRecorderRef.current = mediaRecorder;
  chunksRef.current = [];
  mediaRecorder.ondataavailable = (e) => {
+ console.log(`[useAudioRecorder] Data available, size: ${e.data.size}`);
  if (e.data.size > 0) {
  chunksRef.current.push(e.data);
  }
  };
  mediaRecorder.onstop = () => {
+ console.log(`[useAudioRecorder] Recorder stopped. Chunks: ${chunksRef.current.length}`);
  const audioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
+ console.log(`[useAudioRecorder] Blob created. Size: ${audioBlob.size}, Type: ${audioBlob.type}`);
  setBlob(audioBlob);
  setIsRecording(false);
  if (onStop) onStop(audioBlob);
- stream.getTracks().forEach((track) => track.stop());
+ stream.getTracks().forEach((track) => {
+ console.log(`[useAudioRecorder] Stopping track: ${track.label} (${track.kind})`);
+ track.stop();
+ });
  };
+ console.log("[useAudioRecorder] Starting MediaRecorder...");
  mediaRecorder.start();
  setIsRecording(true);
  setError(null);
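
Note on the hunk above: beyond the added logging, the track cleanup keeps its original behavior. Stopping every track on the getUserMedia stream is what actually releases the microphone (and turns off the browser's recording indicator); stopping the MediaRecorder alone does not. A minimal standalone sketch of that lifecycle, independent of this package (recordOnce is a hypothetical name):

    // Record audio, then release the device by stopping every track.
    async function recordOnce(onBlob: (b: Blob) => void): Promise<MediaRecorder> {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const recorder = new MediaRecorder(stream);
      const chunks: Blob[] = [];
      recorder.ondataavailable = (e) => { if (e.data.size > 0) chunks.push(e.data); };
      recorder.onstop = () => {
        onBlob(new Blob(chunks, { type: "audio/webm" }));
        // Without this, the mic stays open after recording ends.
        stream.getTracks().forEach((track) => track.stop());
      };
      recorder.start();
      return recorder; // caller invokes recorder.stop() when done
    }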
@@ -303,8 +327,12 @@ var useAudioRecorder = (onStop) => {
  }
  }, [onStop]);
  const stop = useCallback2(() => {
+ console.log("[useAudioRecorder] stop() called");
  if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
+ console.log(`[useAudioRecorder] Stopping MediaRecorder. State was: ${mediaRecorderRef.current.state}`);
  mediaRecorderRef.current.stop();
+ } else {
+ console.log("[useAudioRecorder] stop() ignored. Recorder is inactive or missing.");
  }
  }, []);
  return {
@@ -370,6 +398,8 @@ var ChatInputArea = forwardRef(({
  }, [inputMode]);
  const isControlled = value !== void 0;
  const message = isControlled ? value : internalMessage;
+ const messageRef = useRef3(message);
+ messageRef.current = message;
  const { voice: globalVoice } = useChatConfig();
  const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
  const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
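
The messageRef introduced in this hunk is the substantive fix in 0.8.6: a ref that is re-pointed at the current message on every render, so memoized voice callbacks (next hunk) can read the latest input text without listing message in their dependency arrays. Reading state through a ref avoids the stale-closure problem where a callback captured an old message and voice input overwrote newer keystrokes. A minimal sketch of the pattern, with hypothetical useLatest and useAppendText helpers:

    import { useCallback, useRef } from "react";

    // Mirror a changing value into a ref so stable callbacks can read it.
    function useLatest<T>(value: T) {
      const ref = useRef(value);
      ref.current = value; // refreshed on every render
      return ref;
    }

    function useAppendText(message: string, onChange: (next: string) => void) {
      const messageRef = useLatest(message);
      // Stable identity across renders, yet never sees a stale message.
      return useCallback((text: string) => {
        onChange(messageRef.current + (messageRef.current ? " " : "") + text);
      }, [onChange]);
    }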
@@ -408,23 +438,28 @@ var ChatInputArea = forwardRef(({
  }
  }
  };
- const nativeSpeech = useSpeechRecognition((text) => {
- triggerChange(message + (message ? " " : "") + text);
- }, () => {
+ const handleVoiceResult = useCallback3((text) => {
+ console.log("[ChatInputArea] nativeSpeech result:", text);
+ triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
+ }, []);
+ const handleVoiceEnd = useCallback3(() => {
  var _a2;
+ console.log("[ChatInputArea] nativeSpeech onEnd triggered");
  setVoiceTrigger(null);
  (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
- }, voiceConfig == null ? void 0 : voiceConfig.language);
+ }, [voiceConfig]);
+ const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
  const customRecorder = useAudioRecorder(async (blob) => {
  var _a2;
+ console.log("[ChatInputArea] customRecorder onStop triggered");
  setVoiceTrigger(null);
  (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
  if (voiceConfig == null ? void 0 : voiceConfig.onAudioCapture) {
  try {
  const text = await voiceConfig.onAudioCapture(blob);
- if (text) triggerChange(message + (message ? " " : "") + text);
+ if (text) triggerChange(messageRef.current + (messageRef.current ? " " : "") + text);
  } catch (e) {
- console.error("Audio capture failed", e);
+ console.error("[ChatInputArea] Audio capture failed", e);
  }
  }
  });
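
Why the callbacks in this hunk were hoisted: useSpeechRecognition rebuilds its recognition instance inside an effect keyed on [onResult, onEnd, language] (see the @@ -227,17 hunk earlier). Inline arrow functions gave both arguments a new identity on every render, so the effect's cleanup could tear the recognizer down mid-dictation; wrapping them in useCallback3 keeps their identities stable, and the message they would otherwise close over is read through messageRef instead. A simplified sketch of the failure mode, assuming a hook shaped like the one in this diff:

    // Inside the hook (simplified): the recognizer lives and dies with its deps.
    useEffect(() => {
      const recognition = new (window as any).webkitSpeechRecognition();
      recognition.onresult = (e: any) => onResult(e.results[0][0].transcript);
      recognition.onend = () => onEnd?.();
      return () => recognition.abort(); // runs whenever any dep changes
    }, [onResult, onEnd, language]);

    // Fresh inline functions => deps change every render => constant teardown:
    //   useSpeechRecognition((text) => append(text), () => done(), lang);
    // Memoized handlers keep one recognizer alive for the whole session:
    //   const handleResult = useCallback((text) => append(text), []);
    //   useSpeechRecognition(handleResult, handleEnd, lang);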
@@ -466,6 +501,8 @@ var ChatInputArea = forwardRef(({
  };
  const startRecording = async (trigger) => {
  var _a2;
+ console.log(`[ChatInputArea] startRecording triggered by: ${trigger}, current voiceTrigger: ${voiceTrigger}`);
+ console.log("[ChatInputArea] voiceConfig:", voiceConfig);
  if (voiceTrigger) return;
  setVoiceTrigger(trigger);
  (_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
@@ -475,12 +512,15 @@ var ChatInputArea = forwardRef(({
  setVoiceTrigger(null);
  return;
  }
+ console.log("[ChatInputArea] Starting nativeSpeech");
  nativeSpeech.start();
  } else {
+ console.log("[ChatInputArea] Starting customRecorder");
  await customRecorder.start();
  }
  };
  const stopRecording = () => {
+ console.log(`[ChatInputArea] stopRecording called. Current voiceTrigger: ${voiceTrigger}`);
  if (!voiceTrigger) return;
  if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") {
  nativeSpeech.stop();