@memori.ai/memori-react 8.6.1 → 8.6.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/components/CompletionProviderStatus/CompletionProviderStatus.js +0 -22
  3. package/dist/components/CompletionProviderStatus/CompletionProviderStatus.js.map +1 -1
  4. package/dist/components/MemoriArtifactSystem/components/ArtifactHandler/ArtifactHandler.js +13 -1
  5. package/dist/components/MemoriArtifactSystem/components/ArtifactHandler/ArtifactHandler.js.map +1 -1
  6. package/dist/components/MemoriWidget/MemoriWidget.d.ts +7 -1
  7. package/dist/components/MemoriWidget/MemoriWidget.js +7 -115
  8. package/dist/components/MemoriWidget/MemoriWidget.js.map +1 -1
  9. package/dist/helpers/message.d.ts +1 -1
  10. package/dist/helpers/message.js +2 -2
  11. package/dist/helpers/message.js.map +1 -1
  12. package/dist/helpers/stt/useSTT.js +171 -73
  13. package/dist/helpers/stt/useSTT.js.map +1 -1
  14. package/dist/helpers/translations.js +10 -3
  15. package/dist/helpers/translations.js.map +1 -1
  16. package/dist/helpers/tts/useTTS.js +16 -1
  17. package/dist/helpers/tts/useTTS.js.map +1 -1
  18. package/dist/index.js +13 -2
  19. package/dist/index.js.map +1 -1
  20. package/esm/components/CompletionProviderStatus/CompletionProviderStatus.js +0 -22
  21. package/esm/components/CompletionProviderStatus/CompletionProviderStatus.js.map +1 -1
  22. package/esm/components/MemoriArtifactSystem/components/ArtifactHandler/ArtifactHandler.js +14 -2
  23. package/esm/components/MemoriArtifactSystem/components/ArtifactHandler/ArtifactHandler.js.map +1 -1
  24. package/esm/components/MemoriWidget/MemoriWidget.d.ts +7 -1
  25. package/esm/components/MemoriWidget/MemoriWidget.js +7 -115
  26. package/esm/components/MemoriWidget/MemoriWidget.js.map +1 -1
  27. package/esm/helpers/message.d.ts +1 -1
  28. package/esm/helpers/message.js +2 -2
  29. package/esm/helpers/message.js.map +1 -1
  30. package/esm/helpers/stt/useSTT.js +171 -73
  31. package/esm/helpers/stt/useSTT.js.map +1 -1
  32. package/esm/helpers/translations.js +10 -3
  33. package/esm/helpers/translations.js.map +1 -1
  34. package/esm/helpers/tts/useTTS.js +16 -1
  35. package/esm/helpers/tts/useTTS.js.map +1 -1
  36. package/esm/index.js +13 -2
  37. package/esm/index.js.map +1 -1
  38. package/package.json +1 -1
  39. package/src/components/CompletionProviderStatus/CompletionProviderStatus.tsx +1 -33
  40. package/src/components/MemoriArtifactSystem/components/ArtifactHandler/ArtifactHandler.tsx +23 -1
  41. package/src/components/MemoriWidget/MemoriWidget.tsx +14 -126
  42. package/src/helpers/message.ts +2 -2
  43. package/src/helpers/stt/useSTT.ts +225 -87
  44. package/src/helpers/translations.ts +12 -1
  45. package/src/helpers/tts/useTTS.ts +28 -1
  46. package/src/index.tsx +26 -8
  47. package/dist/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.css +0 -319
  48. package/dist/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.d.ts +0 -4
  49. package/dist/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.js +0 -50
  50. package/dist/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.js.map +0 -1
  51. package/dist/components/MemoriArtifactSystem/context/ArtifactSystemContext.d.ts +0 -12
  52. package/dist/components/MemoriArtifactSystem/context/ArtifactSystemContext.js +0 -22
  53. package/dist/components/MemoriArtifactSystem/context/ArtifactSystemContext.js.map +0 -1
  54. package/dist/components/MemoriArtifactSystem/hooks/useArtifactSystem.d.ts +0 -12
  55. package/dist/components/MemoriArtifactSystem/hooks/useArtifactSystem.js +0 -288
  56. package/dist/components/MemoriArtifactSystem/hooks/useArtifactSystem.js.map +0 -1
  57. package/dist/components/MemoriArtifactSystem/index.d.ts +0 -9
  58. package/dist/components/MemoriArtifactSystem/index.js +0 -28
  59. package/dist/components/MemoriArtifactSystem/index.js.map +0 -1
  60. package/esm/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.css +0 -319
  61. package/esm/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.d.ts +0 -4
  62. package/esm/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.js +0 -47
  63. package/esm/components/MemoriArtifactSystem/components/ArtifactHistory/ArtifactHistory.js.map +0 -1
  64. package/esm/components/MemoriArtifactSystem/context/ArtifactSystemContext.d.ts +0 -12
  65. package/esm/components/MemoriArtifactSystem/context/ArtifactSystemContext.js +0 -17
  66. package/esm/components/MemoriArtifactSystem/context/ArtifactSystemContext.js.map +0 -1
  67. package/esm/components/MemoriArtifactSystem/hooks/useArtifactSystem.d.ts +0 -12
  68. package/esm/components/MemoriArtifactSystem/hooks/useArtifactSystem.js +0 -281
  69. package/esm/components/MemoriArtifactSystem/hooks/useArtifactSystem.js.map +0 -1
  70. package/esm/components/MemoriArtifactSystem/index.d.ts +0 -9
  71. package/esm/components/MemoriArtifactSystem/index.js +0 -9
  72. package/esm/components/MemoriArtifactSystem/index.js.map +0 -1
@@ -49,28 +49,50 @@ export interface UseSTTOptions {
49
49
  */
50
50
  export type RecordingState = 'idle' | 'recording' | 'processing' | 'error';
51
51
 
52
- /**
53
- * Convert audio blob to WAV format
54
- */
52
+ /**
53
+ * Convert audio blob to WAV format
54
+ */
55
55
  async function convertToWav(audioBlob: Blob): Promise<Blob> {
56
56
  return new Promise((resolve, reject) => {
57
- const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
57
+ // Safari compatibility: check for AudioContext support
58
+ const AudioContextClass = window.AudioContext || (window as any).webkitAudioContext;
59
+ if (!AudioContextClass) {
60
+ reject(new Error('AudioContext not supported in this browser'));
61
+ return;
62
+ }
63
+
64
+ const audioContext = new AudioContextClass();
58
65
  const fileReader = new FileReader();
59
66
 
60
67
  fileReader.onload = async () => {
61
68
  try {
62
69
  const arrayBuffer = fileReader.result as ArrayBuffer;
70
+
71
+ // Resume context if suspended (required for Safari)
72
+ if (audioContext.state === 'suspended') {
73
+ await audioContext.resume();
74
+ }
75
+
63
76
  const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
64
77
 
65
78
  // Convert to WAV format
66
79
  const wavBlob = audioBufferToWav(audioBuffer);
80
+
81
+ // Close the audio context to free resources
82
+ await audioContext.close();
83
+
67
84
  resolve(wavBlob);
68
85
  } catch (error) {
86
+ console.error('Error converting audio to WAV:', error);
69
87
  reject(error);
70
88
  }
71
89
  };
72
90
 
73
- fileReader.onerror = () => reject(new Error('Failed to read audio file'));
91
+ fileReader.onerror = () => {
92
+ console.error('Failed to read audio file');
93
+ reject(new Error('Failed to read audio file'));
94
+ };
95
+
74
96
  fileReader.readAsArrayBuffer(audioBlob);
75
97
  });
76
98
  }
@@ -150,11 +172,15 @@ export function useSTT(
150
172
  const audioContextRef = useRef<AudioContext | null>(null);
151
173
  const analyserRef = useRef<AnalyserNode | null>(null);
152
174
  const dataArrayRef = useRef<Uint8Array | null>(null);
175
+ const backgroundNoiseRef = useRef<number>(0);
176
+ const audioActivityHistoryRef = useRef<number[]>([]);
177
+ const lastStopTimeRef = useRef<number>(0); // Track when recording was last stopped
153
178
  const apiUrl = options.apiUrl || '/api/stt';
154
- const silenceTimeout = options.silenceTimeout || 2; // Default 2 seconds
179
+ const silenceTimeout = options.silenceTimeout || 3; // Increased default to 3 seconds
155
180
 
156
181
  const initializeRecording = useCallback(async (): Promise<boolean> => {
157
182
  try {
183
+
158
184
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
159
185
  throw new Error('Media recording is not supported in this browser');
160
186
  }
@@ -170,49 +196,73 @@ export function useSTT(
170
196
 
171
197
  audioStreamRef.current = stream;
172
198
 
173
- // Initialize audio context for silence detection if continuous recording is enabled
199
+ // Initialize audio context for silence detection only if continuous recording is enabled
174
200
  if (options.continuousRecording) {
175
201
  try {
176
- audioContextRef.current = new (window.AudioContext || (window as any).webkitAudioContext)();
177
- analyserRef.current = audioContextRef.current.createAnalyser();
178
- analyserRef.current.fftSize = 256;
179
- const bufferLength = analyserRef.current.frequencyBinCount;
180
- dataArrayRef.current = new Uint8Array(bufferLength);
181
-
182
- const source = audioContextRef.current.createMediaStreamSource(stream);
183
- source.connect(analyserRef.current);
202
+ // Safari compatibility: check for AudioContext support
203
+ const AudioContextClass = window.AudioContext || (window as any).webkitAudioContext;
204
+ if (AudioContextClass) {
205
+ audioContextRef.current = new AudioContextClass({
206
+ sampleRate: 16000, // Match the audio input sample rate
207
+ latencyHint: 'interactive' // Better for real-time analysis
208
+ });
209
+
210
+
211
+ // Resume context if suspended (required for Safari)
212
+ if (audioContextRef.current.state === 'suspended') {
213
+ await audioContextRef.current.resume();
214
+ }
215
+
216
+ // Wait a bit for Safari to stabilize the AudioContext
217
+ await new Promise(resolve => setTimeout(resolve, 100));
218
+
219
+ analyserRef.current = audioContextRef.current.createAnalyser();
220
+ analyserRef.current.fftSize = 512; // Increased for better frequency resolution
221
+ analyserRef.current.smoothingTimeConstant = 0.3; // Reduced for more responsive detection
222
+ analyserRef.current.minDecibels = -90;
223
+ analyserRef.current.maxDecibels = -10;
224
+
225
+ const bufferLength = analyserRef.current.frequencyBinCount;
226
+ dataArrayRef.current = new Uint8Array(bufferLength);
227
+
228
+ const source = audioContextRef.current.createMediaStreamSource(stream);
229
+ source.connect(analyserRef.current);
230
+
231
+ // Initialize audio analysis state
232
+ backgroundNoiseRef.current = 0;
233
+ audioActivityHistoryRef.current = [];
234
+
235
+ } else {
236
+ console.warn('🎤 [INIT] AudioContext not supported in this browser');
237
+ }
184
238
  } catch (err) {
185
239
  // Silence detection initialization failed but we can continue
240
+ console.error('🎤 [INIT] Silence detection initialization failed:', err);
186
241
  }
242
+ } else {
243
+ console.log('🎤 [INIT] Continuous recording disabled, skipping silence detection setup');
187
244
  }
188
245
 
189
- // Format selection based on provider
246
+ // Format selection based on provider with Safari compatibility
190
247
  let mimeType = '';
191
248
 
192
- if (config.provider === 'azure') {
193
- // For Azure, we'll record in a supported format and convert to WAV
194
- if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
195
- mimeType = 'audio/webm;codecs=opus';
196
- } else if (MediaRecorder.isTypeSupported('audio/webm')) {
197
- mimeType = 'audio/webm';
198
- } else if (MediaRecorder.isTypeSupported('audio/mp4')) {
199
- mimeType = 'audio/mp4';
200
- } else if (MediaRecorder.isTypeSupported('audio/ogg;codecs=opus')) {
201
- mimeType = 'audio/ogg;codecs=opus';
202
- }
203
- } else {
204
- // For OpenAI, use the best available format
205
- if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
206
- mimeType = 'audio/webm;codecs=opus';
207
- } else if (MediaRecorder.isTypeSupported('audio/webm')) {
208
- mimeType = 'audio/webm';
209
- } else if (MediaRecorder.isTypeSupported('audio/mp4')) {
210
- mimeType = 'audio/mp4';
211
- } else if (MediaRecorder.isTypeSupported('audio/ogg;codecs=opus')) {
212
- mimeType = 'audio/ogg;codecs=opus';
249
+ // Safari compatibility: prefer formats that work well on Safari
250
+ const supportedFormats = [
251
+ 'audio/mp4', // Best Safari support
252
+ 'audio/webm;codecs=opus',
253
+ 'audio/webm',
254
+ 'audio/ogg;codecs=opus',
255
+ 'audio/wav' // Fallback
256
+ ];
257
+
258
+ for (const format of supportedFormats) {
259
+ if (MediaRecorder.isTypeSupported(format)) {
260
+ mimeType = format;
261
+ break;
213
262
  }
214
263
  }
215
264
 
265
+
216
266
  const mediaRecorder = new MediaRecorder(
217
267
  stream,
218
268
  mimeType ? { mimeType } : {}
@@ -225,11 +275,10 @@ export function useSTT(
225
275
  };
226
276
 
227
277
  mediaRecorder.onstop = async () => {
228
- if (!isRecordingRef.current || !isMountedRef.current) {
229
- return;
230
- }
231
278
 
279
+ // Immediately set processing state to prevent ghost messages
232
280
  setRecordingState('processing');
281
+ setIsListening(false);
233
282
 
234
283
  try {
235
284
  if (chunksRef.current.length === 0) {
@@ -244,28 +293,31 @@ export function useSTT(
244
293
  throw new Error('Recorded audio is empty');
245
294
  }
246
295
 
296
+
247
297
  // Convert to WAV if using Azure
248
298
  if (config.provider === 'azure') {
249
299
  try {
250
- console.log('Converting audio to WAV format for Azure');
251
300
  audioBlob = await convertToWav(audioBlob);
252
- console.log('Audio converted to WAV successfully');
253
301
  } catch (conversionError) {
254
- console.error('Failed to convert audio to WAV:', conversionError);
255
302
  throw new Error('Failed to convert audio to WAV format for Azure');
256
303
  }
257
304
  }
258
305
 
259
306
  const result = await transcribeAudio(audioBlob);
260
307
 
261
- if (processSpeechAndSendMessage) {
262
- processSpeechAndSendMessage(result.text);
263
- }
308
+ // Only process if we have meaningful text
309
+ if (result.text && result.text.trim().length > 0) {
310
+ if (processSpeechAndSendMessage) {
311
+ processSpeechAndSendMessage(result.text);
312
+ }
264
313
 
265
- setLastTranscription(result);
314
+ setLastTranscription(result);
266
315
 
267
- if (options.onTranscriptionComplete) {
268
- options.onTranscriptionComplete(result);
316
+ if (options.onTranscriptionComplete) {
317
+ options.onTranscriptionComplete(result);
318
+ }
319
+ } else {
320
+ console.log('No meaningful text transcribed, skipping message processing');
269
321
  }
270
322
 
271
323
  setRecordingState('idle');
@@ -312,30 +364,49 @@ export function useSTT(
312
364
 
313
365
  /**
314
366
  * Detect if there's audio activity (not silence)
367
+ * Only works when continuous recording is enabled
315
368
  */
316
369
  const detectAudioActivity = useCallback((): boolean => {
370
+ if (!options.continuousRecording) {
371
+ return false;
372
+ }
373
+
317
374
  if (!analyserRef.current || !dataArrayRef.current) {
318
375
  return false;
319
376
  }
320
377
 
321
- analyserRef.current.getByteFrequencyData(dataArrayRef.current);
322
-
323
- // Calculate average volume level
324
- const average = dataArrayRef.current.reduce((sum, value) => sum + value, 0) / dataArrayRef.current.length;
325
-
326
- // Consider audio active if average volume is above threshold
327
- const threshold = 10; // Adjust this value as needed
328
- return average > threshold;
329
- }, []);
378
+ try {
379
+ analyserRef.current.getByteFrequencyData(dataArrayRef.current);
380
+
381
+ // Calculate simple average volume
382
+ let sum = 0;
383
+ for (let i = 0; i < dataArrayRef.current.length; i++) {
384
+ sum += dataArrayRef.current[i];
385
+ }
386
+ const averageVolume = sum / dataArrayRef.current.length;
387
+
388
+ // Simple threshold - if volume is above 20, consider it activity
389
+ const hasActivity = averageVolume > 20;
390
+
391
+
392
+ return hasActivity;
393
+ } catch (error) {
394
+ return false;
395
+ }
396
+ }, [options.continuousRecording]);
330
397
 
331
398
  /**
332
399
  * Start silence detection monitoring
400
+ * Only works when continuous recording is enabled
333
401
  */
334
402
  const startSilenceDetection = useCallback(() => {
335
403
  if (!options.continuousRecording || !analyserRef.current) {
336
404
  return;
337
405
  }
338
406
 
407
+ let silenceCount = 0;
408
+ const maxSilenceCount = 30; // 3 seconds of silence (30 * 100ms)
409
+
339
410
  const checkAudioActivity = () => {
340
411
  if (!isRecordingRef.current || !isMountedRef.current) {
341
412
  return;
@@ -344,36 +415,33 @@ export function useSTT(
344
415
  const hasActivity = detectAudioActivity();
345
416
 
346
417
  if (hasActivity) {
347
- // Reset silence timeout when audio activity is detected
348
- if (silenceTimeoutRef.current) {
349
- clearTimeout(silenceTimeoutRef.current);
350
- }
418
+ // Reset silence counter when activity is detected
419
+ silenceCount = 0;
420
+ } else {
421
+ // Increment silence counter
422
+ silenceCount++;
351
423
 
352
- // Set new timeout for when user stops speaking
353
- silenceTimeoutRef.current = setTimeout(() => {
354
- if (isRecordingRef.current && isMountedRef.current) {
355
- stopRecording();
424
+ // Stop recording after 3 seconds of silence
425
+ if (silenceCount >= maxSilenceCount) {
426
+ isRecordingRef.current = false;
427
+ if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
428
+ mediaRecorderRef.current.stop();
356
429
  }
357
- }, silenceTimeout * 1000);
430
+ return;
431
+ }
358
432
  }
359
433
  };
360
434
 
361
- // Check audio activity every 50ms for responsive detection
362
- const intervalId = setInterval(checkAudioActivity, 50);
363
-
364
- // Store interval ID for cleanup
435
+ // Check audio activity every 100ms
436
+ const intervalId = setInterval(checkAudioActivity, 100);
365
437
  (window as any).memoriSilenceDetectionInterval = intervalId;
366
- }, [options.continuousRecording, detectAudioActivity, silenceTimeout]);
438
+ }, [options.continuousRecording, detectAudioActivity]);
367
439
 
368
440
  /**
369
441
  * Stop silence detection monitoring
442
+ * Only works when continuous recording is enabled
370
443
  */
371
444
  const stopSilenceDetection = useCallback(() => {
372
- if (silenceTimeoutRef.current) {
373
- clearTimeout(silenceTimeoutRef.current);
374
- silenceTimeoutRef.current = null;
375
- }
376
-
377
445
  if ((window as any).memoriSilenceDetectionInterval) {
378
446
  clearInterval((window as any).memoriSilenceDetectionInterval);
379
447
  (window as any).memoriSilenceDetectionInterval = null;
@@ -442,49 +510,81 @@ export function useSTT(
442
510
  * Start recording audio
443
511
  */
444
512
  const startRecording = useCallback(async (): Promise<void> => {
513
+ console.log('🎤 [START] Starting recording...');
514
+ console.log('🎤 [START] Mounted:', isMountedRef.current, 'Muted:', microphoneMuted, 'State:', recordingState);
515
+
516
+ // Prevent immediate restart after stopping (cooldown period)
517
+ const timeSinceLastStop = Date.now() - lastStopTimeRef.current;
518
+ if (timeSinceLastStop < 1000) { // 1 second cooldown
519
+ console.log('🎤 [START] Too soon after last stop, waiting...', timeSinceLastStop + 'ms');
520
+ return;
521
+ }
522
+
445
523
  if (
446
- !isMountedRef.current ||
447
524
  microphoneMuted ||
448
525
  recordingState === 'recording'
449
526
  ) {
527
+ console.log('🎤 [START] Cannot start recording - conditions not met');
450
528
  return;
451
529
  }
452
530
 
453
531
  if (!hasUserActivatedRecord) {
532
+ console.log('🎤 [START] Setting user activated record flag');
454
533
  setHasUserActivatedRecord(true);
455
534
  }
456
535
 
457
536
  try {
458
537
  setError(null);
459
538
  setRecordingState('recording');
539
+ console.log('🎤 [START] Recording state set to recording');
460
540
 
461
541
  // Initialize recording if needed
462
542
  if (!mediaRecorderRef.current) {
543
+ console.log('🎤 [START] MediaRecorder not initialized, initializing...');
463
544
  const initialized = await initializeRecording();
464
545
  if (!initialized) {
546
+ console.error('🎤 [START] Failed to initialize recording');
465
547
  return;
466
548
  }
549
+ console.log('🎤 [START] Recording initialized successfully');
467
550
  }
468
551
 
469
552
  // Reset chunks and start recording
470
553
  chunksRef.current = [];
471
554
  isRecordingRef.current = true;
555
+ console.log('🎤 [START] Reset chunks and set recording flag');
472
556
 
473
557
  if (
474
558
  mediaRecorderRef.current &&
475
559
  mediaRecorderRef.current.state === 'inactive'
476
560
  ) {
477
- mediaRecorderRef.current.start(100); // Collect data every 100ms
561
+ // Use different timeslice based on recording mode and browser
562
+ // For Safari, use longer timeslice to avoid issues with short recordings
563
+ // For other browsers, use shorter timeslice for real-time analysis
564
+ const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
565
+ const timeslice = isSafari ? 500 : 100; // 500ms for Safari, 100ms for others
566
+
567
+ console.log(`🎤 [START] Starting MediaRecorder with ${timeslice}ms timeslice (Safari: ${isSafari})`);
568
+ console.log('🎤 [START] Continuous recording enabled:', options.continuousRecording);
569
+
570
+ mediaRecorderRef.current.start(timeslice);
478
571
  setIsListening(true);
572
+ console.log('🎤 [START] MediaRecorder started, listening state set to true');
479
573
 
480
574
  // Start silence detection if continuous recording is enabled
481
575
  if (options.continuousRecording) {
576
+ console.log('🎤 [START] Starting silence detection for continuous recording');
482
577
  startSilenceDetection();
578
+ } else {
579
+ console.log('🎤 [START] Continuous recording disabled, skipping silence detection');
483
580
  }
581
+ } else {
582
+ console.log('🎤 [START] MediaRecorder not available or not inactive, state:', mediaRecorderRef.current?.state);
484
583
  }
485
584
  } catch (err) {
486
585
  const errorMsg =
487
586
  err instanceof Error ? err : new Error('Failed to start recording');
587
+ console.error('🎤 [START] Error starting recording:', errorMsg);
488
588
  setError(errorMsg);
489
589
  setRecordingState('error');
490
590
  isRecordingRef.current = false;
@@ -506,25 +606,44 @@ export function useSTT(
506
606
  * Stop recording audio
507
607
  */
508
608
  const stopRecording = useCallback((): void => {
609
+ console.log('🛑 [STOP] Stop recording called');
610
+ console.log('🛑 [STOP] isRecordingRef:', isRecordingRef.current, 'continuousRecording:', options.continuousRecording);
611
+
509
612
  if (!isRecordingRef.current) {
613
+ console.log('🛑 [STOP] Not currently recording, ignoring stop request');
510
614
  return;
511
615
  }
512
616
 
513
617
  try {
618
+ console.log('🛑 [STOP] Setting listening to false');
514
619
  setIsListening(false);
515
620
 
516
- // Stop silence detection
517
- stopSilenceDetection();
621
+ // Record the stop time for cooldown
622
+ lastStopTimeRef.current = Date.now();
623
+
624
+ // Stop silence detection only if continuous recording was enabled
625
+ if (options.continuousRecording) {
626
+ console.log('🛑 [STOP] Stopping silence detection for continuous recording');
627
+ stopSilenceDetection();
628
+ } else {
629
+ console.log('🛑 [STOP] Continuous recording disabled, skipping silence detection stop');
630
+ }
518
631
 
519
632
  if (
520
633
  mediaRecorderRef.current &&
521
634
  mediaRecorderRef.current.state === 'recording'
522
635
  ) {
636
+ console.log('🛑 [STOP] Stopping MediaRecorder');
523
637
  mediaRecorderRef.current.stop();
638
+ } else {
639
+ console.log('🛑 [STOP] MediaRecorder not available or not recording, state:', mediaRecorderRef.current?.state);
524
640
  }
641
+
642
+ console.log('🛑 [STOP] Recording stop completed');
525
643
  } catch (err) {
526
644
  const errorMsg =
527
645
  err instanceof Error ? err : new Error('Failed to stop recording');
646
+ console.error('🛑 [STOP] Error stopping recording:', errorMsg);
528
647
  setError(errorMsg);
529
648
  setRecordingState('error');
530
649
  isRecordingRef.current = false;
@@ -539,10 +658,16 @@ export function useSTT(
539
658
  * Toggle recording state
540
659
  */
541
660
  const toggleRecording = useCallback(async (): Promise<void> => {
661
+ console.log('🔄 [TOGGLE] Toggle recording called, current state:', recordingState);
662
+
542
663
  if (recordingState === 'recording') {
664
+ console.log('🔄 [TOGGLE] Currently recording, stopping...');
543
665
  stopRecording();
544
666
  } else if (recordingState === 'idle') {
667
+ console.log('🔄 [TOGGLE] Currently idle, starting recording...');
545
668
  await startRecording();
669
+ } else {
670
+ console.log('🔄 [TOGGLE] Cannot toggle from state:', recordingState);
546
671
  }
547
672
  }, [recordingState, startRecording, stopRecording]);
548
673
 
@@ -579,19 +704,32 @@ export function useSTT(
579
704
  audioStreamRef.current = null;
580
705
  }
581
706
 
582
- // Clean up audio context
583
- if (audioContextRef.current) {
584
- audioContextRef.current.close();
707
+ // Clean up audio context only if continuous recording was enabled
708
+ if (options.continuousRecording && audioContextRef.current) {
709
+ try {
710
+ // Check if AudioContext is still valid before closing
711
+ if (audioContextRef.current.state !== 'closed') {
712
+ audioContextRef.current.close();
713
+ }
714
+ } catch (error) {
715
+ console.warn('Error closing AudioContext:', error);
716
+ }
585
717
  audioContextRef.current = null;
586
718
  }
587
719
 
588
- // Stop silence detection
589
- stopSilenceDetection();
720
+ // Stop silence detection only if continuous recording was enabled
721
+ if (options.continuousRecording) {
722
+ stopSilenceDetection();
723
+ }
724
+
725
+ // Reset audio analysis state
726
+ analyserRef.current = null;
727
+ dataArrayRef.current = null;
590
728
 
591
729
  chunksRef.current = [];
592
730
  setIsListening(false);
593
731
  setRecordingState('idle');
594
- }, [stopSilenceDetection]);
732
+ }, [options.continuousRecording, stopSilenceDetection]);
595
733
 
596
734
  /**
597
735
  * Cleanup on unmount
@@ -18,13 +18,18 @@ const stripOutputTags = (text: string) => {
18
18
  return text.replaceAll(/<output.*?>(.*?)<\/output>/g, '');
19
19
  };
20
20
 
21
+ const stripThinkTags = (text: string) => {
22
+ return text.replaceAll(/<think.*?>(.*?)<\/think>/gs, '');
23
+ };
24
+
21
25
  export const getTranslation = async (
22
26
  text: string,
23
27
  to: string,
24
28
  from?: string,
25
29
  baseUrl?: string
26
30
  ): Promise<DeeplTranslation> => {
27
- let textToTranslate = stripOutputTags(text);
31
+ let textToTranslate = stripOutputTags(stripThinkTags(text));
32
+ const justTheThinkTags = text.match(/<think.*?>(.*?)<\/think>/gs);
28
33
 
29
34
  const isReservedKeyword = dialogKeywords.indexOf(text.toLowerCase()) > -1;
30
35
  const fromLanguage = isReservedKeyword ? 'IT' : from?.toUpperCase();
@@ -47,5 +52,11 @@ export const getTranslation = async (
47
52
  }
48
53
  );
49
54
  const deeplResponse = await deeplResult.json();
55
+
56
+ //reapply the think tags to the translated text
57
+ if(deeplResponse && deeplResponse.translations && deeplResponse.translations[0]) {
58
+ deeplResponse.translations[0].text = (justTheThinkTags ? `<think>${justTheThinkTags}</think>` : '') + deeplResponse?.translations?.[0]?.text;
59
+ }
60
+
50
61
  return deeplResponse?.translations?.[0] ?? { text: textToTranslate };
51
62
  };
@@ -199,6 +199,10 @@ export function useTTS(
199
199
 
200
200
  // Only reset speaking flag after cleanup
201
201
  isSpeakingRef.current = false;
202
+
203
+ // Dispatch custom event to notify MemoriWidget that audio has ended
204
+ const e = new CustomEvent('MemoriAudioEnded');
205
+ document.dispatchEvent(e);
202
206
  }, [cleanup]);
203
207
 
204
208
  /**
@@ -223,7 +227,14 @@ export function useTTS(
223
227
  }
224
228
 
225
229
  // Early exit conditions before setting speaking flag
226
- if (!text || options.preview || speakerMuted) {
230
+ if (!text || !text.trim() || options.preview) {
231
+ emitEndSpeakEvent();
232
+ return;
233
+ }
234
+
235
+ // If speaker is muted, completely disable TTS functionality
236
+ if (speakerMuted) {
237
+ console.log('[useTTS] TTS disabled - speaker is muted');
227
238
  emitEndSpeakEvent();
228
239
  return;
229
240
  }
@@ -320,6 +331,10 @@ export function useTTS(
320
331
  cleanup();
321
332
  isSpeakingRef.current = false;
322
333
  emitEndSpeakEvent();
334
+
335
+ // Dispatch custom event to notify MemoriWidget that audio has ended
336
+ const event = new CustomEvent('MemoriAudioEnded');
337
+ document.dispatchEvent(event);
323
338
  }
324
339
  };
325
340
 
@@ -327,6 +342,10 @@ export function useTTS(
327
342
  setIsPlaying(false);
328
343
  isSpeakingRef.current = false;
329
344
  emitEndSpeakEvent();
345
+
346
+ // Dispatch custom event to notify MemoriWidget that audio has ended
347
+ const e = new CustomEvent('MemoriAudioEnded');
348
+ document.dispatchEvent(e);
330
349
  };
331
350
 
332
351
  audioRef.current.onerror = (e) => {
@@ -338,6 +357,10 @@ export function useTTS(
338
357
  const errorMsg = new Error(`Audio playback failed. This may be due to a network issue or audio format problem.`);
339
358
  setError(errorMsg);
340
359
  emitEndSpeakEvent();
360
+
361
+ // Dispatch custom event to notify MemoriWidget that audio has ended
362
+ const event = new CustomEvent('MemoriAudioEnded');
363
+ document.dispatchEvent(event);
341
364
  };
342
365
 
343
366
  audioRef.current.load();
@@ -357,6 +380,10 @@ export function useTTS(
357
380
  setError(errorMsg);
358
381
 
359
382
  emitEndSpeakEvent();
383
+
384
+ // Dispatch custom event to notify MemoriWidget that audio has ended
385
+ const e = new CustomEvent('MemoriAudioEnded');
386
+ document.dispatchEvent(e);
360
387
  }
361
388
  },
362
389
  [
package/src/index.tsx CHANGED
@@ -180,7 +180,7 @@ const Memori: React.FC<Props> = ({
180
180
  if (data.provider) {
181
181
  setProvider(data.provider);
182
182
  } else {
183
- console.warn('Provider not found in speech key response');
183
+ console.warn('Provider not found in speech key response');
184
184
  }
185
185
  } catch (error) {
186
186
  console.error('Error fetching speech key:', error);
@@ -273,13 +273,31 @@ const Memori: React.FC<Props> = ({
273
273
  if (whiteListedDomains) {
274
274
  // check if we are client side
275
275
  if (typeof window !== 'undefined') {
276
- // check if the current domain is in the whiteListedDomains with Regex
277
- if (
278
- !whiteListedDomains.some((domain: string) =>
279
- new RegExp(domain).test(window.location.hostname)
280
- )
281
- ) {
282
- return null;
276
+ const referrer = document.referrer;
277
+ const currentHostname = window.location.hostname;
278
+ const referrerHostname = referrer ? new URL(referrer).hostname : null;
279
+
280
+ // Check if this is a preview context
281
+ const isInIframe = window.parent !== window;
282
+ const hasReferrerFromParent = referrer && referrer.length > 0;
283
+ const isSrcDocIframe = !currentHostname || currentHostname === '';
284
+
285
+ // Preview detection: iframe with referrer from parent (especially srcDoc)
286
+ const isPreview = isInIframe && hasReferrerFromParent && (
287
+ referrerHostname === currentHostname || // Normal same-origin
288
+ isSrcDocIframe || // srcDoc iframe case
289
+ window.location.search.includes('_preview=') // Preview token
290
+ );
291
+ // Skip whitelist check for preview
292
+ if (!isPreview) {
293
+ // check if the current domain is in the whiteListedDomains with Regex
294
+ if (
295
+ !whiteListedDomains.some((domain: string) =>
296
+ new RegExp(domain).test(window.location.hostname)
297
+ )
298
+ ) {
299
+ return null;
300
+ }
283
301
  }
284
302
  }
285
303
  }