@memori.ai/memori-react 8.7.9 → 8.8.1

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +29 -0
  2. package/dist/components/ChatBubble/ChatBubble.js +5 -1
  3. package/dist/components/ChatBubble/ChatBubble.js.map +1 -1
  4. package/dist/components/Header/Header.js +28 -14
  5. package/dist/components/Header/Header.js.map +1 -1
  6. package/dist/components/MemoriWidget/MemoriWidget.js +2 -9
  7. package/dist/components/MemoriWidget/MemoriWidget.js.map +1 -1
  8. package/dist/components/MicrophoneButton/MicrophoneButton.css +36 -3
  9. package/dist/components/MicrophoneButton/MicrophoneButton.js +25 -6
  10. package/dist/components/MicrophoneButton/MicrophoneButton.js.map +1 -1
  11. package/dist/components/SettingsDrawer/SettingsDrawer.d.ts +1 -1
  12. package/dist/components/SettingsDrawer/SettingsDrawer.js +2 -4
  13. package/dist/components/SettingsDrawer/SettingsDrawer.js.map +1 -1
  14. package/dist/helpers/stt/useSTT.d.ts +0 -6
  15. package/dist/helpers/stt/useSTT.js +7 -137
  16. package/dist/helpers/stt/useSTT.js.map +1 -1
  17. package/dist/helpers/tts/useTTS.d.ts +1 -3
  18. package/dist/helpers/tts/useTTS.js +69 -17
  19. package/dist/helpers/tts/useTTS.js.map +1 -1
  20. package/esm/components/ChatBubble/ChatBubble.js +5 -1
  21. package/esm/components/ChatBubble/ChatBubble.js.map +1 -1
  22. package/esm/components/Header/Header.js +28 -14
  23. package/esm/components/Header/Header.js.map +1 -1
  24. package/esm/components/MemoriWidget/MemoriWidget.js +2 -9
  25. package/esm/components/MemoriWidget/MemoriWidget.js.map +1 -1
  26. package/esm/components/MicrophoneButton/MicrophoneButton.css +36 -3
  27. package/esm/components/MicrophoneButton/MicrophoneButton.js +25 -6
  28. package/esm/components/MicrophoneButton/MicrophoneButton.js.map +1 -1
  29. package/esm/components/SettingsDrawer/SettingsDrawer.d.ts +1 -1
  30. package/esm/components/SettingsDrawer/SettingsDrawer.js +2 -4
  31. package/esm/components/SettingsDrawer/SettingsDrawer.js.map +1 -1
  32. package/esm/helpers/stt/useSTT.d.ts +0 -6
  33. package/esm/helpers/stt/useSTT.js +7 -137
  34. package/esm/helpers/stt/useSTT.js.map +1 -1
  35. package/esm/helpers/tts/useTTS.d.ts +1 -3
  36. package/esm/helpers/tts/useTTS.js +69 -17
  37. package/esm/helpers/tts/useTTS.js.map +1 -1
  38. package/package.json +1 -1
  39. package/src/components/ChatBubble/ChatBubble.tsx +8 -1
  40. package/src/components/ChatInputs/__snapshots__/ChatInputs.test.tsx.snap +100 -90
  41. package/src/components/Header/Header.tsx +43 -30
  42. package/src/components/MemoriWidget/MemoriWidget.tsx +3 -19
  43. package/src/components/MicrophoneButton/MicrophoneButton.css +36 -3
  44. package/src/components/MicrophoneButton/MicrophoneButton.tsx +44 -18
  45. package/src/components/SettingsDrawer/SettingsDrawer.tsx +4 -11
  46. package/src/helpers/stt/useSTT.ts +43 -253
  47. package/src/helpers/tts/useTTS.ts +281 -201
  48. package/src/index.stories.tsx +15 -14
  49. package/dist/components/AccountForm/AccountForm.d.ts +0 -11
  50. package/dist/components/AccountForm/AccountForm.js +0 -121
  51. package/dist/components/AccountForm/AccountForm.js.map +0 -1
  52. package/esm/components/AccountForm/AccountForm.d.ts +0 -11
  53. package/esm/components/AccountForm/AccountForm.js +0 -118
  54. package/esm/components/AccountForm/AccountForm.js.map +0 -1
package/src/helpers/stt/useSTT.ts
@@ -1,11 +1,7 @@
-// hooks/useSTT.ts - Modified version for Azure WAV support
+// hooks/useSTT.ts - Simplified version for Speech-to-Text functionality
 // Audio format compatibility:
 // - MediaRecorder supports: webm, mp4, ogg formats
-// - Azure STT supported formats:
-//   * WAV format (required for Azure Speech SDK)
-//   * webm-16khz-16bit-mono-opus (for REST API)
-//   * webm-24khz-16bit-24kbps-mono-opus
-//   * webm-24khz-16bit-mono-opus
+// - Azure STT supported formats: WAV format (required for Azure Speech SDK)
 // - OpenAI: Supports multiple formats including webm, mp4, ogg
 import { useState, useCallback, useRef, useEffect } from 'react';
 import { getLocalConfig } from '../configuration';
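
The compatibility note in this header is the crux of the release: browsers' MediaRecorder rarely records WAV directly, while the Azure Speech SDK only accepts WAV, so the hook records in whatever container the browser offers and transcodes afterwards. A standalone sketch of that constraint (illustrative, not code from the package):

    // Probe what this browser can actually record; the order mirrors the
    // package's own preference list further down. 'audio/wav' is rarely
    // supported, which is why Azure uploads are transcoded client-side.
    const recordable = [
      'audio/mp4',
      'audio/webm;codecs=opus',
      'audio/ogg;codecs=opus',
      'audio/wav',
    ].filter(type => MediaRecorder.isTypeSupported(type));
    // Result varies by browser; Safari typically offers 'audio/mp4'.
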
@@ -38,10 +34,6 @@ export interface UseSTTOptions {
   apiUrl?: string;
   onTranscriptionComplete?: (result: STTResult) => void;
   onError?: (error: Error) => void;
-  continuousRecording?: boolean;
-  autoStart?: boolean;
-  processSpeechAndSendMessage?: (text: string) => void;
-  silenceTimeout?: number; // Timeout in milliseconds for silence detection
 }
 
 /**
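
With the continuous-recording knobs removed, the options bag narrows to transport and callback concerns. An illustrative value (the handlers are placeholders, not package code):

    const options: UseSTTOptions = {
      apiUrl: '/api/stt',
      onTranscriptionComplete: result => console.log('heard:', result.text),
      onError: error => console.error('STT failed:', error),
    };
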
@@ -49,13 +41,13 @@ export interface UseSTTOptions {
  */
 export type RecordingState = 'idle' | 'recording' | 'processing' | 'error';
 
-/**
- * Convert audio blob to WAV format
- */
+/**
+ * Convert audio blob to WAV format
+ */
 async function convertToWav(audioBlob: Blob): Promise<Blob> {
   return new Promise((resolve, reject) => {
-    // Safari compatibility: check for AudioContext support
-    const AudioContextClass = window.AudioContext || (window as any).webkitAudioContext;
+    const AudioContextClass =
+      window.AudioContext || (window as any).webkitAudioContext;
     if (!AudioContextClass) {
       reject(new Error('AudioContext not supported in this browser'));
       return;
@@ -63,34 +55,31 @@ async function convertToWav(audioBlob: Blob): Promise<Blob> {
 
     const audioContext = new AudioContextClass();
     const fileReader = new FileReader();
-
+
     fileReader.onload = async () => {
       try {
         const arrayBuffer = fileReader.result as ArrayBuffer;
-
-        // Resume context if suspended (required for Safari)
+
         if (audioContext.state === 'suspended') {
           await audioContext.resume();
         }
-
+
         const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
-
-        // Convert to WAV format
+
        const wavBlob = audioBufferToWav(audioBuffer);
-
-        // Close the audio context to free resources
+
        await audioContext.close();
-
+
        resolve(wavBlob);
       } catch (error) {
         reject(error);
       }
     };
-
+
     fileReader.onerror = () => {
       reject(new Error('Failed to read audio file'));
     };
-
+
     fileReader.readAsArrayBuffer(audioBlob);
   });
 }
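
Taken together, convertToWav decodes whatever container MediaRecorder produced via AudioContext.decodeAudioData and re-encodes it as PCM WAV. A hypothetical call site (recorder and chunks stand in for the hook's internal refs):

    // Assemble the recorded chunks, then transcode before an Azure upload.
    const recorded = new Blob(chunks, { type: recorder.mimeType || 'audio/webm' });
    const wavBlob = await convertToWav(recorded); // 16-bit PCM, type 'audio/wav'
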
@@ -104,14 +93,14 @@ function audioBufferToWav(buffer: AudioBuffer): Blob {
   const numberOfChannels = buffer.numberOfChannels;
   const arrayBuffer = new ArrayBuffer(44 + length * numberOfChannels * 2);
   const view = new DataView(arrayBuffer);
-
+
   // WAV header
   const writeString = (offset: number, string: string) => {
     for (let i = 0; i < string.length; i++) {
       view.setUint8(offset + i, string.charCodeAt(i));
     }
   };
-
+
   writeString(0, 'RIFF');
   view.setUint32(4, 36 + length * numberOfChannels * 2, true);
   writeString(8, 'WAVE');
@@ -125,22 +114,29 @@ function audioBufferToWav(buffer: AudioBuffer): Blob {
   view.setUint16(34, 16, true);
   writeString(36, 'data');
   view.setUint32(40, length * numberOfChannels * 2, true);
-
+
   // Convert audio data
   let offset = 44;
   for (let i = 0; i < length; i++) {
     for (let channel = 0; channel < numberOfChannels; channel++) {
-      const sample = Math.max(-1, Math.min(1, buffer.getChannelData(channel)[i]));
-      view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);
+      const sample = Math.max(
+        -1,
+        Math.min(1, buffer.getChannelData(channel)[i])
+      );
+      view.setInt16(
+        offset,
+        sample < 0 ? sample * 0x8000 : sample * 0x7fff,
+        true
+      );
       offset += 2;
     }
   }
-
+
   return new Blob([arrayBuffer], { type: 'audio/wav' });
 }
 
 /**
- * Unified hook for handling Speech-to-Text functionality
+ * Simplified hook for handling Speech-to-Text functionality
  */
 export function useSTT(
   config: STTConfig,
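
For concreteness, here is the header arithmetic performed above, worked for a one-second mono buffer at 16 kHz (the values follow directly from the code):

    const samples = 16000, channels = 1;
    const dataBytes = samples * channels * 2; // 32000: two bytes per 16-bit sample
    const riffSize = 36 + dataBytes;          // 32036: RIFF chunk size at offset 4
    // Clamped float samples in [-1, 1] are scaled asymmetrically: negatives
    // by 0x8000 (32768) and positives by 0x7fff (32767), so -1 maps to
    // -32768 and +1 to 32767 without overflowing a signed 16-bit integer.
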
@@ -154,7 +150,6 @@ export function useSTT(
     getLocalConfig('muteMicrophone', !defaultEnableAudio)
   );
   const [hasUserActivatedRecord, setHasUserActivatedRecord] = useState(false);
-  const [error, setError] = useState<Error | null>(null);
   const [lastTranscription, setLastTranscription] = useState<STTResult | null>(
     null
   );
@@ -166,19 +161,10 @@ export function useSTT(
   const chunksRef = useRef<Blob[]>([]);
   const isRecordingRef = useRef<boolean>(false);
   const isMountedRef = useRef<boolean>(true);
-  const silenceTimeoutRef = useRef<NodeJS.Timeout | null>(null);
-  const audioContextRef = useRef<AudioContext | null>(null);
-  const analyserRef = useRef<AnalyserNode | null>(null);
-  const dataArrayRef = useRef<Uint8Array | null>(null);
-  const backgroundNoiseRef = useRef<number>(0);
-  const audioActivityHistoryRef = useRef<number[]>([]);
-  const lastStopTimeRef = useRef<number>(0); // Track when recording was last stopped
   const apiUrl = options.apiUrl || '/api/stt';
-  const silenceTimeout = options.silenceTimeout || 3; // Increased default to 3 seconds
 
   const initializeRecording = useCallback(async (): Promise<boolean> => {
     try {
-
       if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
         throw new Error('Media recording is not supported in this browser');
       }
@@ -188,64 +174,20 @@ export function useSTT(
           echoCancellation: true,
           noiseSuppression: true,
           autoGainControl: true,
-          sampleRate: 16000, // Optimal for speech recognition
+          sampleRate: 16000,
         },
       });
 
       audioStreamRef.current = stream;
 
-      // Initialize audio context for silence detection only if continuous recording is enabled
-      if (options.continuousRecording) {
-        try {
-          // Safari compatibility: check for AudioContext support
-          const AudioContextClass = window.AudioContext || (window as any).webkitAudioContext;
-          if (AudioContextClass) {
-            audioContextRef.current = new AudioContextClass({
-              sampleRate: 16000, // Match the audio input sample rate
-              latencyHint: 'interactive' // Better for real-time analysis
-            });
-
-
-            // Resume context if suspended (required for Safari)
-            if (audioContextRef.current.state === 'suspended') {
-              await audioContextRef.current.resume();
-            }
-
-            // Wait a bit for Safari to stabilize the AudioContext
-            await new Promise(resolve => setTimeout(resolve, 100));
-
-            analyserRef.current = audioContextRef.current.createAnalyser();
-            analyserRef.current.fftSize = 512; // Increased for better frequency resolution
-            analyserRef.current.smoothingTimeConstant = 0.3; // Reduced for more responsive detection
-            analyserRef.current.minDecibels = -90;
-            analyserRef.current.maxDecibels = -10;
-
-            const bufferLength = analyserRef.current.frequencyBinCount;
-            dataArrayRef.current = new Uint8Array(bufferLength);
-
-            const source = audioContextRef.current.createMediaStreamSource(stream);
-            source.connect(analyserRef.current);
-
-            // Initialize audio analysis state
-            backgroundNoiseRef.current = 0;
-            audioActivityHistoryRef.current = [];
-
-          }
-        } catch (err) {
-          // Silence detection initialization failed but we can continue
-        }
-      }
-
-      // Format selection based on provider with Safari compatibility
       let mimeType = '';
 
-      // Safari compatibility: prefer formats that work well on Safari
       const supportedFormats = [
-        'audio/mp4', // Best Safari support
+        'audio/mp4',
         'audio/webm;codecs=opus',
         'audio/webm',
         'audio/ogg;codecs=opus',
-        'audio/wav' // Fallback
+        'audio/wav',
       ];
 
       for (const format of supportedFormats) {
@@ -255,7 +197,6 @@ export function useSTT(
         }
       }
 
-
       const mediaRecorder = new MediaRecorder(
         stream,
         mimeType ? { mimeType } : {}
@@ -268,8 +209,6 @@ export function useSTT(
       };
 
       mediaRecorder.onstop = async () => {
-
-        // Immediately set processing state to prevent ghost messages
         setRecordingState('processing');
         setIsListening(false);
 
@@ -286,19 +225,18 @@ export function useSTT(
           throw new Error('Recorded audio is empty');
         }
 
-
-        // Convert to WAV if using Azure
         if (config.provider === 'azure') {
           try {
             audioBlob = await convertToWav(audioBlob);
           } catch (conversionError) {
-            throw new Error('Failed to convert audio to WAV format for Azure');
+            throw new Error(
+              'Failed to convert audio to WAV format for Azure'
+            );
           }
         }
 
         const result = await transcribeAudio(audioBlob);
 
-        // Only process if we have meaningful text
         if (result.text && result.text.trim().length > 0) {
           if (processSpeechAndSendMessage) {
             processSpeechAndSendMessage(result.text);
@@ -314,7 +252,6 @@ export function useSTT(
         setRecordingState('idle');
       } catch (err) {
         const errorMsg = err instanceof Error ? err : new Error(String(err));
-        setError(errorMsg);
         setRecordingState('error');
 
         if (options.onError) {
@@ -328,7 +265,6 @@ export function useSTT(
 
       mediaRecorder.onerror = () => {
         const errorMsg = new Error('Recording failed');
-        setError(errorMsg);
         setRecordingState('error');
         isRecordingRef.current = false;
 
@@ -342,7 +278,6 @@ export function useSTT(
     } catch (err) {
       const errorMsg =
         err instanceof Error ? err : new Error('Failed to access microphone');
-      setError(errorMsg);
       setRecordingState('error');
 
       if (options.onError) {
@@ -351,93 +286,8 @@ export function useSTT(
 
       return false;
     }
-  }, [config.provider, options, silenceTimeout]);
+  }, [config.provider, options]);
 
-  /**
-   * Detect if there's audio activity (not silence)
-   * Only works when continuous recording is enabled
-   */
-  const detectAudioActivity = useCallback((): boolean => {
-    if (!options.continuousRecording) {
-      return false;
-    }
-
-    if (!analyserRef.current || !dataArrayRef.current) {
-      return false;
-    }
-
-    try {
-      analyserRef.current.getByteFrequencyData(dataArrayRef.current);
-
-      // Calculate simple average volume
-      let sum = 0;
-      for (let i = 0; i < dataArrayRef.current.length; i++) {
-        sum += dataArrayRef.current[i];
-      }
-      const averageVolume = sum / dataArrayRef.current.length;
-
-      // Simple threshold - if volume is above 20, consider it activity
-      const hasActivity = averageVolume > 20;
-
-
-      return hasActivity;
-    } catch (error) {
-      return false;
-    }
-  }, [options.continuousRecording]);
-
-  /**
-   * Start silence detection monitoring
-   * Only works when continuous recording is enabled
-   */
-  const startSilenceDetection = useCallback(() => {
-    if (!options.continuousRecording || !analyserRef.current) {
-      return;
-    }
-
-    let silenceCount = 0;
-    const maxSilenceCount = 30; // 3 seconds of silence (30 * 100ms)
-
-    const checkAudioActivity = () => {
-      if (!isRecordingRef.current || !isMountedRef.current) {
-        return;
-      }
-
-      const hasActivity = detectAudioActivity();
-
-      if (hasActivity) {
-        // Reset silence counter when activity is detected
-        silenceCount = 0;
-      } else {
-        // Increment silence counter
-        silenceCount++;
-
-        // Stop recording after 3 seconds of silence
-        if (silenceCount >= maxSilenceCount) {
-          isRecordingRef.current = false;
-          if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
-            mediaRecorderRef.current.stop();
-          }
-          return;
-        }
-      }
-    };
-
-    // Check audio activity every 100ms
-    const intervalId = setInterval(checkAudioActivity, 100);
-    (window as any).memoriSilenceDetectionInterval = intervalId;
-  }, [options.continuousRecording, detectAudioActivity]);
-
-  /**
-   * Stop silence detection monitoring
-   * Only works when continuous recording is enabled
-   */
-  const stopSilenceDetection = useCallback(() => {
-    if ((window as any).memoriSilenceDetectionInterval) {
-      clearInterval((window as any).memoriSilenceDetectionInterval);
-      (window as any).memoriSilenceDetectionInterval = null;
-    }
-  }, []);
 
   /**
    * Transcribe audio blob using the API
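
For reference, the deleted silence detector boils down to a 100 ms polling loop over an AnalyserNode: average the byte frequency data, treat a mean above 20 as speech, and stop the recorder after 30 consecutive quiet ticks (about three seconds). A distilled sketch of that removed logic, not part of 8.8.1:

    function watchForSilence(analyser: AnalyserNode, recorder: MediaRecorder) {
      const data = new Uint8Array(analyser.frequencyBinCount);
      let quietTicks = 0;
      const id = setInterval(() => {
        analyser.getByteFrequencyData(data);
        const avg = data.reduce((sum, v) => sum + v, 0) / data.length;
        if (avg > 20) {
          quietTicks = 0; // speech detected: reset the counter
        } else if (++quietTicks >= 30 && recorder.state === 'recording') {
          clearInterval(id);
          recorder.stop(); // ~3 s of silence
        }
      }, 100);
      return () => clearInterval(id); // caller-side cleanup
    }
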
@@ -445,11 +295,10 @@ export function useSTT(
   const transcribeAudio = useCallback(
     async (audioBlob: Blob): Promise<STTResult> => {
       const formData = new FormData();
-      let fileExtension = 'webm'; // default fallback
+      let fileExtension = 'webm';
 
-      // Determine file extension based on provider and blob type
       if (config.provider === 'azure') {
-        fileExtension = 'wav'; // We convert to WAV for Azure
+        fileExtension = 'wav';
       } else if (mediaRecorderRef.current?.mimeType) {
         if (mediaRecorderRef.current.mimeType.includes('webm')) {
           fileExtension = 'webm';
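
The extension chosen here becomes the upload's filename, which is typically how the server route identifies the container. A hedged sketch of the request that follows (the 'audio' field name and the response shape are assumptions, not taken from this diff):

    async function postAudio(apiUrl: string, blob: Blob, ext: string) {
      const formData = new FormData();
      formData.append('audio', blob, `recording.${ext}`); // field name assumed
      const res = await fetch(apiUrl, { method: 'POST', body: formData });
      if (!res.ok) throw new Error(`STT request failed: ${res.status}`);
      return (await res.json()) as STTResult; // shape assumed
    }
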
@@ -501,17 +350,7 @@ export function useSTT(
    * Start recording audio
    */
   const startRecording = useCallback(async (): Promise<void> => {
-
-    // Prevent immediate restart after stopping (cooldown period)
-    const timeSinceLastStop = Date.now() - lastStopTimeRef.current;
-    if (timeSinceLastStop < 1000) { // 1 second cooldown
-      return;
-    }
-
-    if (
-      microphoneMuted ||
-      recordingState === 'recording'
-    ) {
+    if (microphoneMuted || recordingState === 'recording') {
       return;
     }
 
@@ -520,10 +359,8 @@ export function useSTT(
     }
 
     try {
-      setError(null);
       setRecordingState('recording');
 
-      // Initialize recording if needed
       if (!mediaRecorderRef.current) {
         const initialized = await initializeRecording();
         if (!initialized) {
@@ -531,7 +368,6 @@ export function useSTT(
         }
       }
 
-      // Reset chunks and start recording
       chunksRef.current = [];
       isRecordingRef.current = true;
 
@@ -539,24 +375,12 @@ export function useSTT(
         mediaRecorderRef.current &&
         mediaRecorderRef.current.state === 'inactive'
       ) {
-        // Use different timeslice based on recording mode and browser
-        // For Safari, use longer timeslice to avoid issues with short recordings
-        // For other browsers, use shorter timeslice for real-time analysis
-        const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
-        const timeslice = isSafari ? 500 : 100; // 500ms for Safari, 100ms for others
-
-        mediaRecorderRef.current.start(timeslice);
+        mediaRecorderRef.current.start();
         setIsListening(true);
-
-        // Start silence detection if continuous recording is enabled
-        if (options.continuousRecording) {
-          startSilenceDetection();
-        }
       }
     } catch (err) {
       const errorMsg =
         err instanceof Error ? err : new Error('Failed to start recording');
-      setError(errorMsg);
       setRecordingState('error');
       isRecordingRef.current = false;
 
@@ -570,7 +394,6 @@ export function useSTT(
     hasUserActivatedRecord,
     initializeRecording,
     options,
-    startSilenceDetection,
   ]);
 
   /**
@@ -583,14 +406,6 @@ export function useSTT(
 
     try {
       setIsListening(false);
-
-      // Record the stop time for cooldown
-      lastStopTimeRef.current = Date.now();
-
-      // Stop silence detection only if continuous recording was enabled
-      if (options.continuousRecording) {
-        stopSilenceDetection();
-      }
 
       if (
         mediaRecorderRef.current &&
@@ -601,7 +416,6 @@ export function useSTT(
     } catch (err) {
       const errorMsg =
         err instanceof Error ? err : new Error('Failed to stop recording');
-      setError(errorMsg);
       setRecordingState('error');
       isRecordingRef.current = false;
 
@@ -609,7 +423,7 @@ export function useSTT(
         options.onError(errorMsg);
       }
     }
-  }, [recordingState, options, stopSilenceDetection]);
+  }, [recordingState, options]);
 
   /**
    * Toggle recording state
@@ -655,32 +469,10 @@ export function useSTT(
       audioStreamRef.current = null;
     }
 
-    // Clean up audio context only if continuous recording was enabled
-    if (options.continuousRecording && audioContextRef.current) {
-      try {
-        // Check if AudioContext is still valid before closing
-        if (audioContextRef.current.state !== 'closed') {
-          audioContextRef.current.close();
-        }
-      } catch (error) {
-        // Ignore AudioContext close errors
-      }
-      audioContextRef.current = null;
-    }
-
-    // Stop silence detection only if continuous recording was enabled
-    if (options.continuousRecording) {
-      stopSilenceDetection();
-    }
-
-    // Reset audio analysis state
-    analyserRef.current = null;
-    dataArrayRef.current = null;
-
     chunksRef.current = [];
     setIsListening(false);
     setRecordingState('idle');
-  }, [options.continuousRecording, stopSilenceDetection]);
+  }, []);
 
   /**
    * Cleanup on unmount
@@ -706,7 +498,6 @@ export function useSTT(
     recordingState,
     microphoneMuted,
     hasUserActivatedRecord,
-    error,
     lastTranscription,
     isListening,
 
@@ -717,9 +508,8 @@ export function useSTT(
     toggleMute,
     transcribeAudio,
     setHasUserActivatedRecord,
-    setError,
 
     // Utils
     cleanup,
   };
-}
+}
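
Pulling the surviving surface together, a hypothetical consumer could look like the following. startRecording and stopRecording come from parts of the file this diff does not show, and the exact parameter list after config is likewise not visible here, so the two-argument call is an assumption:

    import { useSTT } from './helpers/stt/useSTT';

    function PushToTalk({ config }: { config: Parameters<typeof useSTT>[0] }) {
      const { recordingState, startRecording, stopRecording, lastTranscription } =
        useSTT(config, {
          apiUrl: '/api/stt',
          onError: error => console.error('STT error:', error),
        });

      const active = recordingState === 'recording';
      return (
        <button onClick={() => (active ? stopRecording() : startRecording())}>
          {active ? 'Stop' : 'Talk'} {lastTranscription?.text ?? ''}
        </button>
      );
    }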