whisper.rn 0.5.0-rc.1 → 0.5.0-rc.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/README.md +128 -50
  2. package/android/src/main/CMakeLists.txt +1 -0
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +35 -0
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +33 -0
  5. package/android/src/main/jni.cpp +81 -0
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +5 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +5 -0
  8. package/cpp/jsi/RNWhisperJSI.cpp +42 -6
  9. package/ios/RNWhisper.mm +11 -0
  10. package/ios/RNWhisperContext.h +1 -0
  11. package/ios/RNWhisperContext.mm +46 -0
  12. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Info.plist +0 -0
  13. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  14. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +1 -1
  15. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  16. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Info.plist +0 -0
  17. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  18. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +1 -1
  19. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  20. package/lib/commonjs/AudioSessionIos.js +2 -1
  21. package/lib/commonjs/AudioSessionIos.js.map +1 -1
  22. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  23. package/lib/commonjs/index.js +50 -10
  24. package/lib/commonjs/index.js.map +1 -1
  25. package/lib/commonjs/jest-mock.js +126 -0
  26. package/lib/commonjs/jest-mock.js.map +1 -0
  27. package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js +857 -0
  28. package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js.map +1 -0
  29. package/lib/commonjs/realtime-transcription/SliceManager.js +233 -0
  30. package/lib/commonjs/realtime-transcription/SliceManager.js.map +1 -0
  31. package/lib/commonjs/realtime-transcription/adapters/AudioPcmStreamAdapter.js +133 -0
  32. package/lib/commonjs/realtime-transcription/adapters/AudioPcmStreamAdapter.js.map +1 -0
  33. package/lib/commonjs/realtime-transcription/adapters/JestAudioStreamAdapter.js +201 -0
  34. package/lib/commonjs/realtime-transcription/adapters/JestAudioStreamAdapter.js.map +1 -0
  35. package/lib/commonjs/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js +309 -0
  36. package/lib/commonjs/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js.map +1 -0
  37. package/lib/commonjs/realtime-transcription/index.js +27 -0
  38. package/lib/commonjs/realtime-transcription/index.js.map +1 -0
  39. package/lib/commonjs/realtime-transcription/types.js +114 -0
  40. package/lib/commonjs/realtime-transcription/types.js.map +1 -0
  41. package/lib/commonjs/utils/WavFileReader.js +158 -0
  42. package/lib/commonjs/utils/WavFileReader.js.map +1 -0
  43. package/lib/commonjs/utils/WavFileWriter.js +181 -0
  44. package/lib/commonjs/utils/WavFileWriter.js.map +1 -0
  45. package/lib/commonjs/utils/common.js +25 -0
  46. package/lib/commonjs/utils/common.js.map +1 -0
  47. package/lib/module/AudioSessionIos.js +2 -1
  48. package/lib/module/AudioSessionIos.js.map +1 -1
  49. package/lib/module/NativeRNWhisper.js.map +1 -1
  50. package/lib/module/index.js +48 -10
  51. package/lib/module/index.js.map +1 -1
  52. package/lib/module/jest-mock.js +124 -0
  53. package/lib/module/jest-mock.js.map +1 -0
  54. package/lib/module/realtime-transcription/RealtimeTranscriber.js +851 -0
  55. package/lib/module/realtime-transcription/RealtimeTranscriber.js.map +1 -0
  56. package/lib/module/realtime-transcription/SliceManager.js +226 -0
  57. package/lib/module/realtime-transcription/SliceManager.js.map +1 -0
  58. package/lib/module/realtime-transcription/adapters/AudioPcmStreamAdapter.js +124 -0
  59. package/lib/module/realtime-transcription/adapters/AudioPcmStreamAdapter.js.map +1 -0
  60. package/lib/module/realtime-transcription/adapters/JestAudioStreamAdapter.js +194 -0
  61. package/lib/module/realtime-transcription/adapters/JestAudioStreamAdapter.js.map +1 -0
  62. package/lib/module/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js +302 -0
  63. package/lib/module/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js.map +1 -0
  64. package/lib/module/realtime-transcription/index.js +8 -0
  65. package/lib/module/realtime-transcription/index.js.map +1 -0
  66. package/lib/module/realtime-transcription/types.js +107 -0
  67. package/lib/module/realtime-transcription/types.js.map +1 -0
  68. package/lib/module/utils/WavFileReader.js +151 -0
  69. package/lib/module/utils/WavFileReader.js.map +1 -0
  70. package/lib/module/utils/WavFileWriter.js +174 -0
  71. package/lib/module/utils/WavFileWriter.js.map +1 -0
  72. package/lib/module/utils/common.js +18 -0
  73. package/lib/module/utils/common.js.map +1 -0
  74. package/lib/typescript/AudioSessionIos.d.ts +1 -1
  75. package/lib/typescript/AudioSessionIos.d.ts.map +1 -1
  76. package/lib/typescript/NativeRNWhisper.d.ts +1 -0
  77. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  78. package/lib/typescript/index.d.ts +8 -4
  79. package/lib/typescript/index.d.ts.map +1 -1
  80. package/lib/typescript/jest-mock.d.ts +2 -0
  81. package/lib/typescript/jest-mock.d.ts.map +1 -0
  82. package/lib/typescript/realtime-transcription/RealtimeTranscriber.d.ts +166 -0
  83. package/lib/typescript/realtime-transcription/RealtimeTranscriber.d.ts.map +1 -0
  84. package/lib/typescript/realtime-transcription/SliceManager.d.ts +72 -0
  85. package/lib/typescript/realtime-transcription/SliceManager.d.ts.map +1 -0
  86. package/lib/typescript/realtime-transcription/adapters/AudioPcmStreamAdapter.d.ts +22 -0
  87. package/lib/typescript/realtime-transcription/adapters/AudioPcmStreamAdapter.d.ts.map +1 -0
  88. package/lib/typescript/realtime-transcription/adapters/JestAudioStreamAdapter.d.ts +44 -0
  89. package/lib/typescript/realtime-transcription/adapters/JestAudioStreamAdapter.d.ts.map +1 -0
  90. package/lib/typescript/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.d.ts +75 -0
  91. package/lib/typescript/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.d.ts.map +1 -0
  92. package/lib/typescript/realtime-transcription/index.d.ts +6 -0
  93. package/lib/typescript/realtime-transcription/index.d.ts.map +1 -0
  94. package/lib/typescript/realtime-transcription/types.d.ts +222 -0
  95. package/lib/typescript/realtime-transcription/types.d.ts.map +1 -0
  96. package/lib/typescript/utils/WavFileReader.d.ts +61 -0
  97. package/lib/typescript/utils/WavFileReader.d.ts.map +1 -0
  98. package/lib/typescript/utils/WavFileWriter.d.ts +57 -0
  99. package/lib/typescript/utils/WavFileWriter.d.ts.map +1 -0
  100. package/lib/typescript/utils/common.d.ts +9 -0
  101. package/lib/typescript/utils/common.d.ts.map +1 -0
  102. package/package.json +18 -6
  103. package/src/AudioSessionIos.ts +3 -2
  104. package/src/NativeRNWhisper.ts +2 -0
  105. package/src/index.ts +74 -22
  106. package/{jest/mock.js → src/jest-mock.ts} +2 -2
  107. package/src/realtime-transcription/RealtimeTranscriber.ts +1015 -0
  108. package/src/realtime-transcription/SliceManager.ts +252 -0
  109. package/src/realtime-transcription/adapters/AudioPcmStreamAdapter.ts +143 -0
  110. package/src/realtime-transcription/adapters/JestAudioStreamAdapter.ts +251 -0
  111. package/src/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.ts +378 -0
  112. package/src/realtime-transcription/index.ts +34 -0
  113. package/src/realtime-transcription/types.ts +283 -0
  114. package/src/utils/WavFileReader.ts +202 -0
  115. package/src/utils/WavFileWriter.ts +206 -0
  116. package/src/utils/common.ts +17 -0
package/lib/module/realtime-transcription/RealtimeTranscriber.js
@@ -0,0 +1,851 @@
+ /* eslint-disable class-methods-use-this */
+
+ import { SliceManager } from './SliceManager';
+ import { WavFileWriter } from '../utils/WavFileWriter';
+ import { VAD_PRESETS } from './types';
+
+ /**
+  * RealtimeTranscriber provides real-time audio transcription with VAD support.
+  *
+  * Features:
+  * - Automatic slice management based on duration
+  * - VAD-based speech detection and auto-slicing
+  * - Configurable auto-slice mechanism that triggers on speech_end/silence events
+  * - Memory management for audio slices
+  * - Queue-based transcription processing
+  */
+ export class RealtimeTranscriber {
+   callbacks = {};
+   isActive = false;
+   isTranscribing = false;
+   vadEnabled = false;
+   transcriptionQueue = [];
+   accumulatedData = new Uint8Array(0);
+   wavFileWriter = null;
+
+   // Simplified VAD state management
+   lastSpeechDetectedTime = 0;
+
+   // Track VAD state for proper event transitions
+   lastVadState = 'silence';
+
+   // Track last stats to emit only when changed
+   lastStatsSnapshot = null;
+
+   // Store transcription results by slice index
+   transcriptionResults = new Map();
+
+   // Store VAD events by slice index for inclusion in transcribe events
+   vadEvents = new Map();
+   constructor(dependencies) {
+     let options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
+     let callbacks = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+     this.whisperContext = dependencies.whisperContext;
+     this.vadContext = dependencies.vadContext;
+     this.audioStream = dependencies.audioStream;
+     this.fs = dependencies.fs;
+     this.callbacks = callbacks;
+
+     // Set default options with proper types
+     this.options = {
+       audioSliceSec: options.audioSliceSec || 30,
+       audioMinSec: options.audioMinSec || 1,
+       maxSlicesInMemory: options.maxSlicesInMemory || 3,
+       vadOptions: options.vadOptions || VAD_PRESETS.default,
+       vadPreset: options.vadPreset,
+       autoSliceOnSpeechEnd: options.autoSliceOnSpeechEnd || true,
+       autoSliceThreshold: options.autoSliceThreshold || 0.5,
+       transcribeOptions: options.transcribeOptions || {},
+       initialPrompt: options.initialPrompt,
+       promptPreviousSlices: options.promptPreviousSlices ?? true,
+       audioOutputPath: options.audioOutputPath,
+       logger: options.logger || (() => {})
+     };
+
+     // Apply VAD preset if specified
+     if (this.options.vadPreset && VAD_PRESETS[this.options.vadPreset]) {
+       this.options.vadOptions = {
+         ...VAD_PRESETS[this.options.vadPreset],
+         ...this.options.vadOptions
+       };
+     }
+
+     // Enable VAD if context is provided and not explicitly disabled
+     this.vadEnabled = !!this.vadContext;
+
+     // Initialize managers
+     this.sliceManager = new SliceManager(this.options.audioSliceSec, this.options.maxSlicesInMemory);
+
+     // Set up audio stream callbacks
+     this.audioStream.onData(this.handleAudioData.bind(this));
+     this.audioStream.onError(this.handleError.bind(this));
+     this.audioStream.onStatusChange(this.handleAudioStatusChange.bind(this));
+   }
+
+   /**
+    * Start realtime transcription
+    */
+   async start() {
+     if (this.isActive) {
+       throw new Error('Realtime transcription is already active');
+     }
+     try {
+       var _this$callbacks$onSta, _this$callbacks, _this$options$audioSt4, _this$options$audioSt5, _this$options$audioSt6, _this$options$audioSt7, _this$options$audioSt8;
+       this.isActive = true;
+       (_this$callbacks$onSta = (_this$callbacks = this.callbacks).onStatusChange) === null || _this$callbacks$onSta === void 0 ? void 0 : _this$callbacks$onSta.call(_this$callbacks, true);
+
+       // Reset all state to ensure clean start
+       this.reset();
+
+       // Initialize WAV file writer if output path is specified
+       if (this.fs && this.options.audioOutputPath) {
+         var _this$options$audioSt, _this$options$audioSt2, _this$options$audioSt3;
+         this.wavFileWriter = new WavFileWriter(this.fs, this.options.audioOutputPath, {
+           sampleRate: ((_this$options$audioSt = this.options.audioStreamConfig) === null || _this$options$audioSt === void 0 ? void 0 : _this$options$audioSt.sampleRate) || 16000,
+           channels: ((_this$options$audioSt2 = this.options.audioStreamConfig) === null || _this$options$audioSt2 === void 0 ? void 0 : _this$options$audioSt2.channels) || 1,
+           bitsPerSample: ((_this$options$audioSt3 = this.options.audioStreamConfig) === null || _this$options$audioSt3 === void 0 ? void 0 : _this$options$audioSt3.bitsPerSample) || 16
+         });
+         await this.wavFileWriter.initialize();
+       }
+
+       // Start audio recording
+       await this.audioStream.initialize({
+         sampleRate: ((_this$options$audioSt4 = this.options.audioStreamConfig) === null || _this$options$audioSt4 === void 0 ? void 0 : _this$options$audioSt4.sampleRate) || 16000,
+         channels: ((_this$options$audioSt5 = this.options.audioStreamConfig) === null || _this$options$audioSt5 === void 0 ? void 0 : _this$options$audioSt5.channels) || 1,
+         bitsPerSample: ((_this$options$audioSt6 = this.options.audioStreamConfig) === null || _this$options$audioSt6 === void 0 ? void 0 : _this$options$audioSt6.bitsPerSample) || 16,
+         audioSource: ((_this$options$audioSt7 = this.options.audioStreamConfig) === null || _this$options$audioSt7 === void 0 ? void 0 : _this$options$audioSt7.audioSource) || 6,
+         bufferSize: ((_this$options$audioSt8 = this.options.audioStreamConfig) === null || _this$options$audioSt8 === void 0 ? void 0 : _this$options$audioSt8.bufferSize) || 16 * 1024
+       });
+       await this.audioStream.start();
+
+       // Emit stats update for status change
+       this.emitStatsUpdate('status_change');
+       this.log('Realtime transcription started');
+     } catch (error) {
+       var _this$callbacks$onSta2, _this$callbacks2;
+       this.isActive = false;
+       (_this$callbacks$onSta2 = (_this$callbacks2 = this.callbacks).onStatusChange) === null || _this$callbacks$onSta2 === void 0 ? void 0 : _this$callbacks$onSta2.call(_this$callbacks2, false);
+       throw error;
+     }
+   }
+
+   /**
+    * Stop realtime transcription
+    */
+   async stop() {
+     if (!this.isActive) {
+       return;
+     }
+     try {
+       var _this$callbacks$onSta3, _this$callbacks3;
+       this.isActive = false;
+
+       // Stop audio recording
+       await this.audioStream.stop();
+
+       // Process any remaining accumulated data
+       if (this.accumulatedData.length > 0) {
+         this.processAccumulatedDataForSliceManagement();
+       }
+
+       // Process any remaining queued transcriptions
+       await this.processTranscriptionQueue();
+
+       // Finalize WAV file
+       if (this.wavFileWriter) {
+         await this.wavFileWriter.finalize();
+         this.wavFileWriter = null;
+       }
+
+       // Reset all state completely
+       this.reset();
+       (_this$callbacks$onSta3 = (_this$callbacks3 = this.callbacks).onStatusChange) === null || _this$callbacks$onSta3 === void 0 ? void 0 : _this$callbacks$onSta3.call(_this$callbacks3, false);
+
+       // Emit stats update for status change
+       this.emitStatsUpdate('status_change');
+       this.log('Realtime transcription stopped');
+     } catch (error) {
+       this.handleError(`Stop error: ${error}`);
+     }
+   }
+
+   /**
+    * Handle incoming audio data from audio stream
+    */
+   handleAudioData(streamData) {
+     if (!this.isActive) {
+       return;
+     }
+     try {
+       // Write to WAV file if enabled (convert to Uint8Array for WavFileWriter)
+       if (this.wavFileWriter) {
+         this.wavFileWriter.appendAudioData(streamData.data).catch(error => {
+           this.log(`Failed to write audio to WAV file: ${error}`);
+         });
+       }
+
+       // Always accumulate data for slice management
+       this.accumulateAudioData(streamData.data);
+     } catch (error) {
+       const errorMessage = error instanceof Error ? error.message : 'Audio processing error';
+       this.handleError(errorMessage);
+     }
+   }
+
+   /**
+    * Accumulate audio data for slice management
+    */
+   accumulateAudioData(newData) {
+     const combined = new Uint8Array(this.accumulatedData.length + newData.length);
+     combined.set(this.accumulatedData);
+     combined.set(new Uint8Array(newData), this.accumulatedData.length);
+     this.accumulatedData = combined;
+
+     // Process accumulated data when we have enough for slice management
+     const minBufferSamples = 16000 * 1; // 1 second for slice management
+     if (this.accumulatedData.length >= minBufferSamples) {
+       this.processAccumulatedDataForSliceManagement();
+     }
+   }
+
+   /**
+    * Process accumulated audio data through SliceManager
+    */
+   processAccumulatedDataForSliceManagement() {
+     if (this.accumulatedData.length === 0) {
+       return;
+     }
+
+     // Process through slice manager directly with Uint8Array
+     const result = this.sliceManager.addAudioData(this.accumulatedData);
+     if (result.slice) {
+       this.log(`Slice ${result.slice.index} ready (${result.slice.data.length} bytes)`);
+
+       // Process VAD for the slice if enabled
+       if (!this.isTranscribing && this.vadEnabled) {
+         this.processSliceVAD(result.slice).catch(error => {
+           this.handleError(`VAD processing error: ${error}`);
+         });
+       } else if (!this.isTranscribing) {
+         // If VAD is disabled, transcribe slices as they become ready
+         this.queueSliceForTranscription(result.slice).catch(error => {
+           this.handleError(`Failed to queue slice for transcription: ${error}`);
+         });
+       } else {
+         this.log(`Skipping slice ${result.slice.index} - already transcribing`);
+       }
+       this.emitStatsUpdate('memory_change');
+     }
+
+     // Clear accumulated data
+     this.accumulatedData = new Uint8Array(0);
+   }
+
+   /**
+    * Check if auto-slice should be triggered based on VAD event and timing
+    */
+   async checkAutoSlice(vadEvent, _slice) {
+     if (!this.options.autoSliceOnSpeechEnd || !this.vadEnabled) {
+       return;
+     }
+
+     // Only trigger on speech_end or silence events
+     const shouldTriggerAutoSlice = vadEvent.type === 'speech_end' || vadEvent.type === 'silence';
+     if (!shouldTriggerAutoSlice) {
+       return;
+     }
+
+     // Get current slice info from SliceManager
+     const currentSliceInfo = this.sliceManager.getCurrentSliceInfo();
+     const currentSlice = this.sliceManager.getSliceByIndex(currentSliceInfo.currentSliceIndex);
+     if (!currentSlice) {
+       return;
+     }
+
+     // Calculate current slice duration
+     const currentDuration = (Date.now() - currentSlice.startTime) / 1000; // Convert to seconds
+     const targetDuration = this.options.audioSliceSec;
+     const minDuration = this.options.audioMinSec;
+     const autoSliceThreshold = targetDuration * this.options.autoSliceThreshold;
+
+     // Check if conditions are met for auto-slice
+     const meetsMinDuration = currentDuration >= minDuration;
+     const meetsThreshold = currentDuration >= autoSliceThreshold;
+     if (meetsMinDuration && meetsThreshold) {
+       this.log(`Auto-slicing on ${vadEvent.type} at ${currentDuration.toFixed(1)}s ` + `(min: ${minDuration}s, threshold: ${autoSliceThreshold.toFixed(1)}s, target: ${targetDuration}s)`);
+
+       // Force next slice
+       await this.nextSlice();
+     } else {
+       this.log(`Auto-slice conditions not met on ${vadEvent.type}: ` + `duration=${currentDuration.toFixed(1)}s, min=${minDuration}s, threshold=${autoSliceThreshold.toFixed(1)}s ` + `(minOk=${meetsMinDuration}, thresholdOk=${meetsThreshold})`);
+     }
+   }
+
+   /**
+    * Process VAD for a completed slice
+    */
+   async processSliceVAD(slice) {
+     try {
+       var _this$callbacks$onVad, _this$callbacks4;
+       // Get audio data from the slice for VAD processing
+       const audioData = this.sliceManager.getAudioDataForTranscription(slice.index);
+       if (!audioData) {
+         this.log(`No audio data available for VAD processing of slice ${slice.index}`);
+         return;
+       }
+
+       // Convert base64 back to Uint8Array for VAD processing
+
+       // Detect speech in the slice
+       const vadEvent = await this.detectSpeech(audioData, slice.index);
+       vadEvent.timestamp = Date.now();
+
+       // Store VAD event for inclusion in transcribe event
+       this.vadEvents.set(slice.index, vadEvent);
+
+       // Emit VAD event
+       (_this$callbacks$onVad = (_this$callbacks4 = this.callbacks).onVad) === null || _this$callbacks$onVad === void 0 ? void 0 : _this$callbacks$onVad.call(_this$callbacks4, vadEvent);
+
+       // Check if auto-slice should be triggered
+       await this.checkAutoSlice(vadEvent, slice);
+
+       // Check if speech was detected and if we should transcribe
+       const isSpeech = vadEvent.type === 'speech_start' || vadEvent.type === 'speech_continue';
+       const isSpeechEnd = vadEvent.type === 'speech_end';
+       if (isSpeech) {
+         const minDuration = this.options.audioMinSec;
+         // Check minimum duration requirement
+         const speechDuration = slice.data.length / 16000 / 2; // Convert bytes to seconds (16kHz, 16-bit)
+
+         if (speechDuration >= minDuration) {
+           this.log(`Speech detected in slice ${slice.index}, queueing for transcription`);
+           await this.queueSliceForTranscription(slice);
+         } else {
+           this.log(`Speech too short in slice ${slice.index} (${speechDuration.toFixed(2)}s < ${minDuration}s), skipping`);
+         }
+       } else if (isSpeechEnd) {
+         this.log(`Speech ended in slice ${slice.index}`);
+         // For speech_end events, we might want to queue the slice for transcription
+         // to capture the final part of the speech segment
+         const speechDuration = slice.data.length / 16000 / 2; // Convert bytes to seconds
+         const minDuration = this.options.audioMinSec;
+         if (speechDuration >= minDuration) {
+           this.log(`Speech end detected in slice ${slice.index}, queueing final segment for transcription`);
+           await this.queueSliceForTranscription(slice);
+         } else {
+           this.log(`Speech end segment too short in slice ${slice.index} (${speechDuration.toFixed(2)}s < ${minDuration}s), skipping`);
+         }
+       } else {
+         this.log(`No speech detected in slice ${slice.index}`);
+       }
+
+       // Emit stats update for VAD change
+       this.emitStatsUpdate('vad_change');
+     } catch (error) {
+       this.handleError(`VAD processing error for slice ${slice.index}: ${error}`);
+     }
+   }
+
+   /**
+    * Queue a slice for transcription
+    */
+   async queueSliceForTranscription(slice) {
+     try {
+       // Get audio data from the slice
+       const audioData = this.sliceManager.getAudioDataForTranscription(slice.index);
+       if (!audioData) {
+         this.log(`No audio data available for slice ${slice.index}`);
+         return;
+       }
+       if (this.callbacks.onBeginTranscribe) {
+         const shouldTranscribe = (await this.callbacks.onBeginTranscribe({
+           sliceIndex: slice.index,
+           audioData,
+           duration: slice.data.length / 16000 / 2 * 1000,
+           // Convert to milliseconds
+           vadEvent: this.vadEvents.get(slice.index)
+         })) ?? true;
+         if (!shouldTranscribe) {
+           this.log(`User callback declined transcription for slice ${slice.index}`);
+           return;
+         }
+       }
+
+       // Add to transcription queue
+       this.transcriptionQueue.unshift({
+         sliceIndex: slice.index,
+         audioData
+       });
+       this.log(`Queued slice ${slice.index} for transcription (${slice.data.length} samples)`);
+       await this.processTranscriptionQueue();
+     } catch (error) {
+       this.handleError(`Failed to queue slice for transcription: ${error}`);
+     }
+   }
+
+   /**
+    * Detect speech using VAD context
+    */
+   async detectSpeech(audioData, sliceIndex) {
+     if (!this.vadContext) {
+       // When no VAD context is available, assume speech is always detected
+       // but still follow the state machine pattern
+       const currentTimestamp = Date.now();
+
+       // Assume speech is always detected when no VAD context
+       const vadEventType = this.lastVadState === 'silence' ? 'speech_start' : 'speech_continue';
+
+       // Update VAD state
+       this.lastVadState = 'speech';
+       const {
+         sampleRate = 16000
+       } = this.options.audioStreamConfig || {};
+       return {
+         type: vadEventType,
+         lastSpeechDetectedTime: 0,
+         timestamp: currentTimestamp,
+         confidence: 1.0,
+         duration: audioData.length / sampleRate / 2,
+         // Convert bytes to seconds
+         sliceIndex
+       };
+     }
+     try {
+       const audioBuffer = audioData.buffer;
+
+       // Use VAD context to detect speech segments
+       const vadSegments = await this.vadContext.detectSpeechData(audioBuffer, this.options.vadOptions);
+
+       // Calculate confidence based on speech segments
+       let confidence = 0.0;
+       let lastSpeechDetectedTime = 0;
+       if (vadSegments && vadSegments.length > 0) {
+         var _vadSegments;
+         // If there are speech segments, calculate average confidence
+         const totalTime = vadSegments.reduce((sum, segment) => sum + (segment.t1 - segment.t0), 0);
+         const audioDuration = audioData.length / 16000 / 2; // Convert bytes to seconds
+         confidence = totalTime > 0 ? Math.min(totalTime / audioDuration, 1.0) : 0.0;
+         lastSpeechDetectedTime = ((_vadSegments = vadSegments[vadSegments.length - 1]) === null || _vadSegments === void 0 ? void 0 : _vadSegments.t1) || -1;
+       }
+       const threshold = this.options.vadOptions.threshold || 0.5;
+       let isSpeech = confidence > threshold;
+       const currentTimestamp = Date.now();
+
+       // Determine VAD event type based on current and previous state
+       let vadEventType;
+       if (isSpeech) {
+         vadEventType = this.lastVadState === 'silence' ? 'speech_start' : 'speech_continue';
+         const minDuration = this.options.audioMinSec;
+         // Check if this is a new speech detection (different from last detected time)
+         if (lastSpeechDetectedTime === this.lastSpeechDetectedTime || (lastSpeechDetectedTime - this.lastSpeechDetectedTime) / 100 < minDuration) {
+           if (this.lastVadState === 'silence') vadEventType = 'silence';
+           if (this.lastVadState === 'speech') vadEventType = 'speech_end';
+           isSpeech = false;
+           confidence = 0.0;
+         }
+         this.lastSpeechDetectedTime = lastSpeechDetectedTime;
+       } else {
+         vadEventType = this.lastVadState === 'speech' ? 'speech_end' : 'silence';
+       }
+
+       // Update VAD state for next detection
+       this.lastVadState = isSpeech ? 'speech' : 'silence';
+       const {
+         sampleRate = 16000
+       } = this.options.audioStreamConfig || {};
+       return {
+         type: vadEventType,
+         lastSpeechDetectedTime,
+         timestamp: currentTimestamp,
+         confidence,
+         duration: audioData.length / sampleRate / 2,
+         // Convert bytes to seconds
+         sliceIndex,
+         currentThreshold: threshold
+       };
+     } catch (error) {
+       this.log(`VAD detection error: ${error}`);
+       // Re-throw the error so it can be handled by the caller
+       throw error;
+     }
+   }
+   isProcessingTranscriptionQueue = false;
+
+   /**
+    * Process the transcription queue
+    */
+   async processTranscriptionQueue() {
+     if (this.isProcessingTranscriptionQueue) return;
+     this.isProcessingTranscriptionQueue = true;
+     while (this.transcriptionQueue.length > 0) {
+       const item = this.transcriptionQueue.shift();
+       this.transcriptionQueue = []; // Old items are not needed anymore
+       if (item) {
+         // eslint-disable-next-line no-await-in-loop
+         await this.processTranscription(item).catch(error => {
+           this.handleError(`Transcription error: ${error}`);
+         });
+       }
+     }
+     this.isProcessingTranscriptionQueue = false;
+   }
+
+   /**
+    * Build prompt from initial prompt and previous slices
+    */
+   buildPrompt(currentSliceIndex) {
+     const promptParts = [];
+
+     // Add initial prompt if provided
+     if (this.options.initialPrompt) {
+       promptParts.push(this.options.initialPrompt);
+     }
+
+     // Add previous slice results if enabled
+     if (this.options.promptPreviousSlices) {
+       // Get transcription results from previous slices (up to the current slice)
+       const previousResults = Array.from(this.transcriptionResults.entries()).filter(_ref => {
+         let [sliceIndex] = _ref;
+         return sliceIndex < currentSliceIndex;
+       }).sort((_ref2, _ref3) => {
+         let [a] = _ref2;
+         let [b] = _ref3;
+         return a - b;
+       }) // Sort by slice index
+       .map(_ref4 => {
+         var _result$transcribeEve;
+         let [, result] = _ref4;
+         return (_result$transcribeEve = result.transcribeEvent.data) === null || _result$transcribeEve === void 0 ? void 0 : _result$transcribeEve.result;
+       }).filter(result => Boolean(result)); // Filter out empty results with type guard
+
+       if (previousResults.length > 0) {
+         promptParts.push(...previousResults);
+       }
+     }
+     return promptParts.join(' ') || undefined;
+   }
+
+   /**
+    * Process a single transcription
+    */
+   async processTranscription(item) {
+     if (!this.isActive) {
+       return;
+     }
+     this.isTranscribing = true;
+
+     // Emit stats update for status change
+     this.emitStatsUpdate('status_change');
+     const startTime = Date.now();
+     try {
+       var _this$callbacks$onTra, _this$callbacks5;
+       // Build prompt from initial prompt and previous slices
+       const prompt = this.buildPrompt(item.sliceIndex);
+       const audioBuffer = item.audioData.buffer;
+       const {
+         promise
+       } = this.whisperContext.transcribeData(audioBuffer, {
+         ...this.options.transcribeOptions,
+         prompt,
+         // Include the constructed prompt
+         onProgress: undefined // Disable progress for realtime
+       });
+
+       const result = await promise;
+       const endTime = Date.now();
+
+       // Create transcribe event
+       const {
+         sampleRate = 16000
+       } = this.options.audioStreamConfig || {};
+       const transcribeEvent = {
+         type: 'transcribe',
+         sliceIndex: item.sliceIndex,
+         data: result,
+         isCapturing: this.audioStream.isRecording(),
+         processTime: endTime - startTime,
+         recordingTime: item.audioData.length / (sampleRate / 1000) / 2,
+         // ms,
+         memoryUsage: this.sliceManager.getMemoryUsage(),
+         vadEvent: this.vadEvents.get(item.sliceIndex)
+       };
+
+       // Save transcription results
+       const slice = this.sliceManager.getSliceByIndex(item.sliceIndex);
+       if (slice) {
+         this.transcriptionResults.set(item.sliceIndex, {
+           slice: {
+             // Don't keep data in the slice
+             index: slice.index,
+             sampleCount: slice.sampleCount,
+             startTime: slice.startTime,
+             endTime: slice.endTime,
+             isProcessed: slice.isProcessed,
+             isReleased: slice.isReleased
+           },
+           transcribeEvent
+         });
+       }
+
+       // Emit transcribe event
+       (_this$callbacks$onTra = (_this$callbacks5 = this.callbacks).onTranscribe) === null || _this$callbacks$onTra === void 0 ? void 0 : _this$callbacks$onTra.call(_this$callbacks5, transcribeEvent);
+       this.vadEvents.delete(item.sliceIndex);
+
+       // Emit stats update for memory/slice changes
+       this.emitStatsUpdate('memory_change');
+       this.log(`Transcribed speech segment ${item.sliceIndex}: "${result.result}"`);
+     } catch (error) {
+       var _this$callbacks$onTra2, _this$callbacks6;
+       // Emit error event to transcribe callback
+       const errorEvent = {
+         type: 'error',
+         sliceIndex: item.sliceIndex,
+         data: undefined,
+         isCapturing: this.audioStream.isRecording(),
+         processTime: Date.now() - startTime,
+         recordingTime: 0,
+         memoryUsage: this.sliceManager.getMemoryUsage(),
+         vadEvent: this.vadEvents.get(item.sliceIndex)
+       };
+       (_this$callbacks$onTra2 = (_this$callbacks6 = this.callbacks).onTranscribe) === null || _this$callbacks$onTra2 === void 0 ? void 0 : _this$callbacks$onTra2.call(_this$callbacks6, errorEvent);
+       this.vadEvents.delete(item.sliceIndex);
+       this.handleError(`Transcription failed for speech segment ${item.sliceIndex}: ${error}`);
+     } finally {
+       // Check if we should continue processing queue
+       if (this.transcriptionQueue.length > 0) {
+         await this.processTranscriptionQueue();
+       } else {
+         this.isTranscribing = false;
+       }
+     }
+   }
+
+   /**
+    * Handle audio status changes
+    */
+   handleAudioStatusChange(isRecording) {
+     this.log(`Audio recording: ${isRecording ? 'started' : 'stopped'}`);
+   }
+
+   /**
+    * Handle errors from components
+    */
+   handleError(error) {
+     var _this$callbacks$onErr, _this$callbacks7;
+     this.log(`Error: ${error}`);
+     (_this$callbacks$onErr = (_this$callbacks7 = this.callbacks).onError) === null || _this$callbacks$onErr === void 0 ? void 0 : _this$callbacks$onErr.call(_this$callbacks7, error);
+   }
+
+   /**
+    * Update callbacks
+    */
+   updateCallbacks(callbacks) {
+     this.callbacks = {
+       ...this.callbacks,
+       ...callbacks
+     };
+   }
+
+   /**
+    * Update VAD options dynamically
+    */
+   updateVadOptions(options) {
+     this.options.vadOptions = {
+       ...this.options.vadOptions,
+       ...options
+     };
+   }
+
+   /**
+    * Update auto-slice options dynamically
+    */
+   updateAutoSliceOptions(options) {
+     if (options.autoSliceOnSpeechEnd !== undefined) {
+       this.options.autoSliceOnSpeechEnd = options.autoSliceOnSpeechEnd;
+     }
+     if (options.autoSliceThreshold !== undefined) {
+       this.options.autoSliceThreshold = options.autoSliceThreshold;
+     }
+     this.log(`Auto-slice options updated: enabled=${this.options.autoSliceOnSpeechEnd}, threshold=${this.options.autoSliceThreshold}`);
+   }
+
+   /**
+    * Get current statistics
+    */
+   getStatistics() {
+     return {
+       isActive: this.isActive,
+       isTranscribing: this.isTranscribing,
+       vadEnabled: this.vadEnabled,
+       audioStats: {
+         isRecording: this.audioStream.isRecording(),
+         accumulatedSamples: this.accumulatedData.length
+       },
+       vadStats: this.vadEnabled ? {
+         enabled: true,
+         contextAvailable: !!this.vadContext,
+         lastSpeechDetectedTime: this.lastSpeechDetectedTime
+       } : null,
+       sliceStats: this.sliceManager.getCurrentSliceInfo(),
+       autoSliceConfig: {
+         enabled: this.options.autoSliceOnSpeechEnd,
+         threshold: this.options.autoSliceThreshold,
+         targetDuration: this.options.audioSliceSec,
+         minDuration: this.options.audioMinSec
+       }
+     };
+   }
+
+   /**
+    * Get all transcription results
+    */
+   getTranscriptionResults() {
+     return Array.from(this.transcriptionResults.values());
+   }
+
+   /**
+    * Force move to the next slice, finalizing the current one regardless of capacity
+    */
+   async nextSlice() {
+     var _this$callbacks$onTra3, _this$callbacks8;
+     if (!this.isActive) {
+       this.log('Cannot force next slice - transcriber is not active');
+       return;
+     }
+
+     // Emit start event to indicate slice processing has started
+     const startEvent = {
+       type: 'start',
+       sliceIndex: -1,
+       // Use -1 to indicate forced slice
+       data: undefined,
+       isCapturing: this.audioStream.isRecording(),
+       processTime: 0,
+       recordingTime: 0,
+       memoryUsage: this.sliceManager.getMemoryUsage()
+     };
+     (_this$callbacks$onTra3 = (_this$callbacks8 = this.callbacks).onTranscribe) === null || _this$callbacks$onTra3 === void 0 ? void 0 : _this$callbacks$onTra3.call(_this$callbacks8, startEvent);
+
+     // Check if there are pending transcriptions or currently transcribing
+     if (this.isTranscribing || this.transcriptionQueue.length > 0) {
+       this.log('Waiting for pending transcriptions to complete before forcing next slice...');
+
+       // Wait for current transcription queue to be processed
+       await this.processTranscriptionQueue();
+     }
+     const result = this.sliceManager.forceNextSlice();
+     if (result.slice) {
+       this.log(`Forced slice ${result.slice.index} ready (${result.slice.data.length} bytes)`);
+
+       // Process VAD for the slice if enabled
+       if (!this.isTranscribing && this.vadEnabled) {
+         this.processSliceVAD(result.slice).catch(error => {
+           this.handleError(`VAD processing error: ${error}`);
+         });
+       } else if (!this.isTranscribing) {
+         // If VAD is disabled, transcribe slices as they become ready
+         this.queueSliceForTranscription(result.slice).catch(error => {
+           this.handleError(`Failed to queue slice for transcription: ${error}`);
+         });
+       } else {
+         this.log(`Skipping slice ${result.slice.index} - already transcribing`);
+       }
+       this.emitStatsUpdate('memory_change');
+     } else {
+       this.log('Forced next slice but no slice data to process');
+     }
+   }
+
+   /**
+    * Reset all components
+    */
+   reset() {
+     this.sliceManager.reset();
+     this.transcriptionQueue = [];
+     this.isTranscribing = false;
+     this.accumulatedData = new Uint8Array(0);
+
+     // Reset simplified VAD state
+     this.lastSpeechDetectedTime = -1;
+     this.lastVadState = 'silence';
+
+     // Reset stats snapshot for clean start
+     this.lastStatsSnapshot = null;
+
+     // Cancel WAV file writing if in progress
+     if (this.wavFileWriter) {
+       this.wavFileWriter.cancel().catch(error => {
+         this.log(`Failed to cancel WAV file writing: ${error}`);
+       });
+       this.wavFileWriter = null;
+     }
+
+     // Clear transcription results
+     this.transcriptionResults.clear();
+
+     // Clear VAD events
+     this.vadEvents.clear();
+   }
+
+   /**
+    * Release all resources
+    */
+   async release() {
+     var _this$wavFileWriter;
+     if (this.isActive) {
+       await this.stop();
+     }
+     await this.audioStream.release();
+     await ((_this$wavFileWriter = this.wavFileWriter) === null || _this$wavFileWriter === void 0 ? void 0 : _this$wavFileWriter.finalize());
+     this.vadContext = undefined;
+   }
+
+   /**
+    * Emit stats update event if stats have changed significantly
+    */
+   emitStatsUpdate(eventType) {
+     const currentStats = this.getStatistics();
+
+     // Check if stats have changed significantly
+     if (!this.lastStatsSnapshot || RealtimeTranscriber.shouldEmitStatsUpdate(currentStats, this.lastStatsSnapshot)) {
+       var _this$callbacks$onSta4, _this$callbacks9;
+       const statsEvent = {
+         timestamp: Date.now(),
+         type: eventType,
+         data: currentStats
+       };
+       (_this$callbacks$onSta4 = (_this$callbacks9 = this.callbacks).onStatsUpdate) === null || _this$callbacks$onSta4 === void 0 ? void 0 : _this$callbacks$onSta4.call(_this$callbacks9, statsEvent);
+       this.lastStatsSnapshot = {
+         ...currentStats
+       };
+     }
+   }
+
+   /**
+    * Determine if stats update should be emitted
+    */
+   static shouldEmitStatsUpdate(current, previous) {
+     var _current$sliceStats, _current$sliceStats$m, _previous$sliceStats, _previous$sliceStats$;
+     // Always emit on status changes
+     if (current.isActive !== previous.isActive || current.isTranscribing !== previous.isTranscribing) {
+       return true;
+     }
+
+     // Emit on significant memory changes (>10% or >5MB)
+     const currentMemory = ((_current$sliceStats = current.sliceStats) === null || _current$sliceStats === void 0 ? void 0 : (_current$sliceStats$m = _current$sliceStats.memoryUsage) === null || _current$sliceStats$m === void 0 ? void 0 : _current$sliceStats$m.estimatedMB) || 0;
+     const previousMemory = ((_previous$sliceStats = previous.sliceStats) === null || _previous$sliceStats === void 0 ? void 0 : (_previous$sliceStats$ = _previous$sliceStats.memoryUsage) === null || _previous$sliceStats$ === void 0 ? void 0 : _previous$sliceStats$.estimatedMB) || 0;
+     const memoryDiff = Math.abs(currentMemory - previousMemory);
+     if (memoryDiff > 5 || previousMemory > 0 && memoryDiff / previousMemory > 0.1) {
+       return true;
+     }
+     return false;
+   }
+
+   /**
+    * Logger function
+    */
+   log(message) {
+     this.options.logger(`[RealtimeTranscriber] ${message}`);
+   }
+ }
+ //# sourceMappingURL=RealtimeTranscriber.js.map
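
For orientation, the shape of the new realtime API can be pieced together from the RealtimeTranscriber constructor, options, and callbacks shown above. The sketch below only illustrates that shape under stated assumptions; the import paths, the initWhisper/initWhisperVad helpers, and the AudioPcmStreamAdapter constructor arguments are not confirmed by this diff, so check the updated README before relying on them.

// Hedged sketch only: import paths and the init helpers are assumptions, not confirmed by this diff.
import { initWhisper, initWhisperVad } from 'whisper.rn'
import {
  RealtimeTranscriber,
  AudioPcmStreamAdapter,
} from 'whisper.rn/realtime-transcription' // assumed subpath; may instead be re-exported from the root index

async function startRealtimeDemo() {
  // Contexts come from the existing whisper.rn initializers (model paths are placeholders)
  const whisperContext = await initWhisper({ filePath: 'path/to/ggml-base.bin' })
  const vadContext = await initWhisperVad({ filePath: 'path/to/ggml-silero-vad.bin' }) // optional; enables VAD

  // The adapter's constructor arguments are not visible in this diff, so none are passed here
  const audioStream = new AudioPcmStreamAdapter()

  const transcriber = new RealtimeTranscriber(
    { whisperContext, vadContext, audioStream }, // dependencies; `fs` is only needed with audioOutputPath
    {
      audioSliceSec: 30,          // maximum slice length in seconds
      audioMinSec: 1,             // skip speech segments shorter than this
      maxSlicesInMemory: 3,
      vadPreset: 'default',
      autoSliceOnSpeechEnd: true, // cut the slice early on speech_end / silence...
      autoSliceThreshold: 0.5,    // ...once it reaches 50% of audioSliceSec
      transcribeOptions: { language: 'en' },
    },
    {
      onTranscribe: (event) => console.log(event.sliceIndex, event.data?.result),
      onVad: (event) => console.log('vad:', event.type, event.confidence),
      onStatusChange: (isActive) => console.log('active:', isActive),
      onError: (error) => console.warn(error),
    },
  )

  await transcriber.start()
  // ...later: force a slice boundary, then shut down
  await transcriber.nextSlice()
  await transcriber.stop()
  await transcriber.release()
}

The option values shown mirror the defaults applied in the constructor above (audioSliceSec 30, audioMinSec 1, maxSlicesInMemory 3, autoSliceThreshold 0.5), so most of them can be omitted.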