whisper.rn 0.5.0-rc.1 → 0.5.0-rc.3

Files changed (106)
  1. package/README.md +119 -50
  2. package/android/src/main/java/com/rnwhisper/RNWhisper.java +26 -0
  3. package/android/src/main/java/com/rnwhisper/WhisperContext.java +25 -0
  4. package/android/src/main/jni.cpp +81 -0
  5. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +5 -0
  6. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +5 -0
  7. package/ios/RNWhisper.mm +11 -0
  8. package/ios/RNWhisperContext.h +1 -0
  9. package/ios/RNWhisperContext.mm +46 -0
  10. package/lib/commonjs/AudioSessionIos.js +2 -1
  11. package/lib/commonjs/AudioSessionIos.js.map +1 -1
  12. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  13. package/lib/commonjs/index.js +26 -0
  14. package/lib/commonjs/index.js.map +1 -1
  15. package/lib/commonjs/jest-mock.js +126 -0
  16. package/lib/commonjs/jest-mock.js.map +1 -0
  17. package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js +831 -0
  18. package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js.map +1 -0
  19. package/lib/commonjs/realtime-transcription/SliceManager.js +233 -0
  20. package/lib/commonjs/realtime-transcription/SliceManager.js.map +1 -0
  21. package/lib/commonjs/realtime-transcription/adapters/AudioPcmStreamAdapter.js +133 -0
  22. package/lib/commonjs/realtime-transcription/adapters/AudioPcmStreamAdapter.js.map +1 -0
  23. package/lib/commonjs/realtime-transcription/adapters/JestAudioStreamAdapter.js +201 -0
  24. package/lib/commonjs/realtime-transcription/adapters/JestAudioStreamAdapter.js.map +1 -0
  25. package/lib/commonjs/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js +309 -0
  26. package/lib/commonjs/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js.map +1 -0
  27. package/lib/commonjs/realtime-transcription/index.js +27 -0
  28. package/lib/commonjs/realtime-transcription/index.js.map +1 -0
  29. package/lib/commonjs/realtime-transcription/types.js +114 -0
  30. package/lib/commonjs/realtime-transcription/types.js.map +1 -0
  31. package/lib/commonjs/utils/WavFileReader.js +158 -0
  32. package/lib/commonjs/utils/WavFileReader.js.map +1 -0
  33. package/lib/commonjs/utils/WavFileWriter.js +181 -0
  34. package/lib/commonjs/utils/WavFileWriter.js.map +1 -0
  35. package/lib/commonjs/utils/common.js +25 -0
  36. package/lib/commonjs/utils/common.js.map +1 -0
  37. package/lib/module/AudioSessionIos.js +2 -1
  38. package/lib/module/AudioSessionIos.js.map +1 -1
  39. package/lib/module/NativeRNWhisper.js.map +1 -1
  40. package/lib/module/index.js +24 -0
  41. package/lib/module/index.js.map +1 -1
  42. package/lib/module/jest-mock.js +124 -0
  43. package/lib/module/jest-mock.js.map +1 -0
  44. package/lib/module/realtime-transcription/RealtimeTranscriber.js +825 -0
  45. package/lib/module/realtime-transcription/RealtimeTranscriber.js.map +1 -0
  46. package/lib/module/realtime-transcription/SliceManager.js +226 -0
  47. package/lib/module/realtime-transcription/SliceManager.js.map +1 -0
  48. package/lib/module/realtime-transcription/adapters/AudioPcmStreamAdapter.js +124 -0
  49. package/lib/module/realtime-transcription/adapters/AudioPcmStreamAdapter.js.map +1 -0
  50. package/lib/module/realtime-transcription/adapters/JestAudioStreamAdapter.js +194 -0
  51. package/lib/module/realtime-transcription/adapters/JestAudioStreamAdapter.js.map +1 -0
  52. package/lib/module/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js +302 -0
  53. package/lib/module/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.js.map +1 -0
  54. package/lib/module/realtime-transcription/index.js +8 -0
  55. package/lib/module/realtime-transcription/index.js.map +1 -0
  56. package/lib/module/realtime-transcription/types.js +107 -0
  57. package/lib/module/realtime-transcription/types.js.map +1 -0
  58. package/lib/module/utils/WavFileReader.js +151 -0
  59. package/lib/module/utils/WavFileReader.js.map +1 -0
  60. package/lib/module/utils/WavFileWriter.js +174 -0
  61. package/lib/module/utils/WavFileWriter.js.map +1 -0
  62. package/lib/module/utils/common.js +18 -0
  63. package/lib/module/utils/common.js.map +1 -0
  64. package/lib/typescript/AudioSessionIos.d.ts +1 -1
  65. package/lib/typescript/AudioSessionIos.d.ts.map +1 -1
  66. package/lib/typescript/NativeRNWhisper.d.ts +1 -0
  67. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  68. package/lib/typescript/index.d.ts +4 -0
  69. package/lib/typescript/index.d.ts.map +1 -1
  70. package/lib/typescript/jest-mock.d.ts +2 -0
  71. package/lib/typescript/jest-mock.d.ts.map +1 -0
  72. package/lib/typescript/realtime-transcription/RealtimeTranscriber.d.ts +165 -0
  73. package/lib/typescript/realtime-transcription/RealtimeTranscriber.d.ts.map +1 -0
  74. package/lib/typescript/realtime-transcription/SliceManager.d.ts +72 -0
  75. package/lib/typescript/realtime-transcription/SliceManager.d.ts.map +1 -0
  76. package/lib/typescript/realtime-transcription/adapters/AudioPcmStreamAdapter.d.ts +22 -0
  77. package/lib/typescript/realtime-transcription/adapters/AudioPcmStreamAdapter.d.ts.map +1 -0
  78. package/lib/typescript/realtime-transcription/adapters/JestAudioStreamAdapter.d.ts +44 -0
  79. package/lib/typescript/realtime-transcription/adapters/JestAudioStreamAdapter.d.ts.map +1 -0
  80. package/lib/typescript/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.d.ts +75 -0
  81. package/lib/typescript/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.d.ts.map +1 -0
  82. package/lib/typescript/realtime-transcription/index.d.ts +6 -0
  83. package/lib/typescript/realtime-transcription/index.d.ts.map +1 -0
  84. package/lib/typescript/realtime-transcription/types.d.ts +216 -0
  85. package/lib/typescript/realtime-transcription/types.d.ts.map +1 -0
  86. package/lib/typescript/utils/WavFileReader.d.ts +61 -0
  87. package/lib/typescript/utils/WavFileReader.d.ts.map +1 -0
  88. package/lib/typescript/utils/WavFileWriter.d.ts +57 -0
  89. package/lib/typescript/utils/WavFileWriter.d.ts.map +1 -0
  90. package/lib/typescript/utils/common.d.ts +9 -0
  91. package/lib/typescript/utils/common.d.ts.map +1 -0
  92. package/package.json +18 -6
  93. package/src/AudioSessionIos.ts +3 -2
  94. package/src/NativeRNWhisper.ts +2 -0
  95. package/src/index.ts +34 -0
  96. package/{jest/mock.js → src/jest-mock.ts} +2 -2
  97. package/src/realtime-transcription/RealtimeTranscriber.ts +983 -0
  98. package/src/realtime-transcription/SliceManager.ts +252 -0
  99. package/src/realtime-transcription/adapters/AudioPcmStreamAdapter.ts +143 -0
  100. package/src/realtime-transcription/adapters/JestAudioStreamAdapter.ts +251 -0
  101. package/src/realtime-transcription/adapters/SimulateFileAudioStreamAdapter.ts +378 -0
  102. package/src/realtime-transcription/index.ts +34 -0
  103. package/src/realtime-transcription/types.ts +277 -0
  104. package/src/utils/WavFileReader.ts +202 -0
  105. package/src/utils/WavFileWriter.ts +206 -0
  106. package/src/utils/common.ts +17 -0
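The headline change in this release is the new `realtime-transcription` module (`RealtimeTranscriber`, `SliceManager`, audio stream adapters, WAV utilities) plus a native log bridge wired through Java/JNI on Android and Objective-C on iOS. As a quick orientation before the diffs, this is the import surface the updated README documents; the subpath imports assume your bundler resolves the package `exports` map, and the README names `whisper.rn/src/realtime-transcription` as the fallback path:

```ts
import { initWhisper, initWhisperVad } from 'whisper.rn'
// Subpath exports added in this release (see the README diff below)
import { RealtimeTranscriber } from 'whisper.rn/realtime-transcription'
import { AudioPcmStreamAdapter } from 'whisper.rn/realtime-transcription/adapters'
```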
package/README.md CHANGED
@@ -11,9 +11,9 @@ React Native binding of [whisper.cpp](https://github.com/ggerganov/whisper.cpp).
  ## Screenshots
 
  | <img src="https://github.com/mybigday/whisper.rn/assets/3001525/2fea7b2d-c911-44fb-9afc-8efc7b594446" width="300" /> | <img src="https://github.com/mybigday/whisper.rn/assets/3001525/a5005a6c-44f7-4db9-95e8-0fd951a2e147" width="300" /> |
- | :------------------------------------------: | :------------------------------------------: |
- | iOS: Tested on iPhone 13 Pro Max | Android: Tested on Pixel 6 |
- | (tiny.en, Core ML enabled, release mode + archive) | (tiny.en, armv8.2-a+fp16, release mode) |
+ | :------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------: |
+ | iOS: Tested on iPhone 13 Pro Max | Android: Tested on Pixel 6 |
+ | (tiny.en, Core ML enabled, release mode + archive) | (tiny.en, armv8.2-a+fp16, release mode) |
 
  ## Installation
 
@@ -49,7 +49,9 @@ You will need to prebuild the project before using it. See [Expo guide](https://
  If you want to use realtime transcribe, you need to add the microphone permission to your app.
 
  ### iOS
- Add these lines to ```ios/[YOU_APP_NAME]/info.plist```
+
+ Add these lines to `ios/[YOU_APP_NAME]/info.plist`
+
  ```xml
  <key>NSMicrophoneUsageDescription</key>
  <string>This app requires microphone access in order to transcribe speech</string>
@@ -58,10 +60,13 @@ Add these lines to ```ios/[YOU_APP_NAME]/info.plist```
  For tvOS, please note that the microphone is not supported.
 
  ### Android
- Add the following line to ```android/app/src/main/AndroidManifest.xml```
+
+ Add the following line to `android/app/src/main/AndroidManifest.xml`
+
  ```xml
  <uses-permission android:name="android.permission.RECORD_AUDIO" />
  ```
+
  ## Tips & Tricks
 
  The [Tips & Tricks](docs/TIPS.md) document is a collection of tips and tricks for using `whisper.rn`.
@@ -83,24 +88,6 @@ const { result } = await promise
  // result: (The inference text result from audio file)
  ```
 
- Use realtime transcribe:
-
- ```js
- const { stop, subscribe } = await whisperContext.transcribeRealtime(options)
-
- subscribe(evt => {
-   const { isCapturing, data, processTime, recordingTime } = evt
-   console.log(
-     `Realtime transcribing: ${isCapturing ? 'ON' : 'OFF'}\n` +
-       // The inference text result from audio record:
-       `Result: ${data.result}\n\n` +
-       `Process time: ${processTime}ms\n` +
-       `Recording time: ${recordingTime}ms`,
-   )
-   if (!isCapturing) console.log('Finished realtime transcribing')
- })
- ```
-
  ## Voice Activity Detection (VAD)
 
  Voice Activity Detection allows you to detect speech segments in audio data using the Silero VAD model.
@@ -157,7 +144,11 @@ const segments = await vadContext.detectSpeechData(base64AudioData, {
 
  ```typescript
  segments.forEach((segment, index) => {
-   console.log(`Segment ${index + 1}: ${segment.t0.toFixed(2)}s - ${segment.t1.toFixed(2)}s`)
+   console.log(
+     `Segment ${index + 1}: ${segment.t0.toFixed(2)}s - ${segment.t1.toFixed(
+       2,
+     )}s`,
+   )
    console.log(`Duration: ${(segment.t1 - segment.t0).toFixed(2)}s`)
  })
  ```
@@ -170,35 +161,57 @@ await vadContext.release()
  await releaseAllWhisperVad()
  ```
 
- In iOS, You may need to change the Audio Session so that it can be used with other audio playback, or to optimize the quality of the recording. So we have provided AudioSession utilities for you:
+ ## Realtime Transcription
+
+ The new `RealtimeTranscriber` provides enhanced realtime transcription with features like Voice Activity Detection (VAD), auto-slicing, and memory management.
 
- Option 1 - Use options in transcribeRealtime:
  ```js
- import { AudioSessionIos } from 'whisper.rn'
+ // If your RN packager does not support package exports, use whisper.rn/src/realtime-transcription
+ import { RealtimeTranscriber } from 'whisper.rn/realtime-transcription'
+ import { AudioPcmStreamAdapter } from 'whisper.rn/realtime-transcription/adapters'
+ import RNFS from 'react-native-fs' // or any compatible filesystem
 
- const { stop, subscribe } = await whisperContext.transcribeRealtime({
-   audioSessionOnStartIos: {
-     category: AudioSessionIos.Category.PlayAndRecord,
-     options: [AudioSessionIos.CategoryOption.MixWithOthers],
-     mode: AudioSessionIos.Mode.Default,
-   },
-   audioSessionOnStopIos: 'restore', // Or an AudioSessionSettingIos
+ // Dependencies
+ const whisperContext = await initWhisper({
+   /* ... */
  })
+ const vadContext = await initWhisperVad({
+   /* ... */
+ })
+ const audioStream = new AudioPcmStreamAdapter() // requires @fugood/react-native-audio-pcm-stream
+
+ // Create transcriber
+ const transcriber = new RealtimeTranscriber(
+   { whisperContext, vadContext, audioStream, fs: RNFS },
+   {
+     audioSliceSec: 30,
+     vadPreset: 'default',
+     autoSliceOnSpeechEnd: true,
+     transcribeOptions: { language: 'en' },
+   },
+   {
+     onTranscribe: (event) => console.log('Transcription:', event.data?.result),
+     onVad: (event) => console.log('VAD:', event.type, event.confidence),
+     onStatusChange: (isActive) =>
+       console.log('Status:', isActive ? 'ACTIVE' : 'INACTIVE'),
+     onError: (error) => console.error('Error:', error),
+   },
+ )
+
+ // Start/stop transcription
+ await transcriber.start()
+ await transcriber.stop()
  ```
 
- Option 2 - Manage the Audio Session in anywhere:
- ```js
- import { AudioSessionIos } from 'whisper.rn'
+ **Dependencies:**
 
- await AudioSessionIos.setCategory(
-   AudioSessionIos.Category.PlayAndRecord, [AudioSessionIos.CategoryOption.MixWithOthers],
- )
- await AudioSessionIos.setMode(AudioSessionIos.Mode.Default)
- await AudioSessionIos.setActive(true)
- // Then you can start do recording
- ```
+ - `@fugood/react-native-audio-pcm-stream` for `AudioPcmStreamAdapter`
+ - Compatible filesystem module (e.g., `react-native-fs`). See [filesystem interface](src/utils/WavFileWriter.ts#L9-L16) for the TypeScript definition
 
- In Android, you may need to request the microphone permission by [`PermissionAndroid`](https://reactnative.dev/docs/permissionsandroid).
+ **Custom Audio Adapters:**
+ You can create custom audio stream adapters by implementing the [AudioStreamInterface](src/realtime-transcription/types.ts#L21-L30). This allows integration with different audio sources or custom audio processing pipelines.
+
+ **Example:** See the [complete example](example/src/RealtimeTranscriber.tsx) for a full implementation including file simulation and UI.
 
  Please visit the [Documentation](docs/) for more details.
 
@@ -213,8 +226,10 @@ const whisperContext = await initWhisper({
    filePath: require('../assets/ggml-tiny.en.bin'),
  })
 
- const { stop, promise } =
-   whisperContext.transcribe(require('../assets/sample.wav'), options)
+ const { stop, promise } = whisperContext.transcribe(
+   require('../assets/sample.wav'),
+   options,
+ )
 
  // ...
  ```
@@ -233,18 +248,19 @@ module.exports = {
        ...defaultAssetExts,
        'bin', // whisper.rn: ggml model binary
        'mil', // whisper.rn: CoreML model asset
-     ]
+     ],
    },
  }
  ```
 
  Please note that:
+
  - It will significantly increase the size of the app in release mode.
  - The RN packager is not allowed file size larger than 2GB, so it not able to use original f16 `large` model (2.9GB), you can use quantized models instead.
 
  ## Core ML support
 
- __*Platform: iOS 15.0+, tvOS 15.0+*__
+ **_Platform: iOS 15.0+, tvOS 15.0+_**
 
  To use Core ML on iOS, you will need to have the Core ML model files.
 
@@ -301,9 +317,62 @@ Please follow the [Development Workflow section of contributing guide](./CONTRIB
  We have provided a mock version of `whisper.rn` for testing purpose you can use on Jest:
 
  ```js
- jest.mock('whisper.rn', () => require('whisper.rn/jest/mock'))
+ jest.mock('whisper.rn', () => require('whisper.rn/jest-mock'))
  ```
 
+ ## Deprecated APIs
+
+ ### `transcribeRealtime` (Deprecated)
+
+ > ⚠️ **Deprecated**: Use `RealtimeTranscriber` instead for enhanced features and better performance.
+
+ ```js
+ const { stop, subscribe } = await whisperContext.transcribeRealtime(options)
+
+ subscribe((evt) => {
+   const { isCapturing, data, processTime, recordingTime } = evt
+   console.log(
+     `Realtime transcribing: ${isCapturing ? 'ON' : 'OFF'}\n` +
+       `Result: ${data.result}\n\n` +
+       `Process time: ${processTime}ms\n` +
+       `Recording time: ${recordingTime}ms`,
+   )
+   if (!isCapturing) console.log('Finished realtime transcribing')
+ })
+ ```
+
+ On iOS, you may need to change the Audio Session so it can be used alongside other audio playback, or to optimize recording quality, so AudioSession utilities are provided:
+
+ Option 1 - Use options in transcribeRealtime:
+
+ ```js
+ import { AudioSessionIos } from 'whisper.rn'
+
+ const { stop, subscribe } = await whisperContext.transcribeRealtime({
+   audioSessionOnStartIos: {
+     category: AudioSessionIos.Category.PlayAndRecord,
+     options: [AudioSessionIos.CategoryOption.MixWithOthers],
+     mode: AudioSessionIos.Mode.Default,
+   },
+   audioSessionOnStopIos: 'restore', // Or an AudioSessionSettingIos
+ })
+ ```
+
+ Option 2 - Manage the Audio Session anywhere:
+
+ ```js
+ import { AudioSessionIos } from 'whisper.rn'
+
+ await AudioSessionIos.setCategory(AudioSessionIos.Category.PlayAndRecord, [
+   AudioSessionIos.CategoryOption.MixWithOthers,
+ ])
+ await AudioSessionIos.setMode(AudioSessionIos.Mode.Default)
+ await AudioSessionIos.setActive(true)
+ // Then you can start recording
+ ```
+
+ On Android, you may need to request the microphone permission via [`PermissionsAndroid`](https://reactnative.dev/docs/permissionsandroid).
+
  ## Contributing
 
  See the [contributing guide](CONTRIBUTING.md) to learn how to contribute to the repository and the development workflow.
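The new README section above points custom integrations at `AudioStreamInterface` (src/realtime-transcription/types.ts#L21-L30) without showing its shape, and that file is not expanded in this diff view. The sketch below is therefore only a hypothetical illustration of what a custom adapter could look like: every method name in it (`start`, `stop`, `onData`) is an assumption, so check types.ts for the real interface before implementing one.

```ts
// Hypothetical adapter sketch -- the real AudioStreamInterface in
// src/realtime-transcription/types.ts may use different method names.
type PcmListener = (pcm16: Int16Array) => void

class SilenceStreamAdapter {
  private listeners: PcmListener[] = []
  private timer: ReturnType<typeof setInterval> | null = null

  // Emit 100 ms chunks of 16 kHz mono silence while "recording".
  start(): void {
    if (this.timer) return
    this.timer = setInterval(() => {
      const chunk = new Int16Array(1600) // all zeros = silence
      this.listeners.forEach((listener) => listener(chunk))
    }, 100)
  }

  stop(): void {
    if (this.timer) clearInterval(this.timer)
    this.timer = null
  }

  // Returns an unsubscribe function, mirroring common RN event APIs.
  onData(listener: PcmListener): () => void {
    this.listeners.push(listener)
    return () => {
      this.listeners = this.listeners.filter((l) => l !== listener)
    }
  }
}
```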
package/android/src/main/java/com/rnwhisper/RNWhisper.java CHANGED
@@ -95,6 +95,32 @@ public class RNWhisper implements LifecycleEventListener {
      tasks.put(task, "installJSIBindings");
    }
 
+   public void toggleNativeLog(boolean enabled, Promise promise) {
+     new AsyncTask<Void, Void, Boolean>() {
+       private Exception exception;
+
+       @Override
+       protected Boolean doInBackground(Void... voids) {
+         try {
+           WhisperContext.toggleNativeLog(reactContext, enabled);
+           return true;
+         } catch (Exception e) {
+           exception = e;
+         }
+         return null;
+       }
+
+       @Override
+       protected void onPostExecute(Boolean result) {
+         if (exception != null) {
+           promise.reject(exception);
+           return;
+         }
+         promise.resolve(result);
+       }
+     }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   }
+
    private int getResourceIdentifier(String filePath) {
      int identifier = reactContext.getResources().getIdentifier(
        filePath,
package/android/src/main/java/com/rnwhisper/WhisperContext.java CHANGED
@@ -29,6 +29,29 @@ public class WhisperContext {
 
    private static String loadedLibrary = "";
 
+   private static class NativeLogCallback {
+     DeviceEventManagerModule.RCTDeviceEventEmitter eventEmitter;
+
+     public NativeLogCallback(ReactApplicationContext reactContext) {
+       this.eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);
+     }
+
+     void emitNativeLog(String level, String text) {
+       WritableMap event = Arguments.createMap();
+       event.putString("level", level);
+       event.putString("text", text);
+       eventEmitter.emit("@RNWhisper_onNativeLog", event);
+     }
+   }
+
+   static void toggleNativeLog(ReactApplicationContext reactContext, boolean enabled) {
+     if (enabled) {
+       setupLog(new NativeLogCallback(reactContext));
+     } else {
+       unsetLog();
+     }
+   }
+
    private static final int SAMPLE_RATE = 16000;
    private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO;
    private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
@@ -571,4 +594,6 @@ public class WhisperContext {
    // JSI Installation
    protected static native void installJSIBindings(long runtimePtr, Object callInvokerHolder);
    protected static native void cleanupJSIBindings();
+   protected static native void setupLog(NativeLogCallback logCallback);
+   protected static native void unsetLog();
  }
package/android/src/main/jni.cpp CHANGED
@@ -23,6 +23,64 @@
  #define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
  #define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
 
+ struct log_callback_context {
+     JavaVM *jvm;
+     jobject callback;
+ };
+
+ static void rnwhisper_log_callback_default(enum wsp_ggml_log_level level, const char * fmt, void * data) {
+     if (level == WSP_GGML_LOG_LEVEL_ERROR) __android_log_print(ANDROID_LOG_ERROR, TAG, fmt, data);
+     else if (level == WSP_GGML_LOG_LEVEL_INFO) __android_log_print(ANDROID_LOG_INFO, TAG, fmt, data);
+     else if (level == WSP_GGML_LOG_LEVEL_WARN) __android_log_print(ANDROID_LOG_WARN, TAG, fmt, data);
+     else __android_log_print(ANDROID_LOG_DEFAULT, TAG, fmt, data);
+ }
+
+ static void rnwhisper_log_callback_to_j(enum wsp_ggml_log_level level, const char * text, void * data) {
+     const char* level_c = "";
+     if (level == WSP_GGML_LOG_LEVEL_ERROR) {
+         __android_log_print(ANDROID_LOG_ERROR, TAG, text, nullptr);
+         level_c = "error";
+     } else if (level == WSP_GGML_LOG_LEVEL_INFO) {
+         __android_log_print(ANDROID_LOG_INFO, TAG, text, nullptr);
+         level_c = "info";
+     } else if (level == WSP_GGML_LOG_LEVEL_WARN) {
+         __android_log_print(ANDROID_LOG_WARN, TAG, text, nullptr);
+         level_c = "warn";
+     } else {
+         __android_log_print(ANDROID_LOG_DEFAULT, TAG, text, nullptr);
+     }
+
+     log_callback_context *cb_ctx = (log_callback_context *) data;
+
+     JNIEnv *env;
+     bool need_detach = false;
+     int getEnvResult = cb_ctx->jvm->GetEnv((void**)&env, JNI_VERSION_1_6);
+
+     if (getEnvResult == JNI_EDETACHED) {
+         if (cb_ctx->jvm->AttachCurrentThread(&env, nullptr) == JNI_OK) {
+             need_detach = true;
+         } else {
+             return;
+         }
+     } else if (getEnvResult != JNI_OK) {
+         return;
+     }
+
+     jobject callback = cb_ctx->callback;
+     jclass cb_class = env->GetObjectClass(callback);
+     jmethodID emitNativeLog = env->GetMethodID(cb_class, "emitNativeLog", "(Ljava/lang/String;Ljava/lang/String;)V");
+
+     jstring level_str = env->NewStringUTF(level_c);
+     jstring text_str = env->NewStringUTF(text);
+     env->CallVoidMethod(callback, emitNativeLog, level_str, text_str);
+     env->DeleteLocalRef(level_str);
+     env->DeleteLocalRef(text_str);
+
+     if (need_detach) {
+         cb_ctx->jvm->DetachCurrentThread();
+     }
+ }
+
  static inline int min(int a, int b) {
      return (a < b) ? a : b;
  }
@@ -800,7 +858,30 @@ Java_com_rnwhisper_WhisperContext_cleanupJSIBindings(
    JNIEnv *env,
    jclass clazz
  ) {
+   UNUSED(env);
+   UNUSED(clazz);
    rnwhisper_jsi::cleanupJSIBindings();
  }
 
+ JNIEXPORT void JNICALL
+ Java_com_rnwhisper_WhisperContext_setupLog(JNIEnv *env, jobject thiz, jobject logCallback) {
+   UNUSED(thiz);
+
+   log_callback_context *cb_ctx = new log_callback_context;
+
+   JavaVM *jvm;
+   env->GetJavaVM(&jvm);
+   cb_ctx->jvm = jvm;
+   cb_ctx->callback = env->NewGlobalRef(logCallback);
+
+   whisper_log_set(rnwhisper_log_callback_to_j, cb_ctx);
+ }
+
+ JNIEXPORT void JNICALL
+ Java_com_rnwhisper_WhisperContext_unsetLog(JNIEnv *env, jobject thiz) {
+   UNUSED(env);
+   UNUSED(thiz);
+   whisper_log_set(rnwhisper_log_callback_default, NULL);
+ }
+
  } // extern "C"
package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java CHANGED
@@ -31,6 +31,11 @@ public class RNWhisperModule extends NativeRNWhisperSpec {
      rnwhisper.installJSIBindings(promise);
    }
 
+   @ReactMethod
+   public void toggleNativeLog(boolean enabled, Promise promise) {
+     rnwhisper.toggleNativeLog(enabled, promise);
+   }
+
    @Override
    @NonNull
    public String getName() {
package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java CHANGED
@@ -31,6 +31,11 @@ public class RNWhisperModule extends ReactContextBaseJavaModule {
      rnwhisper.installJSIBindings(promise);
    }
 
+   @ReactMethod
+   public void toggleNativeLog(boolean enabled, Promise promise) {
+     rnwhisper.toggleNativeLog(enabled, promise);
+   }
+
    @Override
    @NonNull
    public String getName() {
package/ios/RNWhisper.mm CHANGED
@@ -24,6 +24,16 @@ RCT_EXPORT_MODULE()
    return NO;
  }
 
+ RCT_EXPORT_METHOD(toggleNativeLog:(BOOL)enabled) {
+   void (^onEmitLog)(NSString *level, NSString *text) = nil;
+   if (enabled) {
+     onEmitLog = ^(NSString *level, NSString *text) {
+       [self sendEventWithName:@"@RNWhisper_onNativeLog" body:@{ @"level": level, @"text": text }];
+     };
+   }
+   [RNWhisperContext toggleNativeLog:enabled onEmitLog:onEmitLog];
+ }
+
  - (NSDictionary *)constantsToExport
  {
    return @{
@@ -107,6 +117,7 @@ RCT_REMAP_METHOD(initContext,
      @"@RNWhisper_onTranscribeNewSegments",
      @"@RNWhisper_onRealtimeTranscribe",
      @"@RNWhisper_onRealtimeTranscribeEnd",
+     @"@RNWhisper_onNativeLog",
    ];
  }
 
package/ios/RNWhisperContext.h CHANGED
@@ -47,6 +47,7 @@ typedef struct {
    bool isMetalEnabled;
  }
 
+ + (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog;
  + (instancetype)initWithModelPath:(NSString *)modelPath contextId:(int)contextId noCoreML:(BOOL)noCoreML noMetal:(BOOL)noMetal useFlashAttn:(BOOL)useFlashAttn;
  - (bool)isMetalEnabled;
  - (NSString *)reasonNoMetal;
package/ios/RNWhisperContext.mm CHANGED
@@ -6,6 +6,52 @@
 
  @implementation RNWhisperContext
 
+ static void whisper_log_callback_default(wsp_ggml_log_level level, const char * text, void * user_data) {
+     (void) level;
+     (void) user_data;
+ #ifndef WHISPER_DEBUG
+     if (level == WSP_GGML_LOG_LEVEL_DEBUG) {
+         return;
+     }
+ #endif
+     fputs(text, stderr);
+     fflush(stderr);
+ }
+
+ static void* retained_log_block = nullptr;
+
+ + (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog {
+     if (enabled) {
+         void (^copiedBlock)(NSString *, NSString *) = [onEmitLog copy];
+         retained_log_block = (__bridge_retained void *)(copiedBlock);
+         whisper_log_set([](enum wsp_ggml_log_level level, const char * text, void * data) {
+             whisper_log_callback_default(level, text, data);
+             NSString *levelStr = @"";
+             if (level == WSP_GGML_LOG_LEVEL_ERROR) {
+                 levelStr = @"error";
+             } else if (level == WSP_GGML_LOG_LEVEL_INFO) {
+                 levelStr = @"info";
+             } else if (level == WSP_GGML_LOG_LEVEL_WARN) {
+                 levelStr = @"warn";
+             }
+
+             NSString *textStr = [NSString stringWithUTF8String:text];
+             // NOTE: Convert to UTF-8 string may fail
+             if (!textStr) {
+                 return;
+             }
+             void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))(data);
+             block(levelStr, textStr);
+         }, retained_log_block);
+     } else {
+         whisper_log_set(whisper_log_callback_default, nullptr);
+         if (retained_log_block) {
+             CFRelease(retained_log_block);
+             retained_log_block = nullptr;
+         }
+     }
+ }
+
  + (instancetype)initWithModelPath:(NSString *)modelPath
                          contextId:(int)contextId
                           noCoreML:(BOOL)noCoreML
package/lib/commonjs/AudioSessionIos.js CHANGED
@@ -51,10 +51,11 @@ exports.AudioSessionModeIos = AudioSessionModeIos;
  })(AudioSessionModeIos || (exports.AudioSessionModeIos = AudioSessionModeIos = {}));
  const checkPlatform = () => {
    if (_reactNative.Platform.OS !== 'ios') throw new Error('Only supported on iOS');
+   console.warn('AudioSessionIos is deprecated. To use whisper.rn for realtime transcription, use the new RealtimeTranscriber instead.');
  };
 
  /**
- * AudioSession Utility, iOS only.
+ * [Deprecated] AudioSession Utility, iOS only.
  */
  var _default = {
    Category: AudioSessionCategoryIos,
package/lib/commonjs/AudioSessionIos.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"names":["_reactNative","require","_NativeRNWhisper","_interopRequireDefault","obj","__esModule","default","AudioSessionCategoryIos","exports","AudioSessionCategoryOptionIos","AudioSessionModeIos","checkPlatform","Platform","OS","Error","_default","Category","CategoryOption","Mode","getCurrentCategory","_result$options","result","RNWhisper","getAudioSessionCurrentCategory","category","replace","options","map","option","getCurrentMode","mode","getAudioSessionCurrentMode","setCategory","setAudioSessionCategory","setMode","setAudioSessionMode","setActive","active","setAudioSessionActive"],"sourceRoot":"../../src","sources":["AudioSessionIos.ts"],"mappings":";;;;;;AAAA,IAAAA,YAAA,GAAAC,OAAA;AACA,IAAAC,gBAAA,GAAAC,sBAAA,CAAAF,OAAA;AAAyC,SAAAE,uBAAAC,GAAA,WAAAA,GAAA,IAAAA,GAAA,CAAAC,UAAA,GAAAD,GAAA,KAAAE,OAAA,EAAAF,GAAA;AAEzC;AACA;AACA;AAFA,IAGYG,uBAAuB;AASnC;AACA;AACA;AAFAC,OAAA,CAAAD,uBAAA,GAAAA,uBAAA;AAAA,WATYA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;AAAA,GAAvBA,uBAAuB,KAAAC,OAAA,CAAAD,uBAAA,GAAvBA,uBAAuB;AAAA,IAYvBE,6BAA6B;AAUzC;AACA;AACA;AAFAD,OAAA,CAAAC,6BAAA,GAAAA,6BAAA;AAAA,WAVYA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;AAAA,GAA7BA,6BAA6B,KAAAD,OAAA,CAAAC,6BAAA,GAA7BA,6BAA6B;AAAA,IAa7BC,mBAAmB;AAAAF,OAAA,CAAAE,mBAAA,GAAAA,mBAAA;AAAA,WAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;AAAA,GAAnBA,mBAAmB,KAAAF,OAAA,CAAAE,mBAAA,GAAnBA,mBAAmB;AAW/B,MAAMC,aAAa,GAAGA,CAAA,KAAM;EAC1B,IAAIC,qBAAQ,CAACC,EAAE,KAAK,KAAK,EAAE,MAAM,IAAIC,KAAK,CAAC,uBAAuB,CAAC;AACrE,CAAC;;AAED;AACA;AACA;AAFA,IAAAC,QAAA,GAGe;EACbC,QAAQ,EAAET,uBAAuB;EACjCU,cAAc,EAAER,6BAA6B;EAC7CS,IAAI,EAAER,mBAAmB;EAEzBS,kBAAkB,EAAE,MAAAA,CAAA,KAGd;IAAA,IAAAC,eAAA;IACJT,aAAa,EAAE;IACf,MAAMU,MAAM,GAAG,MAAMC,wBAAS,CAACC,8BAA8B,EAAE;IAC/D,OAAO;MACLC,QAAQ,EAAGH,MAAM,CAACG,QAAQ,CAACC,OAAO,CAAC,wBAAwB,EAAE,EAAE,CAA6B;MAC5FC,OAAO,GAAAN,eAAA,GAAEC,MAAM,CAACK,OAAO,cAAAN,eAAA,uBAAdA,eAAA,CAAgBO,GAAG,CAAEC,MAAc,IAAMA,MAAM,CAACH,OAAO,CAAC,8BAA8B,EAAE,EAAE,CAAmC;IACxI,CAAC;EACH,CAAC;EAEDI,cAAc,EAAE,MAAAA,CAAA,KAA0C;IACxDlB,aAAa,EAAE;IACf,MAAMmB,IAAI,GAAG,MAAMR,wBAAS,CAACS,0BAA0B,EAAE;IACzD,OAAQD,IAAI,CAACL,OAAO,CAAC,oBAAoB,EAAE,EAAE,CAAC;EAChD,CAAC;EAEDO,WAAW,EAAE,MAAAA,CACXR,QAAiC,EACjCE,OAAwC,KACtB;IAClBf,aAAa,EAAE;IACf,MAAMW,wBAAS,CAACW,uBAAuB,CAACT,QAAQ,EAAEE,OAAO,CAAC;EAC5D,CAAC;EAEDQ,OAAO,EAAE,MAAOJ,IAAyB,IAAoB;IAC3DnB,aAAa,EAAE;IACf,MAAMW,wBAAS,CAACa,mBAAmB,CAACL,IAAI,CAAC;EAC3C,CAAC;EAEDM,SAAS,EAAE,MAAOC,MAAe,IAAoB;IACnD1B,aAAa,EAAE;IACf,MAAMW,wBAAS,CAACgB,qBAAqB,CAACD,MAAM,CAAC;EAC/C;AACF,CAAC;AAAA7B,OAAA,CAAAF,OAAA,GAAAS,QAAA"}
+ {"version":3,"names":["_reactNative","require","_NativeRNWhisper","_interopRequireDefault","obj","__esModule","default","AudioSessionCategoryIos","exports","AudioSessionCategoryOptionIos","AudioSessionModeIos","checkPlatform","Platform","OS","Error","console","warn","_default","Category","CategoryOption","Mode","getCurrentCategory","_result$options","result","RNWhisper","getAudioSessionCurrentCategory","category","replace","options","map","option","getCurrentMode","mode","getAudioSessionCurrentMode","setCategory","setAudioSessionCategory","setMode","setAudioSessionMode","setActive","active","setAudioSessionActive"],"sourceRoot":"../../src","sources":["AudioSessionIos.ts"],"mappings":";;;;;;AAAA,IAAAA,YAAA,GAAAC,OAAA;AACA,IAAAC,gBAAA,GAAAC,sBAAA,CAAAF,OAAA;AAAyC,SAAAE,uBAAAC,GAAA,WAAAA,GAAA,IAAAA,GAAA,CAAAC,UAAA,GAAAD,GAAA,KAAAE,OAAA,EAAAF,GAAA;AAEzC;AACA;AACA;AAFA,IAGYG,uBAAuB;AASnC;AACA;AACA;AAFAC,OAAA,CAAAD,uBAAA,GAAAA,uBAAA;AAAA,WATYA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;EAAvBA,uBAAuB;AAAA,GAAvBA,uBAAuB,KAAAC,OAAA,CAAAD,uBAAA,GAAvBA,uBAAuB;AAAA,IAYvBE,6BAA6B;AAUzC;AACA;AACA;AAFAD,OAAA,CAAAC,6BAAA,GAAAA,6BAAA;AAAA,WAVYA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;EAA7BA,6BAA6B;AAAA,GAA7BA,6BAA6B,KAAAD,OAAA,CAAAC,6BAAA,GAA7BA,6BAA6B;AAAA,IAa7BC,mBAAmB;AAAAF,OAAA,CAAAE,mBAAA,GAAAA,mBAAA;AAAA,WAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;EAAnBA,mBAAmB;AAAA,GAAnBA,mBAAmB,KAAAF,OAAA,CAAAE,mBAAA,GAAnBA,mBAAmB;AAW/B,MAAMC,aAAa,GAAGA,CAAA,KAAM;EAC1B,IAAIC,qBAAQ,CAACC,EAAE,KAAK,KAAK,EAAE,MAAM,IAAIC,KAAK,CAAC,uBAAuB,CAAC;EACnEC,OAAO,CAACC,IAAI,CAAC,uHAAuH,CAAC;AACvI,CAAC;;AAED;AACA;AACA;AAFA,IAAAC,QAAA,GAGe;EACbC,QAAQ,EAAEX,uBAAuB;EACjCY,cAAc,EAAEV,6BAA6B;EAC7CW,IAAI,EAAEV,mBAAmB;EAEzBW,kBAAkB,EAAE,MAAAA,CAAA,KAGd;IAAA,IAAAC,eAAA;IACJX,aAAa,EAAE;IACf,MAAMY,MAAM,GAAG,MAAMC,wBAAS,CAACC,8BAA8B,EAAE;IAC/D,OAAO;MACLC,QAAQ,EAAGH,MAAM,CAACG,QAAQ,CAACC,OAAO,CAAC,wBAAwB,EAAE,EAAE,CAA6B;MAC5FC,OAAO,GAAAN,eAAA,GAAEC,MAAM,CAACK,OAAO,cAAAN,eAAA,uBAAdA,eAAA,CAAgBO,GAAG,CAAEC,MAAc,IAAMA,MAAM,CAACH,OAAO,CAAC,8BAA8B,EAAE,EAAE,CAAmC;IACxI,CAAC;EACH,CAAC;EAEDI,cAAc,EAAE,MAAAA,CAAA,KAA0C;IACxDpB,aAAa,EAAE;IACf,MAAMqB,IAAI,GAAG,MAAMR,wBAAS,CAACS,0BAA0B,EAAE;IACzD,OAAQD,IAAI,CAACL,OAAO,CAAC,oBAAoB,EAAE,EAAE,CAAC;EAChD,CAAC;EAEDO,WAAW,EAAE,MAAAA,CACXR,QAAiC,EACjCE,OAAwC,KACtB;IAClBjB,aAAa,EAAE;IACf,MAAMa,wBAAS,CAACW,uBAAuB,CAACT,QAAQ,EAAEE,OAAO,CAAC;EAC5D,CAAC;EAEDQ,OAAO,EAAE,MAAOJ,IAAyB,IAAoB;IAC3DrB,aAAa,EAAE;IACf,MAAMa,wBAAS,CAACa,mBAAmB,CAACL,IAAI,CAAC;EAC3C,CAAC;EAEDM,SAAS,EAAE,MAAOC,MAAe,IAAoB;IACnD5B,aAAa,EAAE;IACf,MAAMa,wBAAS,CAACgB,qBAAqB,CAACD,MAAM,CAAC;EAC/C;AACF,CAAC;AAAA/B,OAAA,CAAAF,OAAA,GAAAW,QAAA"}
package/lib/commonjs/NativeRNWhisper.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"names":["_reactNative","require","_default","TurboModuleRegistry","get","exports","default"],"sourceRoot":"../../src","sources":["NativeRNWhisper.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,OAAA;AAAkD,IAAAC,QAAA,GA4JnCC,gCAAmB,CAACC,GAAG,CAAO,WAAW,CAAC;AAAAC,OAAA,CAAAC,OAAA,GAAAJ,QAAA"}
+ {"version":3,"names":["_reactNative","require","_default","TurboModuleRegistry","get","exports","default"],"sourceRoot":"../../src","sources":["NativeRNWhisper.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,OAAA;AAAkD,IAAAC,QAAA,GA8JnCC,gCAAmB,CAACC,GAAG,CAAO,WAAW,CAAC;AAAAC,OAAA,CAAAC,OAAA,GAAAJ,QAAA"}
package/lib/commonjs/index.js CHANGED
@@ -10,11 +10,13 @@ Object.defineProperty(exports, "AudioSessionIos", {
    }
  });
  exports.WhisperVadContext = exports.WhisperContext = void 0;
+ exports.addNativeLogListener = addNativeLogListener;
  exports.initWhisper = initWhisper;
  exports.initWhisperVad = initWhisperVad;
  exports.libVersion = exports.isUseCoreML = exports.isCoreMLAllowFallback = void 0;
  exports.releaseAllWhisper = releaseAllWhisper;
  exports.releaseAllWhisperVad = releaseAllWhisperVad;
+ exports.toggleNativeLog = toggleNativeLog;
  var _reactNative = require("react-native");
  var _NativeRNWhisper = _interopRequireDefault(require("./NativeRNWhisper"));
  var _AudioSessionIos = _interopRequireDefault(require("./AudioSessionIos"));
@@ -41,8 +43,20 @@ if (_reactNative.Platform.OS === 'android') {
  }
  const EVENT_ON_TRANSCRIBE_PROGRESS = '@RNWhisper_onTranscribeProgress';
  const EVENT_ON_TRANSCRIBE_NEW_SEGMENTS = '@RNWhisper_onTranscribeNewSegments';
+ const EVENT_ON_NATIVE_LOG = '@RNWhisper_onNativeLog';
  const EVENT_ON_REALTIME_TRANSCRIBE = '@RNWhisper_onRealtimeTranscribe';
  const EVENT_ON_REALTIME_TRANSCRIBE_END = '@RNWhisper_onRealtimeTranscribeEnd';
+ const logListeners = [];
+
+ // @ts-ignore
+ if (EventEmitter) {
+   var _RNWhisper$toggleNati, _RNWhisper$toggleNati2, _RNWhisper$toggleNati3;
+   EventEmitter.addListener(EVENT_ON_NATIVE_LOG, evt => {
+     logListeners.forEach(listener => listener(evt.level, evt.text));
+   });
+   // Trigger unset to use default log callback
+ _NativeRNWhisper.default === null || _NativeRNWhisper.default === void 0 ? void 0 : (_RNWhisper$toggleNati = _NativeRNWhisper.default.toggleNativeLog) === null || _RNWhisper$toggleNati === void 0 ? void 0 : (_RNWhisper$toggleNati2 = _RNWhisper$toggleNati.call(_NativeRNWhisper.default, false)) === null || _RNWhisper$toggleNati2 === void 0 ? void 0 : (_RNWhisper$toggleNati3 = _RNWhisper$toggleNati2.catch) === null || _RNWhisper$toggleNati3 === void 0 ? void 0 : _RNWhisper$toggleNati3.call(_RNWhisper$toggleNati2, () => {});
+ }
  const updateAudioSession = async setting => {
    await _AudioSessionIos.default.setCategory(setting.category, setting.options || []);
    if (setting.mode) {
@@ -227,6 +241,7 @@ class WhisperContext {
    /** Transcribe the microphone audio stream, the microphone user permission is required */
    async transcribeRealtime() {
      let options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+     console.warn('`transcribeRealtime` is deprecated, use `RealtimeTranscriber` instead');
      let lastTranscribePayload;
      const slices = [];
      let sliceIndex = 0;
@@ -577,4 +592,15 @@ async function initWhisperVad(_ref4) {
  async function releaseAllWhisperVad() {
    return _NativeRNWhisper.default.releaseAllVadContexts();
  }
+ async function toggleNativeLog(enabled) {
+   return _NativeRNWhisper.default.toggleNativeLog(enabled);
+ }
+ function addNativeLogListener(listener) {
+   logListeners.push(listener);
+   return {
+     remove: () => {
+       logListeners.splice(logListeners.indexOf(listener), 1);
+     }
+   };
+ }
  //# sourceMappingURL=index.js.map
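Putting the JS side of the log bridge together: the compiled `index.js` above exports `toggleNativeLog(enabled)` and `addNativeLogListener(listener)`, the latter returning a `{ remove }` subscription fed by the `@RNWhisper_onNativeLog` event emitted from the Java/JNI and Objective-C code earlier in this diff. Below is a usage sketch based on those signatures; the surrounding app code is illustrative only:

```ts
import { toggleNativeLog, addNativeLogListener } from 'whisper.rn'

async function enableWhisperLogs() {
  // Ask the native side to install the whisper.cpp log hook
  await toggleNativeLog(true)
  // Each native log line arrives as (level, text), where level is
  // 'error' | 'info' | 'warn' | '' per the native callbacks above
  const subscription = addNativeLogListener((level: string, text: string) => {
    console.log(`[whisper.cpp${level ? `:${level}` : ''}] ${text.trim()}`)
  })
  // Tear down in reverse order when done
  return async () => {
    subscription.remove()
    await toggleNativeLog(false)
  }
}
```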