whisper.rn 0.4.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/README.md +69 -0
  2. package/android/src/main/java/com/rnwhisper/RNWhisper.java +211 -0
  3. package/android/src/main/java/com/rnwhisper/WhisperContext.java +34 -4
  4. package/android/src/main/java/com/rnwhisper/WhisperVadContext.java +157 -0
  5. package/android/src/main/jni.cpp +196 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnwhisper.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnwhisper_v8fp16_va_2.so +0 -0
  8. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper.so +0 -0
  9. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper_vfpv4.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnwhisper.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnwhisper_x86_64.so +0 -0
  12. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +26 -0
  13. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +26 -0
  14. package/ios/RNWhisper.mm +147 -0
  15. package/ios/RNWhisperContext.mm +18 -24
  16. package/ios/RNWhisperVadContext.h +29 -0
  17. package/ios/RNWhisperVadContext.mm +152 -0
  18. package/jest/mock.js +19 -0
  19. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  20. package/lib/commonjs/index.js +111 -1
  21. package/lib/commonjs/index.js.map +1 -1
  22. package/lib/module/NativeRNWhisper.js.map +1 -1
  23. package/lib/module/index.js +112 -0
  24. package/lib/module/index.js.map +1 -1
  25. package/lib/typescript/NativeRNWhisper.d.ts +35 -0
  26. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  27. package/lib/typescript/index.d.ts +39 -3
  28. package/lib/typescript/index.d.ts.map +1 -1
  29. package/package.json +1 -1
  30. package/src/NativeRNWhisper.ts +48 -0
  31. package/src/index.ts +132 -1
package/README.md CHANGED
@@ -103,6 +103,75 @@ subscribe(evt => {
  })
  ```

+ ## Voice Activity Detection (VAD)
+
+ Voice Activity Detection allows you to detect speech segments in audio data using the Silero VAD model.
+
+ #### Initialize VAD Context
+
+ ```typescript
+ import { initWhisperVad } from 'whisper.rn'
+
+ const vadContext = await initWhisperVad({
+   filePath: require('./assets/ggml-silero-v5.1.2.bin'), // VAD model file
+   useGpu: true, // Use GPU acceleration (iOS only)
+   nThreads: 4, // Number of threads for processing
+ })
+ ```
+
+ #### Detect Speech Segments
+
+ ##### From Audio Files
+
+ ```typescript
+ // Detect speech in an audio file (supports the same formats as transcribe)
+ const segments = await vadContext.detectSpeech(require('./assets/audio.wav'), {
+   threshold: 0.5, // Speech probability threshold (0.0-1.0)
+   minSpeechDurationMs: 250, // Minimum speech duration in ms
+   minSilenceDurationMs: 100, // Minimum silence duration in ms
+   maxSpeechDurationS: 30, // Maximum speech duration in seconds
+   speechPadMs: 30, // Padding around speech segments in ms
+   samplesOverlap: 0.1, // Overlap between analysis windows
+ })
+
+ // Also supports:
+ // - File paths: vadContext.detectSpeech('path/to/audio.wav', options)
+ // - HTTP URLs: vadContext.detectSpeech('https://example.com/audio.wav', options)
+ // - Base64 WAV: vadContext.detectSpeech('data:audio/wav;base64,...', options)
+ // - Assets: vadContext.detectSpeech(require('./assets/audio.wav'), options)
+ ```
+
+ ##### From Raw Audio Data
+
+ ```typescript
+ // Detect speech in base64-encoded float32 PCM data
+ const segments = await vadContext.detectSpeechData(base64AudioData, {
+   threshold: 0.5,
+   minSpeechDurationMs: 250,
+   minSilenceDurationMs: 100,
+   maxSpeechDurationS: 30,
+   speechPadMs: 30,
+   samplesOverlap: 0.1,
+ })
+ ```
+
+ #### Process Results
+
+ ```typescript
+ segments.forEach((segment, index) => {
+   console.log(`Segment ${index + 1}: ${segment.t0.toFixed(2)}s - ${segment.t1.toFixed(2)}s`)
+   console.log(`Duration: ${(segment.t1 - segment.t0).toFixed(2)}s`)
+ })
+ ```
+
+ #### Release VAD Context
+
+ ```typescript
+ await vadContext.release()
+ // Or release all VAD contexts
+ await releaseAllWhisperVad()
+ ```
+
  On iOS, you may need to change the Audio Session so that it can be used alongside other audio playback, or to optimize recording quality. We provide AudioSession utilities for this:
 
  Option 1 - Use options in transcribeRealtime:
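
> Editor's note: the `detectSpeechData` example above takes base64-encoded 32-bit float PCM, which matches the little-endian decode loop in the Android implementation below. A minimal sketch of producing that payload from a `Float32Array` — the `base64-js` dependency and the `encodePcmFloat32` helper name are our assumptions, not part of whisper.rn:

```typescript
import { fromByteArray } from 'base64-js'

// Hypothetical helper: pack mono float32 PCM samples into the base64
// string that vadContext.detectSpeechData() expects. Typed arrays use
// platform byte order, which is little-endian on the ARM/x86 targets
// React Native ships on.
function encodePcmFloat32(samples: Float32Array): string {
  const bytes = new Uint8Array(samples.buffer, samples.byteOffset, samples.byteLength)
  return fromByteArray(bytes)
}

// Usage sketch:
// const segments = await vadContext.detectSpeechData(encodePcmFloat32(pcm), { threshold: 0.5 })
```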
package/android/src/main/java/com/rnwhisper/RNWhisper.java CHANGED
@@ -13,6 +13,7 @@ import com.facebook.react.bridge.ReactMethod;
  import com.facebook.react.bridge.LifecycleEventListener;
  import com.facebook.react.bridge.ReadableMap;
  import com.facebook.react.bridge.WritableMap;
+ import com.facebook.react.bridge.WritableArray;
  import com.facebook.react.bridge.Arguments;

  import java.util.HashMap;
@@ -47,6 +48,7 @@ public class RNWhisper implements LifecycleEventListener {
  private HashMap<AsyncTask, String> tasks = new HashMap<>();

  private HashMap<Integer, WhisperContext> contexts = new HashMap<>();
+ private HashMap<Integer, WhisperVadContext> vadContexts = new HashMap<>();

  private int getResourceIdentifier(String filePath) {
    int identifier = reactContext.getResources().getIdentifier(
@@ -344,6 +346,211 @@ public class RNWhisper implements LifecycleEventListener {
  tasks.put(task, "releaseAllContexts");
  }

+ public void initVadContext(final ReadableMap options, final Promise promise) {
+   AsyncTask task = new AsyncTask<Void, Void, Integer>() {
+     private Exception exception;
+
+     @Override
+     protected Integer doInBackground(Void... voids) {
+       try {
+         String modelPath = options.getString("filePath");
+         boolean isBundleAsset = options.getBoolean("isBundleAsset");
+
+         String modelFilePath = modelPath;
+         if (!isBundleAsset && (modelPath.startsWith("http://") || modelPath.startsWith("https://"))) {
+           modelFilePath = downloader.downloadFile(modelPath);
+         }
+
+         long vadContext;
+         int resId = getResourceIdentifier(modelFilePath);
+         if (resId > 0) {
+           vadContext = WhisperContext.initVadContextWithInputStream(
+             new PushbackInputStream(reactContext.getResources().openRawResource(resId))
+           );
+         } else if (isBundleAsset) {
+           vadContext = WhisperContext.initVadContextWithAsset(reactContext.getAssets(), modelFilePath);
+         } else {
+           vadContext = WhisperContext.initVadContext(modelFilePath);
+         }
+         if (vadContext == 0) {
+           throw new Exception("Failed to initialize VAD context");
+         }
+         int id = Math.abs(new Random().nextInt());
+         WhisperVadContext whisperVadContext = new WhisperVadContext(id, reactContext, vadContext);
+         vadContexts.put(id, whisperVadContext);
+         return id;
+       } catch (Exception e) {
+         exception = e;
+         return null;
+       }
+     }
+
+     @Override
+     protected void onPostExecute(Integer id) {
+       if (exception != null) {
+         promise.reject(exception);
+         return;
+       }
+       WritableMap result = Arguments.createMap();
+       result.putInt("contextId", id);
+       result.putBoolean("gpu", false);
+       result.putString("reasonNoGPU", "Currently not supported");
+       promise.resolve(result);
+       tasks.remove(this);
+     }
+   }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   tasks.put(task, "initVadContext");
+ }
+
+ public void vadDetectSpeech(double id, String audioDataBase64, ReadableMap options, Promise promise) {
+   final WhisperVadContext vadContext = vadContexts.get((int) id);
+   if (vadContext == null) {
+     promise.reject("VAD context not found");
+     return;
+   }
+
+   AsyncTask task = new AsyncTask<Void, Void, WritableArray>() {
+     private Exception exception;
+
+     @Override
+     protected WritableArray doInBackground(Void... voids) {
+       try {
+         return vadContext.detectSpeech(audioDataBase64, options);
+       } catch (Exception e) {
+         exception = e;
+         return null;
+       }
+     }
+
+     @Override
+     protected void onPostExecute(WritableArray segments) {
+       if (exception != null) {
+         promise.reject(exception);
+         return;
+       }
+       promise.resolve(segments);
+       tasks.remove(this);
+     }
+   }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   tasks.put(task, "vadDetectSpeech-" + id);
+ }
+
+ public void vadDetectSpeechFile(double id, String filePathOrBase64, ReadableMap options, Promise promise) {
+   final WhisperVadContext vadContext = vadContexts.get((int) id);
+   if (vadContext == null) {
+     promise.reject("VAD context not found");
+     return;
+   }
+
+   AsyncTask task = new AsyncTask<Void, Void, WritableArray>() {
+     private Exception exception;
+
+     @Override
+     protected WritableArray doInBackground(Void... voids) {
+       try {
+         // Handle file processing like transcribeFile does
+         String filePath = filePathOrBase64;
+         if (filePathOrBase64.startsWith("http://") || filePathOrBase64.startsWith("https://")) {
+           filePath = downloader.downloadFile(filePathOrBase64);
+         }
+
+         float[] audioData;
+         int resId = getResourceIdentifier(filePath);
+         if (resId > 0) {
+           audioData = AudioUtils.decodeWaveFile(reactContext.getResources().openRawResource(resId));
+         } else if (filePathOrBase64.startsWith("data:audio/wav;base64,")) {
+           audioData = AudioUtils.decodeWaveData(filePathOrBase64);
+         } else {
+           audioData = AudioUtils.decodeWaveFile(new java.io.FileInputStream(new java.io.File(filePath)));
+         }
+
+         if (audioData == null) {
+           throw new Exception("Failed to load audio file: " + filePathOrBase64);
+         }
+
+         return vadContext.detectSpeechWithAudioData(audioData, options);
+       } catch (Exception e) {
+         exception = e;
+         return null;
+       }
+     }
+
+     @Override
+     protected void onPostExecute(WritableArray segments) {
+       if (exception != null) {
+         promise.reject(exception);
+         return;
+       }
+       promise.resolve(segments);
+       tasks.remove(this);
+     }
+   }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   tasks.put(task, "vadDetectSpeechFile-" + id);
+ }
+
+ public void releaseVadContext(double id, Promise promise) {
+   final int contextId = (int) id;
+   AsyncTask task = new AsyncTask<Void, Void, Void>() {
+     private Exception exception;
+
+     @Override
+     protected Void doInBackground(Void... voids) {
+       try {
+         WhisperVadContext vadContext = vadContexts.get(contextId);
+         if (vadContext == null) {
+           throw new Exception("VAD context " + id + " not found");
+         }
+         vadContext.release();
+         vadContexts.remove(contextId);
+       } catch (Exception e) {
+         exception = e;
+       }
+       return null;
+     }
+
+     @Override
+     protected void onPostExecute(Void result) {
+       if (exception != null) {
+         promise.reject(exception);
+         return;
+       }
+       promise.resolve(null);
+       tasks.remove(this);
+     }
+   }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   tasks.put(task, "releaseVadContext-" + id);
+ }
+
+ public void releaseAllVadContexts(Promise promise) {
+   AsyncTask task = new AsyncTask<Void, Void, Void>() {
+     private Exception exception;
+
+     @Override
+     protected Void doInBackground(Void... voids) {
+       try {
+         for (WhisperVadContext vadContext : vadContexts.values()) {
+           vadContext.release();
+         }
+         vadContexts.clear();
+       } catch (Exception e) {
+         exception = e;
+       }
+       return null;
+     }
+
+     @Override
+     protected void onPostExecute(Void result) {
+       if (exception != null) {
+         promise.reject(exception);
+         return;
+       }
+       promise.resolve(null);
+       tasks.remove(this);
+     }
+   }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+   tasks.put(task, "releaseAllVadContexts");
+ }
+
  @Override
  public void onHostResume() {
  }
@@ -367,8 +574,12 @@ public class RNWhisper implements LifecycleEventListener {
  for (WhisperContext context : contexts.values()) {
    context.release();
  }
+ for (WhisperVadContext vadContext : vadContexts.values()) {
+   vadContext.release();
+ }
  WhisperContext.abortAllTranscribe(); // graceful abort
  contexts.clear();
+ vadContexts.clear();
  downloader.clearCache();
  }
  }
package/android/src/main/java/com/rnwhisper/WhisperContext.java CHANGED
@@ -460,19 +460,19 @@ public class WhisperContext {
  }
  }

- private static boolean isArm64V8a() {
+ public static boolean isArm64V8a() {
    return Build.SUPPORTED_ABIS[0].equals("arm64-v8a");
  }

- private static boolean isArmeabiV7a() {
+ public static boolean isArmeabiV7a() {
    return Build.SUPPORTED_ABIS[0].equals("armeabi-v7a");
  }

- private static boolean isX86_64() {
+ public static boolean isX86_64() {
    return Build.SUPPORTED_ABIS[0].equals("x86_64");
  }

- private static String getCpuFeatures() {
+ public static String getCpuFeatures() {
    File file = new File("/proc/cpuinfo");
    StringBuilder stringBuilder = new StringBuilder();
    try {
@@ -492,6 +492,10 @@ public class WhisperContext {
  }
  }

+ public static String getLoadedLibrary() {
+   return loadedLibrary;
+ }
+
  // JNI methods
  protected static native long initContext(String modelPath);
  protected static native long initContextWithAsset(AssetManager assetManager, String modelPath);
@@ -529,4 +533,30 @@ public class WhisperContext {
    int n_samples
  );
  protected static native String bench(long context, int n_threads);
+
+ // VAD JNI methods
+ protected static native long initVadContext(String modelPath);
+ protected static native long initVadContextWithAsset(AssetManager assetManager, String modelPath);
+ protected static native long initVadContextWithInputStream(PushbackInputStream inputStream);
+ protected static native void freeVadContext(long vadContextPtr);
+ protected static native boolean vadDetectSpeech(long vadContextPtr, float[] audioData, int nSamples);
+ protected static native long vadGetSegmentsFromProbs(long vadContextPtr, float threshold,
+                                                      int minSpeechDurationMs, int minSilenceDurationMs,
+                                                      float maxSpeechDurationS, int speechPadMs,
+                                                      float samplesOverlap);
+ protected static native int vadGetNSegments(long segmentsPtr);
+ protected static native float vadGetSegmentT0(long segmentsPtr, int index);
+ protected static native float vadGetSegmentT1(long segmentsPtr, int index);
+ protected static native void vadFreeSegments(long segmentsPtr);
+
+ // Audio file loading utility for VAD
+ public static float[] loadAudioFileAsFloat32(String filePath) {
+   try {
+     java.io.FileInputStream fis = new java.io.FileInputStream(new java.io.File(filePath));
+     return AudioUtils.decodeWaveFile(fis);
+   } catch (Exception e) {
+     Log.e(NAME, "Failed to load audio file: " + filePath, e);
+     return null;
+   }
+ }
  }
package/android/src/main/java/com/rnwhisper/WhisperVadContext.java ADDED
@@ -0,0 +1,157 @@
+ package com.rnwhisper;
+
+ import com.facebook.react.bridge.Arguments;
+ import com.facebook.react.bridge.WritableArray;
+ import com.facebook.react.bridge.WritableMap;
+ import com.facebook.react.bridge.ReadableMap;
+ import com.facebook.react.bridge.ReactApplicationContext;
+
+ import android.util.Log;
+ import android.content.res.AssetManager;
+ import android.util.Base64;
+
+ import java.io.PushbackInputStream;
+
+ public class WhisperVadContext {
+   public static final String NAME = "RNWhisperVadContext";
+
+   private int id;
+   private ReactApplicationContext reactContext;
+   private long vadContext;
+
+   public WhisperVadContext(int id, ReactApplicationContext reactContext, long vadContext) {
+     this.id = id;
+     this.vadContext = vadContext;
+     this.reactContext = reactContext;
+   }
+
+   public WritableArray detectSpeech(String audioDataBase64, ReadableMap options) throws Exception {
+     if (vadContext == 0) {
+       throw new Exception("VAD context is null");
+     }
+
+     // Decode base64 audio data to a float array (little-endian float32)
+     byte[] audioBytes = Base64.decode(audioDataBase64, Base64.DEFAULT);
+     int numSamples = audioBytes.length / 4; // 4 bytes per float
+     float[] audioData = new float[numSamples];
+
+     for (int i = 0; i < numSamples; i++) {
+       int intBits = (audioBytes[i * 4] & 0xFF) |
+                     ((audioBytes[i * 4 + 1] & 0xFF) << 8) |
+                     ((audioBytes[i * 4 + 2] & 0xFF) << 16) |
+                     ((audioBytes[i * 4 + 3] & 0xFF) << 24);
+       audioData[i] = Float.intBitsToFloat(intBits);
+     }
+
+     return processVadDetection(audioData, numSamples, options);
+   }
+
+   public WritableArray detectSpeechFile(String filePathOrBase64, ReadableMap options) throws Exception {
+     if (vadContext == 0) {
+       throw new Exception("VAD context is null");
+     }
+
+     // Follow the same pattern as transcribeFile
+     String filePath = filePathOrBase64;
+
+     // Handle HTTP downloads
+     if (filePathOrBase64.startsWith("http://") || filePathOrBase64.startsWith("https://")) {
+       // Note: This would require access to the downloader, but for now we'll throw an error
+       throw new Exception("HTTP URLs not supported in VAD file detection. Please download the file first.");
+     }
+
+     float[] audioData;
+
+     // Check for resource identifier (bundled assets)
+     int resId = getResourceIdentifier(filePath);
+     if (resId > 0) {
+       audioData = AudioUtils.decodeWaveFile(reactContext.getResources().openRawResource(resId));
+     } else if (filePathOrBase64.startsWith("data:audio/wav;base64,")) {
+       // Handle base64 WAV data
+       audioData = AudioUtils.decodeWaveData(filePathOrBase64);
+     } else {
+       // Handle regular file path
+       audioData = AudioUtils.decodeWaveFile(new java.io.FileInputStream(new java.io.File(filePath)));
+     }
+
+     if (audioData == null) {
+       throw new Exception("Failed to load audio file: " + filePathOrBase64);
+     }
+
+     return processVadDetection(audioData, audioData.length, options);
+   }
+
+   public WritableArray detectSpeechWithAudioData(float[] audioData, ReadableMap options) throws Exception {
+     if (vadContext == 0) {
+       throw new Exception("VAD context is null");
+     }
+
+     return processVadDetection(audioData, audioData.length, options);
+   }
+
+   private int getResourceIdentifier(String filePath) {
+     int identifier = reactContext.getResources().getIdentifier(
+       filePath,
+       "drawable",
+       reactContext.getPackageName()
+     );
+     if (identifier == 0) {
+       identifier = reactContext.getResources().getIdentifier(
+         filePath,
+         "raw",
+         reactContext.getPackageName()
+       );
+     }
+     return identifier;
+   }
+
+   private WritableArray processVadDetection(float[] audioData, int numSamples, ReadableMap options) throws Exception {
+     // Run VAD detection using WhisperContext static methods
+     boolean speechDetected = WhisperContext.vadDetectSpeech(vadContext, audioData, numSamples);
+     if (!speechDetected) {
+       return Arguments.createArray();
+     }
+
+     // Set VAD parameters from options
+     float threshold = options.hasKey("threshold") ? (float) options.getDouble("threshold") : 0.5f;
+     int minSpeechDurationMs = options.hasKey("minSpeechDurationMs") ? options.getInt("minSpeechDurationMs") : 250;
+     int minSilenceDurationMs = options.hasKey("minSilenceDurationMs") ? options.getInt("minSilenceDurationMs") : 100;
+     float maxSpeechDurationS = options.hasKey("maxSpeechDurationS") ? (float) options.getDouble("maxSpeechDurationS") : 30.0f;
+     int speechPadMs = options.hasKey("speechPadMs") ? options.getInt("speechPadMs") : 30;
+     float samplesOverlap = options.hasKey("samplesOverlap") ? (float) options.getDouble("samplesOverlap") : 0.1f;
+
+     // Get segments from VAD using WhisperContext static methods
+     long segments = WhisperContext.vadGetSegmentsFromProbs(vadContext, threshold, minSpeechDurationMs,
+                                                            minSilenceDurationMs, maxSpeechDurationS,
+                                                            speechPadMs, samplesOverlap);
+     if (segments == 0) {
+       return Arguments.createArray();
+     }
+
+     // Convert segments to WritableArray using WhisperContext static methods
+     WritableArray result = Arguments.createArray();
+     int nSegments = WhisperContext.vadGetNSegments(segments);
+
+     for (int i = 0; i < nSegments; i++) {
+       float t0 = WhisperContext.vadGetSegmentT0(segments, i);
+       float t1 = WhisperContext.vadGetSegmentT1(segments, i);
+
+       WritableMap segment = Arguments.createMap();
+       segment.putDouble("t0", t0);
+       segment.putDouble("t1", t1);
+       result.pushMap(segment);
+     }
+
+     // Clean up using WhisperContext static methods
+     WhisperContext.vadFreeSegments(segments);
+
+     return result;
+   }
+
+   public void release() {
+     if (vadContext != 0) {
+       WhisperContext.freeVadContext(vadContext);
+       vadContext = 0;
+     }
+   }
+ }
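
> Editor's note: each segment produced by `processVadDetection` above carries a `t0`/`t1` pair, which the README's "Process Results" example treats as seconds. A common next step is slicing the raw PCM down to the detected speech before transcription; a hedged sketch, where `extractSpeech` and the 16 kHz sample-rate assumption are ours rather than package API:

```typescript
// Hypothetical helper (not part of whisper.rn): slice 16 kHz float32 PCM
// down to the detected speech segments, treating t0/t1 as seconds.
const SAMPLE_RATE = 16000

function extractSpeech(
  pcm: Float32Array,
  segments: Array<{ t0: number; t1: number }>,
): Float32Array[] {
  return segments.map(({ t0, t1 }) =>
    pcm.slice(
      Math.max(0, Math.floor(t0 * SAMPLE_RATE)),
      Math.min(pcm.length, Math.ceil(t1 * SAMPLE_RATE)),
    ),
  )
}
```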