whisper.rn 0.2.5 → 0.3.0-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -46,11 +46,12 @@ Add the following line to ```android/app/src/main/AndroidManifest.xml```
  ```js
  import { initWhisper } from 'whisper.rn'
 
- const filePath = 'file://.../ggml.base.en.bin'
- const sampleFilePath = 'file://.../sample.wav'
-
- const whisperContext = await initWhisper({ filePath })
+ const whisperContext = await initWhisper({
+   filePath: 'file://.../ggml-base.en.bin',
+   isBundleAsset: false, // Set to true to load the model from bundle resources; filePath is then a relative path like `ggml-base.en.bin`
+ })
 
+ const sampleFilePath = 'file://.../sample.wav'
  const options = { language: 'en' }
  const { stop, promise } = whisperContext.transcribe(sampleFilePath, options)
 
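For the new `isBundleAsset` option, a minimal sketch of loading a model that ships inside the app; this assumes you have placed `ggml-base.en.bin` in the app's bundle resources (e.g. the Android assets folder):

```js
import { initWhisper } from 'whisper.rn'

// With isBundleAsset: true, filePath is resolved against the app's
// bundle resources instead of the file system, so a relative name is used.
const whisperContext = await initWhisper({
  filePath: 'ggml-base.en.bin',
  isBundleAsset: true,
})
```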
@@ -80,6 +81,30 @@ In Android, you may need to request the microphone permission by [`PermissionAnd
 
  The documentation is not ready yet, please see the comments of [index](./src/index.ts) file for more details at the moment.
 
+ ## Core ML support
+
+ __*Platform: iOS 15.0+, tvOS 15.0+*__
+
+ To use Core ML on iOS, you will need to have the Core ML model files.
+
+ The `.mlmodelc` model path is resolved from the ggml model file path. For example, if your ggml model path is `ggml-base.en.bin`, the Core ML model path will be `ggml-base.en-encoder.mlmodelc`. Please note that the ggml model is still needed, as it provides the decoder and serves as an encoder fallback.
+
+ Currently there is no official way to download the Core ML models by URL, so you will need to convert the ggml model to a Core ML model folder yourself. Please see [Core ML Support](https://github.com/ggerganov/whisper.cpp#core-ml-support) of whisper.cpp for more details.
+
+ Since `.mlmodelc` is a directory, you will need to download 5 files:
+
+ ```json5
+ [
+   'model.mil',
+   'metadata.json',
+   'coremldata.bin',
+   'weights/weights.bin',
+   'analysis/coremldata.bin',
+ ]
+ ```
+
+ Or just add them to your app's bundle resources, like the example app does, but this would increase the app size significantly.
+
  ## Run with example
 
  The example app is using [react-native-fs](https://github.com/itinance/react-native-fs) to download the model file and audio file.
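Since `.mlmodelc` is a directory of five files, downloading it takes a few more steps than the single ggml file. A minimal sketch with react-native-fs, where `BASE_URL` is a hypothetical placeholder for wherever you host your converted model (there is no official download URL):

```js
import RNFS from 'react-native-fs'

// Hypothetical host for your self-converted Core ML model folder
const BASE_URL = 'https://example.com/ggml-base.en-encoder.mlmodelc'
const dest = `${RNFS.DocumentDirectoryPath}/ggml-base.en-encoder.mlmodelc`

const files = [
  'model.mil',
  'metadata.json',
  'coremldata.bin',
  'weights/weights.bin',
  'analysis/coremldata.bin',
]

// .mlmodelc is a directory, so create its subdirectories first
await RNFS.mkdir(`${dest}/weights`)
await RNFS.mkdir(`${dest}/analysis`)
for (const file of files) {
  await RNFS.downloadFile({
    fromUrl: `${BASE_URL}/${file}`,
    toFile: `${dest}/${file}`,
  }).promise
}
```

The ggml model itself should sit next to the `.mlmodelc` folder, since the Core ML model path is derived from the ggml model path.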
@@ -40,14 +40,19 @@ public class RNWhisperModule extends ReactContextBaseJavaModule implements Lifec
  private HashMap<Integer, WhisperContext> contexts = new HashMap<>();
 
  @ReactMethod
- public void initContext(final String modelPath, final Promise promise) {
+ public void initContext(final String modelPath, final boolean isBundleAsset, final Promise promise) {
  new AsyncTask<Void, Void, Integer>() {
  private Exception exception;
 
  @Override
  protected Integer doInBackground(Void... voids) {
  try {
- long context = WhisperContext.initContext(modelPath);
+ long context;
+ if (isBundleAsset) {
+ context = WhisperContext.initContextWithAsset(reactContext.getAssets(), modelPath);
+ } else {
+ context = WhisperContext.initContext(modelPath);
+ }
  if (context == 0) {
  throw new Exception("Failed to initialize context");
  }
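This is the native half of the `isBundleAsset` option shown in the README hunk above: the bridge method now takes the flag as its second argument. A hedged sketch of the raw bridge call, assuming the module is registered as `RNWhisper` (the registration name is not shown in this diff); in practice you would call `initWhisper` instead:

```js
import { NativeModules } from 'react-native'

// Hypothetical direct bridge call; initWhisper() wraps this for you.
const contextId = await NativeModules.RNWhisper.initContext(
  'ggml-base.en.bin', // asset-relative path when isBundleAsset is true
  true, // isBundleAsset
)
```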
@@ -186,8 +186,6 @@ public class WhisperContext {
  if (fullHandler != null) {
  fullHandler.join(); // Wait for full transcribe to finish
  }
- // Cleanup
- resetRealtimeTranscribe();
  recorder.stop();
  } catch (Exception e) {
  e.printStackTrace();
@@ -237,12 +235,11 @@ public class WhisperContext {
  }
 
  nSamplesOfIndex = sliceNSamples.get(transcribeSliceIndex);
- if (
- isStoppedByAction ||
+ boolean isStopped = isStoppedByAction ||
  !isCapturing &&
  nSamplesTranscribing == nSamplesOfIndex &&
- sliceIndex == transcribeSliceIndex
- ) {
+ sliceIndex == transcribeSliceIndex;
+ if (isStopped) {
  payload.putBoolean("isCapturing", false);
  payload.putBoolean("isStoppedByAction", isStoppedByAction);
  emitTranscribeEvent("@RNWhisper_onRealtimeTranscribeEnd", payload);
@@ -266,6 +263,9 @@ public class WhisperContext {
  if (!isCapturing && nSamplesTranscribing != nSamplesOfIndex) {
  // If no more capturing, continue transcribing until all slices are transcribed
  fullTranscribeSamples(options, true);
+ } else if (isStopped) {
+ // No next slice to transcribe, clean up the realtime state
+ resetRealtimeTranscribe();
  }
  isTranscribing = false;
  }
@@ -469,6 +469,7 @@ public class WhisperContext {
  }
 
  protected static native long initContext(String modelPath);
+ protected static native long initContextWithAsset(AssetManager assetManager, String modelPath);
  protected static native int fullTranscribe(
  int job_id,
  long context,
@@ -20,6 +20,39 @@ static inline int min(int a, int b) {
  return (a < b) ? a : b;
  }
 
+ static size_t asset_read(void *ctx, void *output, size_t read_size) {
+ return AAsset_read((AAsset *) ctx, output, read_size);
+ }
+
+ static bool asset_is_eof(void *ctx) {
+ return AAsset_getRemainingLength64((AAsset *) ctx) <= 0;
+ }
+
+ static void asset_close(void *ctx) {
+ AAsset_close((AAsset *) ctx);
+ }
+
+ static struct whisper_context *whisper_init_from_asset(
+ JNIEnv *env,
+ jobject assetManager,
+ const char *asset_path
+ ) {
+ LOGI("Loading model from asset '%s'\n", asset_path);
+ AAssetManager *asset_manager = AAssetManager_fromJava(env, assetManager);
+ AAsset *asset = AAssetManager_open(asset_manager, asset_path, AASSET_MODE_STREAMING);
+ if (!asset) {
+ LOGW("Failed to open '%s'\n", asset_path);
+ return NULL;
+ }
+ whisper_model_loader loader = {
+ .context = asset,
+ .read = &asset_read,
+ .eof = &asset_is_eof,
+ .close = &asset_close
+ };
+ return whisper_init(&loader);
+ }
+
  extern "C" {
 
  JNIEXPORT jlong JNICALL
@@ -33,6 +66,22 @@ Java_com_rnwhisper_WhisperContext_initContext(
  return reinterpret_cast<jlong>(context);
  }
 
+ JNIEXPORT jlong JNICALL
+ Java_com_rnwhisper_WhisperContext_initContextWithAsset(
+ JNIEnv *env,
+ jobject thiz,
+ jobject asset_manager,
+ jstring model_path_str
+ ) {
+ UNUSED(thiz);
+ struct whisper_context *context = nullptr;
+ const char *model_path_chars = env->GetStringUTFChars(model_path_str, nullptr);
+ context = whisper_init_from_asset(env, asset_manager, model_path_chars);
+ env->ReleaseStringUTFChars(model_path_str, model_path_chars);
+ return reinterpret_cast<jlong>(context);
+ }
+
+
  JNIEXPORT jint JNICALL
  Java_com_rnwhisper_WhisperContext_fullTranscribe(
  JNIEnv *env,
@@ -114,11 +163,7 @@ Java_com_rnwhisper_WhisperContext_fullTranscribe(
  params.temperature_inc = temperature_inc;
  }
  if (prompt != nullptr) {
- rn_whisper_convert_prompt(
- context,
- params,
- new std::string(env->GetStringUTFChars(prompt, nullptr))
- );
+ params.initial_prompt = env->GetStringUTFChars(prompt, nullptr);
  }
 
  params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
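This last hunk swaps the custom `rn_whisper_convert_prompt` helper for whisper.cpp's built-in `initial_prompt` parameter. The prompt still arrives through the JS transcribe options; a minimal sketch, assuming the option is exposed as `prompt` (its JS-side name is not visible in this diff):

```js
const { stop, promise } = whisperContext.transcribe(sampleFilePath, {
  language: 'en',
  // Forwarded to whisper_full_params.initial_prompt on the native side
  prompt: 'A conversation about whisper.rn and Core ML.',
})
const transcription = await promise
```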