whisper.rn 0.2.5 → 0.3.0-rc.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -46,11 +46,12 @@ Add the following line to ```android/app/src/main/AndroidManifest.xml```
  ```js
  import { initWhisper } from 'whisper.rn'

- const filePath = 'file://.../ggml.base.en.bin'
- const sampleFilePath = 'file://.../sample.wav'
-
- const whisperContext = await initWhisper({ filePath })
+ const whisperContext = await initWhisper({
+   filePath: 'file://.../ggml-base.en.bin',
+   isBundleAsset: false, // Set to true to load the model from bundle resources; the filePath should then look like `ggml-base.en.bin`
+ })

+ const sampleFilePath = 'file://.../sample.wav'
  const options = { language: 'en' }
  const { stop, promise } = whisperContext.transcribe(sampleFilePath, options)

@@ -80,6 +81,30 @@ In Android, you may need to request the microphone permission by [`PermissionAnd

  The documentation is not ready yet, please see the comments of [index](./src/index.ts) file for more details at the moment.

+ ## Core ML support
+
+ __*Platform: iOS 15.0+, tvOS 15.0+*__
+
+ To use Core ML on iOS, you will need to have the Core ML model files.
+
+ The `.mlmodelc` model path to load is derived from the ggml model file path. For example, if your ggml model path is `ggml-base.en.bin`, the Core ML model path will be `ggml-base.en-encoder.mlmodelc`. Please note that the ggml model is still needed, since it is used for the decoder and as an encoder fallback.
+
+ Currently there is no official way to download the Core ML models by URL, so you will need to convert the ggml model to a Core ML model folder yourself. Please see [Core ML Support](https://github.com/ggerganov/whisper.cpp#core-ml-support) in whisper.cpp for more details.
+
+ Since `.mlmodelc` is a directory, you will need to download 5 files:
+
+ ```json5
+ [
+   'model.mil',
+   'metadata.json',
+   'coremldata.bin',
+   'weights/weights.bin',
+   'analysis/coremldata.bin',
+ ]
+ ```
+
+ Or just add them to your app's bundle resources, like the example app does, but this would increase the app size significantly.
+
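> Illustrative sketch (not part of the whisper.rn package): since the example app already downloads files with react-native-fs, one way to fetch the five `.mlmodelc` files next to the ggml model at runtime might look like the following. The hosting URL is hypothetical; whisper.rn does not provide one.

```js
import RNFS from 'react-native-fs'

// Hypothetical host for a converted Core ML model – replace with your own.
const baseUrl = 'https://example.com/models/ggml-base.en-encoder.mlmodelc'
// Must sit next to the ggml model, e.g. `${RNFS.DocumentDirectoryPath}/ggml-base.en.bin`
const coreMLDir = `${RNFS.DocumentDirectoryPath}/ggml-base.en-encoder.mlmodelc`

const files = [
  'model.mil',
  'metadata.json',
  'coremldata.bin',
  'weights/weights.bin',
  'analysis/coremldata.bin',
]

// Create the directory tree, then download each file into place.
await RNFS.mkdir(`${coreMLDir}/weights`)
await RNFS.mkdir(`${coreMLDir}/analysis`)
for (const file of files) {
  await RNFS.downloadFile({
    fromUrl: `${baseUrl}/${file}`,
    toFile: `${coreMLDir}/${file}`,
  }).promise
}
```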
  ## Run with example

  The example app is using [react-native-fs](https://github.com/itinance/react-native-fs) to download the model file and audio file.
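A rough sketch of that flow in application code is shown below; the model URL and local paths are illustrative, not something whisper.rn provides.

```js
import RNFS from 'react-native-fs'
import { initWhisper } from 'whisper.rn'

// Example source: the ggml models published by the whisper.cpp project.
const modelUrl = 'https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin'
const modelPath = `${RNFS.DocumentDirectoryPath}/ggml-base.en.bin`

// Download once, then reuse the local copy on later launches.
if (!(await RNFS.exists(modelPath))) {
  await RNFS.downloadFile({ fromUrl: modelUrl, toFile: modelPath }).promise
}

const whisperContext = await initWhisper({
  filePath: `file://${modelPath}`,
  isBundleAsset: false, // the model lives on the filesystem, not in bundle resources
})
```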
@@ -40,14 +40,19 @@ public class RNWhisperModule extends ReactContextBaseJavaModule implements Lifec
    private HashMap<Integer, WhisperContext> contexts = new HashMap<>();

    @ReactMethod
-   public void initContext(final String modelPath, final Promise promise) {
+   public void initContext(final String modelPath, final boolean isBundleAsset, final Promise promise) {
      new AsyncTask<Void, Void, Integer>() {
        private Exception exception;

        @Override
        protected Integer doInBackground(Void... voids) {
          try {
-           long context = WhisperContext.initContext(modelPath);
+           long context;
+           if (isBundleAsset) {
+             context = WhisperContext.initContextWithAsset(reactContext.getAssets(), modelPath);
+           } else {
+             context = WhisperContext.initContext(modelPath);
+           }
            if (context == 0) {
              throw new Exception("Failed to initialize context");
            }
@@ -186,8 +186,6 @@ public class WhisperContext {
        if (fullHandler != null) {
          fullHandler.join(); // Wait for full transcribe to finish
        }
-       // Cleanup
-       resetRealtimeTranscribe();
        recorder.stop();
      } catch (Exception e) {
        e.printStackTrace();
@@ -237,12 +235,11 @@ public class WhisperContext {
        }

        nSamplesOfIndex = sliceNSamples.get(transcribeSliceIndex);
-       if (
-         isStoppedByAction ||
+       boolean isStopped = isStoppedByAction ||
          !isCapturing &&
          nSamplesTranscribing == nSamplesOfIndex &&
-         sliceIndex == transcribeSliceIndex
-       ) {
+         sliceIndex == transcribeSliceIndex;
+       if (isStopped) {
          payload.putBoolean("isCapturing", false);
          payload.putBoolean("isStoppedByAction", isStoppedByAction);
          emitTranscribeEvent("@RNWhisper_onRealtimeTranscribeEnd", payload);
@@ -266,6 +263,9 @@ public class WhisperContext {
        if (!isCapturing && nSamplesTranscribing != nSamplesOfIndex) {
          // If no more capturing, continue transcribing until all slices are transcribed
          fullTranscribeSamples(options, true);
+       } else if (isStopped) {
+         // No next, cleanup
+         resetRealtimeTranscribe();
        }
        isTranscribing = false;
      }
@@ -469,6 +469,7 @@ public class WhisperContext {
    }

    protected static native long initContext(String modelPath);
+   protected static native long initContextWithAsset(AssetManager assetManager, String modelPath);
    protected static native int fullTranscribe(
      int job_id,
      long context,
@@ -20,6 +20,39 @@ static inline int min(int a, int b) {
    return (a < b) ? a : b;
  }

+ static size_t asset_read(void *ctx, void *output, size_t read_size) {
+   return AAsset_read((AAsset *) ctx, output, read_size);
+ }
+
+ static bool asset_is_eof(void *ctx) {
+   return AAsset_getRemainingLength64((AAsset *) ctx) <= 0;
+ }
+
+ static void asset_close(void *ctx) {
+   AAsset_close((AAsset *) ctx);
+ }
+
+ static struct whisper_context *whisper_init_from_asset(
+   JNIEnv *env,
+   jobject assetManager,
+   const char *asset_path
+ ) {
+   LOGI("Loading model from asset '%s'\n", asset_path);
+   AAssetManager *asset_manager = AAssetManager_fromJava(env, assetManager);
+   AAsset *asset = AAssetManager_open(asset_manager, asset_path, AASSET_MODE_STREAMING);
+   if (!asset) {
+     LOGW("Failed to open '%s'\n", asset_path);
+     return NULL;
+   }
+   whisper_model_loader loader = {
+     .context = asset,
+     .read = &asset_read,
+     .eof = &asset_is_eof,
+     .close = &asset_close
+   };
+   return whisper_init(&loader);
+ }
+
  extern "C" {

  JNIEXPORT jlong JNICALL
@@ -33,6 +66,22 @@ Java_com_rnwhisper_WhisperContext_initContext(
    return reinterpret_cast<jlong>(context);
  }

+ JNIEXPORT jlong JNICALL
+ Java_com_rnwhisper_WhisperContext_initContextWithAsset(
+   JNIEnv *env,
+   jobject thiz,
+   jobject asset_manager,
+   jstring model_path_str
+ ) {
+   UNUSED(thiz);
+   struct whisper_context *context = nullptr;
+   const char *model_path_chars = env->GetStringUTFChars(model_path_str, nullptr);
+   context = whisper_init_from_asset(env, asset_manager, model_path_chars);
+   env->ReleaseStringUTFChars(model_path_str, model_path_chars);
+   return reinterpret_cast<jlong>(context);
+ }
+
+
  JNIEXPORT jint JNICALL
  Java_com_rnwhisper_WhisperContext_fullTranscribe(
    JNIEnv *env,
@@ -114,11 +163,7 @@ Java_com_rnwhisper_WhisperContext_fullTranscribe(
      params.temperature_inc = temperature_inc;
    }
    if (prompt != nullptr) {
-     rn_whisper_convert_prompt(
-       context,
-       params,
-       new std::string(env->GetStringUTFChars(prompt, nullptr))
-     );
+     params.initial_prompt = env->GetStringUTFChars(prompt, nullptr);
    }

    params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
@@ -0,0 +1,146 @@
+ //
+ // whisper-decoder-impl.h
+ //
+ // This file was automatically generated and should not be edited.
+ //
+
+ #import <Foundation/Foundation.h>
+ #import <CoreML/CoreML.h>
+ #include <stdint.h>
+ #include <os/log.h>
+
+ NS_ASSUME_NONNULL_BEGIN
+
+
+ /// Model Prediction Input Type
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_decoder_implInput : NSObject<MLFeatureProvider>
+
+ /// token_data as 1 by 1 matrix of 32-bit integers
+ @property (readwrite, nonatomic, strong) MLMultiArray * token_data;
+
+ /// audio_data as 1 × 384 × 1 × 1500 4-dimensional array of floats
+ @property (readwrite, nonatomic, strong) MLMultiArray * audio_data;
+ - (instancetype)init NS_UNAVAILABLE;
+ - (instancetype)initWithToken_data:(MLMultiArray *)token_data audio_data:(MLMultiArray *)audio_data NS_DESIGNATED_INITIALIZER;
+
+ @end
+
+
+ /// Model Prediction Output Type
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_decoder_implOutput : NSObject<MLFeatureProvider>
+
+ /// var_1346 as multidimensional array of floats
+ @property (readwrite, nonatomic, strong) MLMultiArray * var_1346;
+ - (instancetype)init NS_UNAVAILABLE;
+ - (instancetype)initWithVar_1346:(MLMultiArray *)var_1346 NS_DESIGNATED_INITIALIZER;
+
+ @end
+
+
+ /// Class for model loading and prediction
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_decoder_impl : NSObject
+ @property (readonly, nonatomic, nullable) MLModel * model;
+
+ /**
+     URL of the underlying .mlmodelc directory.
+ */
+ + (nullable NSURL *)URLOfModelInThisBundle;
+
+ /**
+     Initialize whisper_decoder_impl instance from an existing MLModel object.
+
+     Usually the application does not use this initializer unless it makes a subclass of whisper_decoder_impl.
+     Such application may want to use `-[MLModel initWithContentsOfURL:configuration:error:]` and `+URLOfModelInThisBundle` to create a MLModel object to pass-in.
+ */
+ - (instancetype)initWithMLModel:(MLModel *)model NS_DESIGNATED_INITIALIZER;
+
+ /**
+     Initialize whisper_decoder_impl instance with the model in this bundle.
+ */
+ - (nullable instancetype)init;
+
+ /**
+     Initialize whisper_decoder_impl instance with the model in this bundle.
+
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithConfiguration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Initialize whisper_decoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_decoder_impl.
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Initialize whisper_decoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_decoder_impl.
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Construct whisper_decoder_impl instance asynchronously with configuration.
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_decoder_impl instance or NSError object.
+ */
+ + (void)loadWithConfiguration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_decoder_impl * _Nullable model, NSError * _Nullable error))handler;
+
+ /**
+     Construct whisper_decoder_impl instance asynchronously with URL of .mlmodelc directory and optional configuration.
+
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param modelURL The model URL.
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_decoder_impl instance or NSError object.
+ */
+ + (void)loadContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_decoder_impl * _Nullable model, NSError * _Nullable error))handler;
+
+ /**
+     Make a prediction using the standard interface
+     @param input an instance of whisper_decoder_implInput to predict from
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_decoder_implOutput
+ */
+ - (nullable whisper_decoder_implOutput *)predictionFromFeatures:(whisper_decoder_implInput *)input error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Make a prediction using the standard interface
+     @param input an instance of whisper_decoder_implInput to predict from
+     @param options prediction options
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_decoder_implOutput
+ */
+ - (nullable whisper_decoder_implOutput *)predictionFromFeatures:(whisper_decoder_implInput *)input options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Make a prediction using the convenience interface
+     @param token_data as 1 by 1 matrix of 32-bit integers:
+     @param audio_data as 1 × 384 × 1 × 1500 4-dimensional array of floats:
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_decoder_implOutput
+ */
+ - (nullable whisper_decoder_implOutput *)predictionFromToken_data:(MLMultiArray *)token_data audio_data:(MLMultiArray *)audio_data error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Batch prediction
+     @param inputArray array of whisper_decoder_implInput instances to obtain predictions from
+     @param options prediction options
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the predictions as NSArray<whisper_decoder_implOutput *>
+ */
+ - (nullable NSArray<whisper_decoder_implOutput *> *)predictionsFromInputs:(NSArray<whisper_decoder_implInput*> *)inputArray options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+ @end
+
+ NS_ASSUME_NONNULL_END
@@ -0,0 +1,201 @@
+ //
+ // whisper-decoder-impl.m
+ //
+ // This file was automatically generated and should not be edited.
+ //
+
+ #if !__has_feature(objc_arc)
+ #error This file must be compiled with automatic reference counting enabled (-fobjc-arc)
+ #endif
+
+ #import "whisper-decoder-impl.h"
+
+ @implementation whisper_decoder_implInput
+
+ - (instancetype)initWithToken_data:(MLMultiArray *)token_data audio_data:(MLMultiArray *)audio_data {
+     self = [super init];
+     if (self) {
+         _token_data = token_data;
+         _audio_data = audio_data;
+     }
+     return self;
+ }
+
+ - (NSSet<NSString *> *)featureNames {
+     return [NSSet setWithArray:@[@"token_data", @"audio_data"]];
+ }
+
+ - (nullable MLFeatureValue *)featureValueForName:(NSString *)featureName {
+     if ([featureName isEqualToString:@"token_data"]) {
+         return [MLFeatureValue featureValueWithMultiArray:self.token_data];
+     }
+     if ([featureName isEqualToString:@"audio_data"]) {
+         return [MLFeatureValue featureValueWithMultiArray:self.audio_data];
+     }
+     return nil;
+ }
+
+ @end
+
+ @implementation whisper_decoder_implOutput
+
+ - (instancetype)initWithVar_1346:(MLMultiArray *)var_1346 {
+     self = [super init];
+     if (self) {
+         _var_1346 = var_1346;
+     }
+     return self;
+ }
+
+ - (NSSet<NSString *> *)featureNames {
+     return [NSSet setWithArray:@[@"var_1346"]];
+ }
+
+ - (nullable MLFeatureValue *)featureValueForName:(NSString *)featureName {
+     if ([featureName isEqualToString:@"var_1346"]) {
+         return [MLFeatureValue featureValueWithMultiArray:self.var_1346];
+     }
+     return nil;
+ }
+
+ @end
+
+ @implementation whisper_decoder_impl
+
+
+ /**
+     URL of the underlying .mlmodelc directory.
+ */
+ + (nullable NSURL *)URLOfModelInThisBundle {
+     NSString *assetPath = [[NSBundle bundleForClass:[self class]] pathForResource:@"whisper_decoder_impl" ofType:@"mlmodelc"];
+     if (nil == assetPath) { os_log_error(OS_LOG_DEFAULT, "Could not load whisper-decoder-impl.mlmodelc in the bundle resource"); return nil; }
+     return [NSURL fileURLWithPath:assetPath];
+ }
+
+
+ /**
+     Initialize whisper_decoder_impl instance from an existing MLModel object.
+
+     Usually the application does not use this initializer unless it makes a subclass of whisper_decoder_impl.
+     Such application may want to use `-[MLModel initWithContentsOfURL:configuration:error:]` and `+URLOfModelInThisBundle` to create a MLModel object to pass-in.
+ */
+ - (instancetype)initWithMLModel:(MLModel *)model {
+     self = [super init];
+     if (!self) { return nil; }
+     _model = model;
+     if (_model == nil) { return nil; }
+     return self;
+ }
+
+
+ /**
+     Initialize whisper_decoder_impl instance with the model in this bundle.
+ */
+ - (nullable instancetype)init {
+     return [self initWithContentsOfURL:(NSURL * _Nonnull)self.class.URLOfModelInThisBundle error:nil];
+ }
+
+
+ /**
+     Initialize whisper_decoder_impl instance with the model in this bundle.
+
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithConfiguration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     return [self initWithContentsOfURL:(NSURL * _Nonnull)self.class.URLOfModelInThisBundle configuration:configuration error:error];
+ }
+
+
+ /**
+     Initialize whisper_decoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_decoder_impl.
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     MLModel *model = [MLModel modelWithContentsOfURL:modelURL error:error];
+     if (model == nil) { return nil; }
+     return [self initWithMLModel:model];
+ }
+
+
+ /**
+     Initialize whisper_decoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_decoder_impl.
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     MLModel *model = [MLModel modelWithContentsOfURL:modelURL configuration:configuration error:error];
+     if (model == nil) { return nil; }
+     return [self initWithMLModel:model];
+ }
+
+
+ /**
+     Construct whisper_decoder_impl instance asynchronously with configuration.
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_decoder_impl instance or NSError object.
+ */
+ + (void)loadWithConfiguration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_decoder_impl * _Nullable model, NSError * _Nullable error))handler {
+     [self loadContentsOfURL:(NSURL * _Nonnull)[self URLOfModelInThisBundle]
+               configuration:configuration
+           completionHandler:handler];
+ }
+
+
+ /**
+     Construct whisper_decoder_impl instance asynchronously with URL of .mlmodelc directory and optional configuration.
+
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param modelURL The model URL.
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_decoder_impl instance or NSError object.
+ */
+ + (void)loadContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_decoder_impl * _Nullable model, NSError * _Nullable error))handler {
+     [MLModel loadContentsOfURL:modelURL
+                  configuration:configuration
+              completionHandler:^(MLModel *model, NSError *error) {
+         if (model != nil) {
+             whisper_decoder_impl *typedModel = [[whisper_decoder_impl alloc] initWithMLModel:model];
+             handler(typedModel, nil);
+         } else {
+             handler(nil, error);
+         }
+     }];
+ }
+
+ - (nullable whisper_decoder_implOutput *)predictionFromFeatures:(whisper_decoder_implInput *)input error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     return [self predictionFromFeatures:input options:[[MLPredictionOptions alloc] init] error:error];
+ }
+
+ - (nullable whisper_decoder_implOutput *)predictionFromFeatures:(whisper_decoder_implInput *)input options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     id<MLFeatureProvider> outFeatures = [self.model predictionFromFeatures:input options:options error:error];
+     if (!outFeatures) { return nil; }
+     return [[whisper_decoder_implOutput alloc] initWithVar_1346:(MLMultiArray *)[outFeatures featureValueForName:@"var_1346"].multiArrayValue];
+ }
+
+ - (nullable whisper_decoder_implOutput *)predictionFromToken_data:(MLMultiArray *)token_data audio_data:(MLMultiArray *)audio_data error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     whisper_decoder_implInput *input_ = [[whisper_decoder_implInput alloc] initWithToken_data:token_data audio_data:audio_data];
+     return [self predictionFromFeatures:input_ error:error];
+ }
+
+ - (nullable NSArray<whisper_decoder_implOutput *> *)predictionsFromInputs:(NSArray<whisper_decoder_implInput*> *)inputArray options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error {
+     id<MLBatchProvider> inBatch = [[MLArrayBatchProvider alloc] initWithFeatureProviderArray:inputArray];
+     id<MLBatchProvider> outBatch = [self.model predictionsFromBatch:inBatch options:options error:error];
+     if (!outBatch) { return nil; }
+     NSMutableArray<whisper_decoder_implOutput*> *results = [NSMutableArray arrayWithCapacity:(NSUInteger)outBatch.count];
+     for (NSInteger i = 0; i < outBatch.count; i++) {
+         id<MLFeatureProvider> resultProvider = [outBatch featuresAtIndex:i];
+         whisper_decoder_implOutput * result = [[whisper_decoder_implOutput alloc] initWithVar_1346:(MLMultiArray *)[resultProvider featureValueForName:@"var_1346"].multiArrayValue];
+         [results addObject:result];
+     }
+     return results;
+ }
+
+ @end
@@ -0,0 +1,142 @@
+ //
+ // whisper-encoder-impl.h
+ //
+ // This file was automatically generated and should not be edited.
+ //
+
+ #import <Foundation/Foundation.h>
+ #import <CoreML/CoreML.h>
+ #include <stdint.h>
+ #include <os/log.h>
+
+ NS_ASSUME_NONNULL_BEGIN
+
+
+ /// Model Prediction Input Type
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_encoder_implInput : NSObject<MLFeatureProvider>
+
+ /// logmel_data as 1 × 80 × 3000 3-dimensional array of floats
+ @property (readwrite, nonatomic, strong) MLMultiArray * logmel_data;
+ - (instancetype)init NS_UNAVAILABLE;
+ - (instancetype)initWithLogmel_data:(MLMultiArray *)logmel_data NS_DESIGNATED_INITIALIZER;
+
+ @end
+
+
+ /// Model Prediction Output Type
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_encoder_implOutput : NSObject<MLFeatureProvider>
+
+ /// output as multidimensional array of floats
+ @property (readwrite, nonatomic, strong) MLMultiArray * output;
+ - (instancetype)init NS_UNAVAILABLE;
+ - (instancetype)initWithOutput:(MLMultiArray *)output NS_DESIGNATED_INITIALIZER;
+
+ @end
+
+
+ /// Class for model loading and prediction
+ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((visibility("hidden")))
+ @interface whisper_encoder_impl : NSObject
+ @property (readonly, nonatomic, nullable) MLModel * model;
+
+ /**
+     URL of the underlying .mlmodelc directory.
+ */
+ + (nullable NSURL *)URLOfModelInThisBundle;
+
+ /**
+     Initialize whisper_encoder_impl instance from an existing MLModel object.
+
+     Usually the application does not use this initializer unless it makes a subclass of whisper_encoder_impl.
+     Such application may want to use `-[MLModel initWithContentsOfURL:configuration:error:]` and `+URLOfModelInThisBundle` to create a MLModel object to pass-in.
+ */
+ - (instancetype)initWithMLModel:(MLModel *)model NS_DESIGNATED_INITIALIZER;
+
+ /**
+     Initialize whisper_encoder_impl instance with the model in this bundle.
+ */
+ - (nullable instancetype)init;
+
+ /**
+     Initialize whisper_encoder_impl instance with the model in this bundle.
+
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithConfiguration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Initialize whisper_encoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_encoder_impl.
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Initialize whisper_encoder_impl instance from the model URL.
+
+     @param modelURL URL to the .mlmodelc directory for whisper_encoder_impl.
+     @param configuration The model configuration object
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+ */
+ - (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Construct whisper_encoder_impl instance asynchronously with configuration.
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_encoder_impl instance or NSError object.
+ */
+ + (void)loadWithConfiguration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_encoder_impl * _Nullable model, NSError * _Nullable error))handler;
+
+ /**
+     Construct whisper_encoder_impl instance asynchronously with URL of .mlmodelc directory and optional configuration.
+
+     Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+     @param modelURL The model URL.
+     @param configuration The model configuration
+     @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid whisper_encoder_impl instance or NSError object.
+ */
+ + (void)loadContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration completionHandler:(void (^)(whisper_encoder_impl * _Nullable model, NSError * _Nullable error))handler;
+
+ /**
+     Make a prediction using the standard interface
+     @param input an instance of whisper_encoder_implInput to predict from
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_encoder_implOutput
+ */
+ - (nullable whisper_encoder_implOutput *)predictionFromFeatures:(whisper_encoder_implInput *)input error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Make a prediction using the standard interface
+     @param input an instance of whisper_encoder_implInput to predict from
+     @param options prediction options
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_encoder_implOutput
+ */
+ - (nullable whisper_encoder_implOutput *)predictionFromFeatures:(whisper_encoder_implInput *)input options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Make a prediction using the convenience interface
+     @param logmel_data as 1 × 80 × 3000 3-dimensional array of floats:
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the prediction as whisper_encoder_implOutput
+ */
+ - (nullable whisper_encoder_implOutput *)predictionFromLogmel_data:(MLMultiArray *)logmel_data error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+
+ /**
+     Batch prediction
+     @param inputArray array of whisper_encoder_implInput instances to obtain predictions from
+     @param options prediction options
+     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
+     @return the predictions as NSArray<whisper_encoder_implOutput *>
+ */
+ - (nullable NSArray<whisper_encoder_implOutput *> *)predictionsFromInputs:(NSArray<whisper_encoder_implInput*> *)inputArray options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;
+ @end
+
+ NS_ASSUME_NONNULL_END