@automatalabs/react-native-transformers 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Automata Labs
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,346 @@
1
+ # @automatalabs/react-native-transformers
2
+
3
+ Use [`@huggingface/transformers`](https://www.npmjs.com/package/@huggingface/transformers) in Expo / React Native apps through [`onnxruntime-react-native`](https://www.npmjs.com/package/onnxruntime-react-native), without forking Transformers.js.
4
+
5
+ ## What this package does
6
+
7
+ - adds an Expo config plugin that composes `onnxruntime-react-native`
8
+ - adds a Metro helper that aliases Transformers.js onto a React Native wrapper
9
+ - routes `onnxruntime-node` and `onnxruntime-web` imports to a React Native adapter
10
+ - normalizes React Native-friendly device options like `coreml`, `xnnpack`, `nnapi`, and `qnn`
11
+ - prefers `expo/fetch` automatically when available for streamed model downloads
12
+ - caches downloaded model files with `expo-file-system` so they survive app restarts
13
+ - supports ONNX models that use external data files (for example `*.onnx_data`)
14
+
15
+ The package keeps the public app-facing API centered on:
16
+
17
+ ```js
18
+ import { pipeline, AutoTokenizer, AutoModelForSequenceClassification } from '@huggingface/transformers';
19
+ ```
20
+
21
+ ## Requirements
22
+
23
+ - Node `>= 18`
24
+ - `@huggingface/transformers` `^4`
25
+ - `onnxruntime-react-native` `>= 1.24.3 < 2`
26
+ - `react`
27
+ - `react-native`
28
+ - `expo` is optional, but this package is primarily aimed at Expo / Expo dev-client workflows
29
+ - `expo-file-system` is optional, but recommended if you want persistent model caching across app restarts
30
+
31
+ ## Install
32
+
33
+ In an Expo app, install your native/runtime dependencies with Expo and then install this package plus Transformers.js. Include `expo-file-system` if you want automatic persistent model caching:
34
+
35
+ ```sh
36
+ npx expo install expo react react-native onnxruntime-react-native expo-file-system
37
+ npm install @huggingface/transformers @automatalabs/react-native-transformers
38
+ ```
39
+
40
+ If your app already has Expo / React Native set up, you only need to add the missing packages.
41
+
42
+ ## Expo config plugin
43
+
44
+ Add the plugin in your app config:
45
+
46
+ ```js
47
+ // app.config.js
48
+ module.exports = {
49
+ expo: {
50
+ plugins: ['@automatalabs/react-native-transformers'],
51
+ },
52
+ };
53
+ ```
54
+
55
+ For local development against this repository's bundled `example/` app, a relative plugin path is more reliable:
56
+
57
+ ```json
58
+ {
59
+ "expo": {
60
+ "plugins": ["../app.plugin.js"]
61
+ }
62
+ }
63
+ ```
64
+
65
+ ### ONNX Runtime Extensions
66
+
67
+ You **do not** need ONNX Runtime Extensions just to use the `coreml`, `xnnpack`, `cpu`, `nnapi`, or `qnn` execution providers.
68
+
69
+ Only enable extensions if the model itself requires ONNX Runtime Extensions custom ops. When needed, add this top-level field to your app's root `package.json`:
70
+
71
+ ```json
72
+ {
73
+ "onnxruntimeExtensionsEnabled": "true"
74
+ }
75
+ ```
76
+
77
+ Then rebuild native code.
78
+
79
+ ## Metro
80
+
81
+ Install the Metro helper so React Native resolves Transformers.js through the wrapper and adds `onnx` / `ort` asset extensions:
82
+
83
+ ```js
84
+ // metro.config.js
85
+ const { getDefaultConfig } = require('expo/metro-config');
86
+ const { withTransformersReactNativeMetro } = require('@automatalabs/react-native-transformers/metro');
87
+
88
+ module.exports = withTransformersReactNativeMetro(getDefaultConfig(__dirname));
89
+ ```
90
+
91
+ ### Monorepos / local `file:..` development
92
+
93
+ If you are developing the library and the app side by side, you may also want `watchFolders` and explicit singleton aliases for packages like `react-native` and `onnxruntime-react-native`.
94
+
95
+ See [`example/metro.config.js`](./example/metro.config.js) for a working local-dev setup.
96
+
97
+ ## Babel
98
+
99
+ The published `@huggingface/transformers` web bundle uses `import.meta`, so Expo apps need Babel's import-meta transform enabled:
100
+
101
+ ```js
102
+ // babel.config.js
103
+ module.exports = function babelConfig(api) {
104
+ api.cache(true);
105
+
106
+ return {
107
+ presets: [['babel-preset-expo', { unstable_transformImportMeta: true }]],
108
+ };
109
+ };
110
+ ```
111
+
112
+ ## Basic usage
113
+
114
+ Once Metro is configured, import from `@huggingface/transformers` as usual.
115
+
116
+ ### Example: sentiment analysis pipeline
117
+
118
+ ```js
119
+ import { pipeline } from '@huggingface/transformers';
120
+
121
+ const classifier = await pipeline(
122
+ 'sentiment-analysis',
123
+ 'Xenova/distilbert-base-uncased-finetuned-sst-2-english',
124
+ {
125
+ device: 'coreml', // iOS: coreml -> cpu, Android users would typically use nnapi/qnn/xnnpack/cpu
126
+ dtype: 'q8',
127
+ },
128
+ );
129
+
130
+ const result = await classifier('Running Transformers.js in Expo feels great.');
131
+ console.log(result);
132
+ ```
133
+
134
+ ### Example: direct model helpers
135
+
136
+ ```js
137
+ import {
138
+ AutoModelForSequenceClassification,
139
+ AutoTokenizer,
140
+ } from '@huggingface/transformers';
141
+
142
+ const MODEL_ID = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
143
+
144
+ const tokenizer = await AutoTokenizer.from_pretrained(MODEL_ID, {
145
+ device: 'coreml',
146
+ });
147
+
148
+ const model = await AutoModelForSequenceClassification.from_pretrained(MODEL_ID, {
149
+ device: 'coreml',
150
+ dtype: 'q8',
151
+ });
152
+
153
+ const inputs = await tokenizer('React Native inference on device is useful.');
154
+ const output = await model(inputs);
155
+ console.log(output.logits.dims);
156
+ ```
157
+
158
+ ### Example: chat generation with `onnx-community/LFM2.5-350M-ONNX`
159
+
160
+ This model card explicitly documents chat-style usage with Transformers.js, and this package supports its ONNX external-data layout on React Native.
161
+
162
+ ```js
163
+ import { pipeline } from '@huggingface/transformers';
164
+
165
+ const generator = await pipeline(
166
+ 'text-generation',
167
+ 'onnx-community/LFM2.5-350M-ONNX',
168
+ {
169
+ device: 'coreml',
170
+ dtype: 'q4',
171
+ },
172
+ );
173
+
174
+ const messages = [
175
+ {
176
+ role: 'system',
177
+ content: 'You are a helpful assistant. Reply with one short sentence.',
178
+ },
179
+ {
180
+ role: 'user',
181
+ content: 'Explain one benefit of running AI directly on a phone.',
182
+ },
183
+ ];
184
+
185
+ const output = await generator(messages, {
186
+ max_new_tokens: 64,
187
+ do_sample: false,
188
+ repetition_penalty: 1.05,
189
+ });
190
+
191
+ const assistantMessage = output[0].generated_text.at(-1)?.content;
192
+ console.log(assistantMessage);
193
+ ```
194
+
195
+ ## React Native-specific device options
196
+
197
+ This package accepts React Native-oriented device shorthands and translates them into ONNX Runtime execution providers.
198
+
199
+ Common values:
200
+
201
+ - `auto`
202
+ - `coreml` (iOS)
203
+ - `xnnpack`
204
+ - `cpu`
205
+ - `nnapi` (Android)
206
+ - `qnn` (Android)
207
+
208
+ Example:
209
+
210
+ ```js
211
+ const generator = await pipeline('text-generation', MODEL_ID, {
212
+ device: 'xnnpack',
213
+ });
214
+ ```
215
+
216
+ Under the hood these are normalized into `session_options.executionProviders` so they work with current Transformers.js expectations.
217
+
218
+ ## Runtime helpers
219
+
220
+ The package also exports a few helpers from the root entrypoint.
221
+
222
+ ### List supported execution providers
223
+
224
+ ```js
225
+ import { getSupportedExecutionProviderNames } from '@automatalabs/react-native-transformers';
226
+
227
+ console.log(getSupportedExecutionProviderNames());
228
+ // e.g. ['cpu', 'xnnpack', 'coreml']
229
+ ```
230
+
231
+ ### Normalize options explicitly
232
+
233
+ ```js
234
+ import { normalizeTransformersOptions } from '@automatalabs/react-native-transformers';
235
+
236
+ const options = normalizeTransformersOptions({
237
+ device: 'coreml',
238
+ });
239
+
240
+ console.log(options.session_options.executionProviders);
241
+ ```
242
+
243
+ ## Notes
244
+
245
+ ### `coreml` means CoreML execution provider, not native `.mlmodel` loading
246
+
247
+ Inference still goes through ONNX Runtime. Using:
248
+
249
+ ```js
250
+ { device: 'coreml' }
251
+ ```
252
+
253
+ means “prefer ONNX Runtime's CoreML execution provider on iOS”, not “load a native CoreML model artifact directly”.
254
+
255
+ ### `expo/fetch`
256
+
257
+ The wrapper automatically prefers `expo/fetch` when available, because the default React Native fetch implementation does not expose the response stream reader that Transformers.js expects for efficient downloads.
258
+
259
+ You can still override `env.fetch` manually if you want to.
260
+
261
+ ### Model file caching
262
+
263
+ When `expo-file-system` is installed, downloaded model files are cached automatically under Expo's cache directory at:
264
+
265
+ - `Paths.cache/automatalabs-react-native-transformers/models`
266
+
267
+ That cache survives normal app restarts, but because it lives in the cache directory the OS may still evict it under storage pressure.
268
+
269
+ If you want a different location, you can provide your own cache implementation:
270
+
271
+ ```js
272
+ import { env } from '@huggingface/transformers';
273
+ import { Paths } from 'expo-file-system';
274
+ import { createExpoFileSystemCache } from '@automatalabs/react-native-transformers';
275
+
276
+ env.customCache = createExpoFileSystemCache({
277
+ directory: Paths.document,
278
+ });
279
+ env.useCustomCache = true;
280
+ ```
281
+
282
+ If `expo-file-system` is not installed, the package still works — it simply skips persistent model caching.
283
+
284
+ To disable persistent model caching entirely:
285
+
286
+ ```js
287
+ env.customCache = null;
288
+ env.useCustomCache = false;
289
+ ```
290
+
291
+ ### Fallback visibility
292
+
293
+ ONNX Runtime's JavaScript API does not expose exact per-node execution-provider usage for a successful session. You can know:
294
+
295
+ - what execution-provider order was requested
296
+ - whether your app retried on a different device / execution-provider chain
297
+
298
+ But you generally cannot prove exact per-op fallback from JavaScript alone.
299
+
300
+ ## How it works
301
+
302
+ This package takes a no-fork approach:
303
+
304
+ - aliases `@huggingface/transformers` to `src/transformers.js`
305
+ - aliases `onnxruntime-node`, `onnxruntime-web`, and `onnxruntime-web/webgpu` to a React Native adapter
306
+ - reuses the unified ONNX Runtime JavaScript API shape exposed by `onnxruntime-react-native`
307
+ - patches the create-session path for React Native buffer / external-data model loading
308
+ - normalizes public `from_pretrained()` and `pipeline()` options for React Native execution providers
309
+
310
+ ## Example app
311
+
312
+ The repository includes an Expo example in [`example/`](./example).
313
+
314
+ Run it with:
315
+
316
+ ```sh
317
+ npm install
318
+ npm run example:ios
319
+ ```
320
+
321
+ If you need a clean Metro session:
322
+
323
+ ```sh
324
+ cd example
325
+ npx expo start --dev-client --clear
326
+ ```
327
+
328
+ The current example app validates:
329
+
330
+ - speech-to-text generation with `onnx-community/granite-4.0-1b-speech-ONNX`
331
+ - chat generation with `onnx-community/LFM2.5-350M-ONNX`
332
+ - requested execution-provider order and app-level retry / fallback reporting
333
+
334
+ ## Package exports
335
+
336
+ - `@automatalabs/react-native-transformers`
337
+ - runtime helpers like `getSupportedExecutionProviderNames()`
338
+ - cache helpers like `createExpoFileSystemCache()`
339
+ - `@automatalabs/react-native-transformers/metro`
340
+ - Metro helper
341
+ - `@automatalabs/react-native-transformers/plugin`
342
+ - Expo config plugin entrypoint
343
+ - `@automatalabs/react-native-transformers/transformers`
344
+ - explicit wrapper entrypoint
345
+ - `@automatalabs/react-native-transformers/app.plugin`
346
+ - root plugin file
package/app.plugin.js ADDED
@@ -0,0 +1 @@
1
+ module.exports = require('./plugin/src');
package/package.json ADDED
@@ -0,0 +1,72 @@
1
+ {
2
+ "name": "@automatalabs/react-native-transformers",
3
+ "version": "0.1.0",
4
+ "description": "Use @huggingface/transformers in Expo and React Native apps through onnxruntime-react-native.",
5
+ "license": "MIT",
6
+ "author": "Automata Labs",
7
+ "homepage": "https://github.com/VikashLoomba/react-native-transformers#readme",
8
+ "bugs": {
9
+ "url": "https://github.com/VikashLoomba/react-native-transformers/issues"
10
+ },
11
+ "repository": {
12
+ "type": "git",
13
+ "url": "git+https://github.com/VikashLoomba/react-native-transformers.git"
14
+ },
15
+ "publishConfig": {
16
+ "access": "public"
17
+ },
18
+ "engines": {
19
+ "node": ">=18"
20
+ },
21
+ "keywords": [
22
+ "expo",
23
+ "react-native",
24
+ "huggingface",
25
+ "transformers",
26
+ "onnxruntime",
27
+ "onnxruntime-react-native",
28
+ "mobile-ai",
29
+ "on-device-ai"
30
+ ],
31
+ "scripts": {
32
+ "check": "node --check app.plugin.js && node --check plugin/src/index.js && node --check src/index.js && node --check src/runtime.js && node --check src/expoFileSystemCache.js && node --check src/metro.js && node --check src/transformers.js && node --check src/adapter/onnxruntime-web-webgpu.js",
33
+ "prepublishOnly": "npm run check",
34
+ "example": "npm --prefix example run start",
35
+ "example:ios": "npm --prefix example run ios"
36
+ },
37
+ "main": "./src/index.js",
38
+ "react-native": "./src/index.js",
39
+ "exports": {
40
+ ".": "./src/index.js",
41
+ "./metro": "./src/metro.js",
42
+ "./plugin": "./plugin/src/index.js",
43
+ "./transformers": "./src/transformers.js",
44
+ "./adapter/onnxruntime-web-webgpu": "./src/adapter/onnxruntime-web-webgpu.js",
45
+ "./app.plugin": "./app.plugin.js",
46
+ "./package.json": "./package.json"
47
+ },
48
+ "files": [
49
+ "plugin",
50
+ "src",
51
+ "app.plugin.js",
52
+ "README.md",
53
+ "LICENSE"
54
+ ],
55
+ "peerDependencies": {
56
+ "@huggingface/transformers": "^4.0.0",
57
+ "expo": "*",
58
+ "expo-file-system": "*",
59
+ "onnxruntime-react-native": ">=1.24.3 <2",
60
+ "react": "*",
61
+ "react-native": "*"
62
+ },
63
+ "peerDependenciesMeta": {
64
+ "expo": {
65
+ "optional": true
66
+ },
67
+ "expo-file-system": {
68
+ "optional": true
69
+ }
70
+ },
71
+ "dependencies": {}
72
+ }
@@ -0,0 +1,13 @@
1
// Resolves a module id from the host app's working directory first, then from
// this package's own location, so the plugin works both when installed in an
// app and when run from a local checkout.
function requireFromProject(moduleId) {
  const resolvedPath = require.resolve(moduleId, { paths: [process.cwd(), __dirname] });
  return require(resolvedPath);
}

// Expo config plugin entry point: delegates to the onnxruntime-react-native
// config plugin so apps only need to list this one plugin.
function withReactNativeTransformers(config) {
  const pluginModule = requireFromProject('onnxruntime-react-native/app.plugin');
  // Handle both ESM-transpiled (`default`) and plain CommonJS plugin exports.
  const applyOnnxruntimePlugin = pluginModule.default ?? pluginModule;
  return applyOnnxruntimePlugin(config);
}

module.exports = withReactNativeTransformers;
@@ -0,0 +1,246 @@
1
const { NativeModules } = require('react-native');
const ortReactNative = require('onnxruntime-react-native');
const { sanitizeSessionOptions } = require('../runtime');

// Native module registered by onnxruntime-react-native; may be undefined when
// the native side has not been built/linked.
const Module = NativeModules?.Onnxruntime;

// Calling install() is expected to publish `OrtApi` onto the global object.
// Only attempt it when that has not already happened and an installer exists.
if (typeof globalThis.OrtApi === 'undefined' && typeof Module?.install === 'function') {
  Module.install();
}

// When installation did not produce a global OrtApi, fall back to a Proxy
// that throws on first property access — this defers the failure from module
// load time to actual usage, with a clear error message.
const OrtApi =
  globalThis.OrtApi ??
  new Proxy(
    {},
    {
      get() {
        throw new Error(
          'OrtApi is not initialized. Please make sure Onnxruntime installation is successful.',
        );
      },
    },
  );
23
+
24
// Lookup table from numeric ONNX element-type ids to ORT JS type strings.
// Entries left `undefined` are type ids this adapter does not support.
const dataTypeStrings = [
  undefined, // 0 is unused
  'float32',
  'uint8',
  'int8',
  'uint16',
  'int16',
  'int32',
  'int64',
  'string',
  'bool',
  'float16',
  'float64',
  'uint32',
  'uint64',
  undefined, // ids 14-20: unsupported by this adapter
  undefined,
  undefined,
  undefined,
  undefined,
  undefined,
  undefined,
  'uint4',
  'int4',
];

// Builds the JS-side metadata entry for one tensor item, resolving its type
// string and replacing dynamic (-1) dimensions with their symbolic names.
function resolveTensorMetadata(item) {
  const type = dataTypeStrings[item.type];
  if (type === undefined) {
    throw new Error(`Unsupported data type: ${item.type}`);
  }

  const shape = item.shape.map((dim, index) => {
    if (dim === -1) {
      // Dynamic dimension: expose the symbolic name instead of -1.
      return item.symbolicDimensions[index];
    }
    if (dim >= 0) {
      return dim;
    }
    throw new Error(`Invalid dimension: ${dim}`);
  });

  return {
    name: item.name,
    isTensor: true,
    type,
    shape,
  };
}

/**
 * Converts the raw metadata array reported by the native session into the
 * `[names, metadata]` pair shape expected by the ORT JS session handler.
 *
 * @param {Array<object>} rawMetadata - native input/output metadata items.
 * @returns {[string[], object[]]} parallel arrays of names and metadata.
 * @throws {Error} on unsupported data types or negative non-dynamic dims.
 */
function fillNamesAndMetadata(rawMetadata = []) {
  const names = [];
  const metadata = [];

  for (const item of rawMetadata) {
    names.push(item.name);
    metadata.push(
      item.isTensor ? resolveTensorMetadata(item) : { name: item.name, isTensor: false },
    );
  }

  return [names, metadata];
}
92
+
93
/**
 * Maps an ORT string log level to its numeric native value.
 * An undefined level defaults to 'warning' (2).
 *
 * @param {string|undefined} logLevel - 'verbose' | 'info' | 'warning' | 'error' | 'fatal'.
 * @returns {number} numeric log level (0-4).
 * @throws {Error} for any unrecognized level string.
 */
function getLogLevelValue(logLevel) {
  if (logLevel === undefined) {
    return 2; // default: warning
  }

  const levelByName = { verbose: 0, info: 1, warning: 2, error: 3, fatal: 4 };

  // Own-property check so inherited keys (e.g. 'toString') never match.
  if (!Object.hasOwn(levelByName, logLevel)) {
    throw new Error(`Unsupported log level: ${logLevel}`);
  }

  return levelByName[logLevel];
}
110
+
111
/**
 * Validates and unpacks the overloaded `InferenceSession.create(...)` args.
 *
 * Supported call shapes:
 *   create(path, options?)
 *   create(uint8Array, options?)
 *   create(arrayBuffer, options?)
 *   create(arrayBuffer, byteOffset, byteLength?, options?)
 *
 * @param {Array} args - the raw arguments array.
 * @returns {{modelPath: string|null, modelBytes: Uint8Array|null, options: object}}
 * @throws {TypeError} when the arguments match none of the overloads.
 */
function normalizeCreateArguments(args) {
  const [arg0, arg1, arg2, arg3] = args;

  // For the path/Uint8Array overloads, options must be a plain object
  // (arrays and null are rejected, mirroring the upstream API).
  const toOptions = (candidate) => {
    if (
      candidate !== undefined &&
      (typeof candidate !== 'object' || candidate === null || Array.isArray(candidate))
    ) {
      throw new TypeError("'options' must be an object.");
    }
    return candidate ?? {};
  };

  if (typeof arg0 === 'string') {
    return { modelPath: arg0, modelBytes: null, options: toOptions(arg1) };
  }

  if (arg0 instanceof Uint8Array) {
    return { modelPath: null, modelBytes: arg0, options: toOptions(arg1) };
  }

  const isRawBuffer =
    arg0 instanceof ArrayBuffer ||
    (typeof SharedArrayBuffer !== 'undefined' && arg0 instanceof SharedArrayBuffer);

  if (isRawBuffer) {
    let byteOffset = 0;
    let byteLength = arg0.byteLength;
    let options = {};

    if (typeof arg1 === 'object' && arg1 !== null) {
      // create(buffer, options)
      options = arg1;
    } else if (typeof arg1 === 'number') {
      // create(buffer, byteOffset, byteLength?, options?)
      byteOffset = arg1;
      byteLength = typeof arg2 === 'number' ? arg2 : arg0.byteLength - byteOffset;
      options = typeof arg3 === 'object' && arg3 !== null ? arg3 : {};
    } else if (arg1 !== undefined) {
      throw new TypeError("'options' must be an object.");
    }

    return {
      modelPath: null,
      modelBytes: new Uint8Array(arg0, byteOffset, byteLength),
      options,
    };
  }

  throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");
}
165
+
166
/**
 * Returns an ArrayBuffer covering exactly the bytes of `uint8Array`.
 * Reuses the backing buffer when the view spans it entirely; otherwise
 * copies just the viewed range.
 *
 * @param {Uint8Array} uint8Array - source view.
 * @returns {ArrayBuffer} buffer whose length equals `uint8Array.byteLength`.
 */
function toExactArrayBuffer(uint8Array) {
  const { buffer, byteOffset, byteLength } = uint8Array;
  const coversWholeBuffer = byteOffset === 0 && byteLength === buffer.byteLength;
  return coversWholeBuffer ? buffer : buffer.slice(byteOffset, byteOffset + byteLength);
}
176
+
177
// Wraps a native inference session in the handler shape the ORT JS API
// consumes: exposes input/output names and metadata, and forwards
// run/dispose/profiling calls to the underlying session.
class ReactNativeSessionHandler {
  #session;

  constructor(session) {
    this.#session = session;

    const [inputNames, inputMetadata] = fillNamesAndMetadata(session.inputMetadata);
    this.inputNames = inputNames;
    this.inputMetadata = inputMetadata;

    const [outputNames, outputMetadata] = fillNamesAndMetadata(session.outputMetadata);
    this.outputNames = outputNames;
    this.outputMetadata = outputMetadata;
  }

  async run(feeds, fetches, options) {
    return this.#session.run(feeds, fetches, options);
  }

  async dispose() {
    this.#session.dispose();
  }

  // Kept as an alias of dispose(); callers may invoke either name.
  async release() {
    this.#session.dispose();
  }

  startProfiling() {
    // Intentionally a no-op: profiling is enabled at load time via session
    // options if requested.
  }

  endProfiling() {
    return this.#session.endProfiling();
  }
}
212
+
213
// InferenceSession whose create() loads models through the global OrtApi,
// supporting file paths as well as in-memory (Shared)ArrayBuffer/Uint8Array
// model sources on React Native.
class PatchedInferenceSession extends ortReactNative.InferenceSession {
  // Guards the one-time OrtApi.initOrtOnce() call for this JS context.
  static #initialized = false;

  /**
   * Creates a session from a model path, Uint8Array, or (Shared)ArrayBuffer.
   * Accepts the same overloads as the upstream InferenceSession.create().
   */
  static async create(...args) {
    const { modelPath, modelBytes, options } = normalizeCreateArguments(args);

    // Default the extensions library path from the native module unless the
    // caller supplied one explicitly.
    const sessionOptions = sanitizeSessionOptions({
      ...options,
      ortExtLibPath: options?.ortExtLibPath ?? Module?.ORT_EXTENSIONS_PATH,
    });

    if (!PatchedInferenceSession.#initialized) {
      // NOTE: the flag is set before the call, so if initOrtOnce throws it
      // will not be retried on subsequent create() calls.
      PatchedInferenceSession.#initialized = true;
      OrtApi.initOrtOnce(getLogLevelValue(ortReactNative.env.logLevel), ortReactNative.Tensor);
    }

    const session = OrtApi.createInferenceSession();

    // Paths load directly; byte sources are normalized to an exact-length
    // ArrayBuffer first.
    const modelSource =
      typeof modelPath === 'string' ? modelPath : toExactArrayBuffer(modelBytes);
    await session.loadModel(modelSource, sessionOptions);

    return new PatchedInferenceSession(new ReactNativeSessionHandler(session));
  }
}
239
+
240
+ module.exports = {
241
+ ...ortReactNative,
242
+ InferenceSession: PatchedInferenceSession,
243
+ Tensor: ortReactNative.Tensor,
244
+ env: ortReactNative.env,
245
+ listSupportedBackends: OrtApi.listSupportedBackends ?? ortReactNative.listSupportedBackends,
246
+ };