react-native-nitro-mlx 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/MLXReactNative.podspec +42 -0
  2. package/ios/Bridge.h +8 -0
  3. package/ios/Sources/MLXReactNative.h +16 -0
  4. package/lib/module/index.js +6 -0
  5. package/lib/module/index.js.map +1 -0
  6. package/lib/module/llm.js +105 -0
  7. package/lib/module/llm.js.map +1 -0
  8. package/lib/module/modelManager.js +79 -0
  9. package/lib/module/modelManager.js.map +1 -0
  10. package/lib/module/models.js +41 -0
  11. package/lib/module/models.js.map +1 -0
  12. package/lib/module/package.json +1 -0
  13. package/lib/module/specs/LLM.nitro.js +4 -0
  14. package/lib/module/specs/LLM.nitro.js.map +1 -0
  15. package/lib/module/specs/ModelManager.nitro.js +4 -0
  16. package/lib/module/specs/ModelManager.nitro.js.map +1 -0
  17. package/lib/typescript/package.json +1 -0
  18. package/lib/typescript/src/index.d.ts +6 -0
  19. package/lib/typescript/src/index.d.ts.map +1 -0
  20. package/lib/typescript/src/llm.d.ts +69 -0
  21. package/lib/typescript/src/llm.d.ts.map +1 -0
  22. package/lib/typescript/src/modelManager.d.ts +53 -0
  23. package/lib/typescript/src/modelManager.d.ts.map +1 -0
  24. package/lib/typescript/src/models.d.ts +29 -0
  25. package/lib/typescript/src/models.d.ts.map +1 -0
  26. package/lib/typescript/src/specs/LLM.nitro.d.ts +61 -0
  27. package/lib/typescript/src/specs/LLM.nitro.d.ts.map +1 -0
  28. package/lib/typescript/src/specs/ModelManager.nitro.d.ts +41 -0
  29. package/lib/typescript/src/specs/ModelManager.nitro.d.ts.map +1 -0
  30. package/nitrogen/generated/.gitattributes +1 -0
  31. package/nitrogen/generated/ios/MLXReactNative+autolinking.rb +60 -0
  32. package/nitrogen/generated/ios/MLXReactNative-Swift-Cxx-Bridge.cpp +98 -0
  33. package/nitrogen/generated/ios/MLXReactNative-Swift-Cxx-Bridge.hpp +312 -0
  34. package/nitrogen/generated/ios/MLXReactNative-Swift-Cxx-Umbrella.hpp +55 -0
  35. package/nitrogen/generated/ios/MLXReactNativeAutolinking.mm +41 -0
  36. package/nitrogen/generated/ios/MLXReactNativeAutolinking.swift +40 -0
  37. package/nitrogen/generated/ios/c++/HybridLLMSpecSwift.cpp +11 -0
  38. package/nitrogen/generated/ios/c++/HybridLLMSpecSwift.hpp +132 -0
  39. package/nitrogen/generated/ios/c++/HybridModelManagerSpecSwift.cpp +11 -0
  40. package/nitrogen/generated/ios/c++/HybridModelManagerSpecSwift.hpp +116 -0
  41. package/nitrogen/generated/ios/swift/Func_void.swift +47 -0
  42. package/nitrogen/generated/ios/swift/Func_void_bool.swift +47 -0
  43. package/nitrogen/generated/ios/swift/Func_void_double.swift +47 -0
  44. package/nitrogen/generated/ios/swift/Func_void_std__exception_ptr.swift +47 -0
  45. package/nitrogen/generated/ios/swift/Func_void_std__string.swift +47 -0
  46. package/nitrogen/generated/ios/swift/Func_void_std__vector_std__string_.swift +47 -0
  47. package/nitrogen/generated/ios/swift/GenerationStats.swift +69 -0
  48. package/nitrogen/generated/ios/swift/HybridLLMSpec.swift +64 -0
  49. package/nitrogen/generated/ios/swift/HybridLLMSpec_cxx.swift +250 -0
  50. package/nitrogen/generated/ios/swift/HybridModelManagerSpec.swift +60 -0
  51. package/nitrogen/generated/ios/swift/HybridModelManagerSpec_cxx.swift +234 -0
  52. package/nitrogen/generated/shared/c++/GenerationStats.hpp +87 -0
  53. package/nitrogen/generated/shared/c++/HybridLLMSpec.cpp +32 -0
  54. package/nitrogen/generated/shared/c++/HybridLLMSpec.hpp +76 -0
  55. package/nitrogen/generated/shared/c++/HybridModelManagerSpec.cpp +27 -0
  56. package/nitrogen/generated/shared/c++/HybridModelManagerSpec.hpp +70 -0
  57. package/package.json +96 -0
  58. package/src/index.ts +6 -0
  59. package/src/llm.ts +116 -0
  60. package/src/modelManager.ts +88 -0
  61. package/src/models.ts +45 -0
  62. package/src/specs/LLM.nitro.ts +66 -0
  63. package/src/specs/ModelManager.nitro.ts +44 -0
package/MLXReactNative.podspec ADDED
@@ -0,0 +1,42 @@
+ require "json"
+
+ package = JSON.parse(File.read(File.join(__dir__, "package.json")))
+
+ Pod::Spec.new do |s|
+ s.name = "MLXReactNative"
+ s.version = package["version"]
+ s.summary = package["description"]
+ s.homepage = package["homepage"]
+ s.license = package["license"]
+ s.authors = package["author"]
+
+ s.platforms = { :ios => 26.0, :visionos => 1.0 }
+ s.source = { :git => "https://github.com/corasan/react-native-nitro-mlx.git", :tag => "#{s.version}" }
+
+ s.source_files = [
+ # Implementation (Swift)
+ "ios/Sources/**/*.{swift}",
+ # Autolinking/Registration (Objective-C++)
+ "ios/**/*.{m,mm}",
+ # Implementation (C++ objects)
+ "cpp/**/*.{hpp,cpp}",
+ ]
+
+ spm_dependency(s,
+ url: "https://github.com/ml-explore/mlx-swift-lm.git",
+ requirement: {kind: "upToNextMinorVersion", minimumVersion: "2.29.2"},
+ products: ["MLXLLM", "MLXLMCommon"]
+ )
+
+ s.pod_target_xcconfig = {
+ # C++ compiler flags, mainly for folly.
+ "GCC_PREPROCESSOR_DEFINITIONS" => "$(inherited) FOLLY_NO_CONFIG FOLLY_CFG_NO_COROUTINES"
+ }
+
+ load 'nitrogen/generated/ios/MLXReactNative+autolinking.rb'
+ add_nitrogen_files(s)
+
+ s.dependency 'React-jsi'
+ s.dependency 'React-callinvoker'
+ install_modules_dependencies(s)
+ end
package/ios/Bridge.h ADDED
@@ -0,0 +1,8 @@
+ //
+ // Bridge.h
+ // mlx
+ //
+ // Created by Henry Paulino on 2/20/2025
+ //
+
+ #pragma once
package/ios/Sources/MLXReactNative.h ADDED
@@ -0,0 +1,16 @@
+ //
+ // MLXReactNative.h
+ // MLXReactNative
+ //
+ // Created by Henry on 2/20/25.
+ //
+
+ #import <Foundation/Foundation.h>
+
+ //! Project version number for MLXReactNative.
+ FOUNDATION_EXPORT double MLXReactNativeVersionNumber;
+
+ //! Project version string for MLXReactNative.
+ FOUNDATION_EXPORT const unsigned char MLXReactNativeVersionString[];
+
+ // In this header, you should import all the public headers of your framework using statements like #import <MLXReactNative/PublicHeader.h>
package/lib/module/index.js ADDED
@@ -0,0 +1,6 @@
+ "use strict";
+
+ export { LLM } from "./llm.js";
+ export { ModelManager } from "./modelManager.js";
+ export { MLXModel } from "./models.js";
+ //# sourceMappingURL=index.js.map
package/lib/module/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":["LLM","ModelManager","MLXModel"],"sourceRoot":"../../src","sources":["index.ts"],"mappings":";;AAAA,SAASA,GAAG,QAAQ,UAAO;AAC3B,SAASC,YAAY,QAAQ,mBAAgB;AAC7C,SAASC,QAAQ,QAAQ,aAAU","ignoreList":[]}
package/lib/module/llm.js ADDED
@@ -0,0 +1,105 @@
+ "use strict";
+
+ import { NitroModules } from 'react-native-nitro-modules';
+ let instance = null;
+ function getInstance() {
+ if (!instance) {
+ instance = NitroModules.createHybridObject('LLM');
+ }
+ return instance;
+ }
+
+ /**
+ * LLM text generation using MLX on Apple Silicon.
+ *
+ * @example
+ * ```ts
+ * import { LLM } from 'react-native-nitro-mlx'
+ *
+ * // Load a model
+ * await LLM.load('mlx-community/Qwen3-0.6B-4bit', progress => {
+ * console.log(`Loading: ${(progress * 100).toFixed(0)}%`)
+ * })
+ *
+ * // Stream a response
+ * await LLM.stream('Hello!', token => {
+ * process.stdout.write(token)
+ * })
+ *
+ * // Get generation stats
+ * const stats = LLM.getLastGenerationStats()
+ * console.log(`${stats.tokensPerSecond} tokens/sec`)
+ * ```
+ */
+ export const LLM = {
+ /**
+ * Load a model into memory. Downloads the model from HuggingFace if not already cached.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param onProgress - Callback invoked with loading progress (0-1)
+ */
+ load(modelId, onProgress) {
+ return getInstance().load(modelId, onProgress);
+ },
+ /**
+ * Generate a complete response for a prompt. Blocks until generation is complete.
+ * For streaming responses, use `stream()` instead.
+ * @param prompt - The input text to generate a response for
+ * @returns The complete generated text
+ */
+ generate(prompt) {
+ return getInstance().generate(prompt);
+ },
+ /**
+ * Stream a response token by token.
+ * @param prompt - The input text to generate a response for
+ * @param onToken - Callback invoked for each generated token
+ * @returns The complete generated text
+ */
+ stream(prompt, onToken) {
+ return getInstance().stream(prompt, onToken);
+ },
+ /**
+ * Stop the current generation. Safe to call even if not generating.
+ */
+ stop() {
+ getInstance().stop();
+ },
+ /**
+ * Get statistics from the last generation.
+ * @returns Statistics including token count, tokens/sec, TTFT, and total time
+ */
+ getLastGenerationStats() {
+ return getInstance().getLastGenerationStats();
+ },
+ /** Whether a model is currently loaded and ready for generation */
+ get isLoaded() {
+ return getInstance().isLoaded;
+ },
+ /** Whether text is currently being generated */
+ get isGenerating() {
+ return getInstance().isGenerating;
+ },
+ /** The ID of the currently loaded model, or empty string if none */
+ get modelId() {
+ return getInstance().modelId;
+ },
+ /** Enable debug logging to console */
+ get debug() {
+ return getInstance().debug;
+ },
+ set debug(value) {
+ getInstance().debug = value;
+ },
+ /**
+ * System prompt used when loading the model.
+ * Set this before calling `load()`. Changes require reloading the model.
+ * @default "You are a helpful assistant."
+ */
+ get systemPrompt() {
+ return getInstance().systemPrompt;
+ },
+ set systemPrompt(value) {
+ getInstance().systemPrompt = value;
+ }
+ };
+ //# sourceMappingURL=llm.js.map
package/lib/module/llm.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":["NitroModules","instance","getInstance","createHybridObject","LLM","load","modelId","onProgress","generate","prompt","stream","onToken","stop","getLastGenerationStats","isLoaded","isGenerating","debug","value","systemPrompt"],"sourceRoot":"../../src","sources":["llm.ts"],"mappings":";;AAAA,SAASA,YAAY,QAAQ,4BAA4B;AAGzD,IAAIC,QAAwB,GAAG,IAAI;AAEnC,SAASC,WAAWA,CAAA,EAAY;EAC9B,IAAI,CAACD,QAAQ,EAAE;IACbA,QAAQ,GAAGD,YAAY,CAACG,kBAAkB,CAAU,KAAK,CAAC;EAC5D;EACA,OAAOF,QAAQ;AACjB;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,MAAMG,GAAG,GAAG;EACjB;AACF;AACA;AACA;AACA;EACEC,IAAIA,CAACC,OAAe,EAAEC,UAAsC,EAAiB;IAC3E,OAAOL,WAAW,CAAC,CAAC,CAACG,IAAI,CAACC,OAAO,EAAEC,UAAU,CAAC;EAChD,CAAC;EAED;AACF;AACA;AACA;AACA;AACA;EACEC,QAAQA,CAACC,MAAc,EAAmB;IACxC,OAAOP,WAAW,CAAC,CAAC,CAACM,QAAQ,CAACC,MAAM,CAAC;EACvC,CAAC;EAED;AACF;AACA;AACA;AACA;AACA;EACEC,MAAMA,CAACD,MAAc,EAAEE,OAAgC,EAAmB;IACxE,OAAOT,WAAW,CAAC,CAAC,CAACQ,MAAM,CAACD,MAAM,EAAEE,OAAO,CAAC;EAC9C,CAAC;EAED;AACF;AACA;EACEC,IAAIA,CAAA,EAAS;IACXV,WAAW,CAAC,CAAC,CAACU,IAAI,CAAC,CAAC;EACtB,CAAC;EAED;AACF;AACA;AACA;EACEC,sBAAsBA,CAAA,EAAoB;IACxC,OAAOX,WAAW,CAAC,CAAC,CAACW,sBAAsB,CAAC,CAAC;EAC/C,CAAC;EAED;EACA,IAAIC,QAAQA,CAAA,EAAY;IACtB,OAAOZ,WAAW,CAAC,CAAC,CAACY,QAAQ;EAC/B,CAAC;EAED;EACA,IAAIC,YAAYA,CAAA,EAAY;IAC1B,OAAOb,WAAW,CAAC,CAAC,CAACa,YAAY;EACnC,CAAC;EAED;EACA,IAAIT,OAAOA,CAAA,EAAW;IACpB,OAAOJ,WAAW,CAAC,CAAC,CAACI,OAAO;EAC9B,CAAC;EAED;EACA,IAAIU,KAAKA,CAAA,EAAY;IACnB,OAAOd,WAAW,CAAC,CAAC,CAACc,KAAK;EAC5B,CAAC;EAED,IAAIA,KAAKA,CAACC,KAAc,EAAE;IACxBf,WAAW,CAAC,CAAC,CAACc,KAAK,GAAGC,KAAK;EAC7B,CAAC;EAED;AACF;AACA;AACA;AACA;EACE,IAAIC,YAAYA,CAAA,EAAW;IACzB,OAAOhB,WAAW,CAAC,CAAC,CAACgB,YAAY;EACnC,CAAC;EAED,IAAIA,YAAYA,CAACD,KAAa,EAAE;IAC9Bf,WAAW,CAAC,CAAC,CAACgB,YAAY,GAAGD,KAAK;EACpC;AACF,CAAC","ignoreList":[]}
package/lib/module/modelManager.js ADDED
@@ -0,0 +1,79 @@
+ "use strict";
+
+ import { NitroModules } from 'react-native-nitro-modules';
+ let instance = null;
+ function getInstance() {
+ if (!instance) {
+ instance = NitroModules.createHybridObject('ModelManager');
+ }
+ return instance;
+ }
+
+ /**
+ * Manage MLX model downloads from HuggingFace.
+ *
+ * @example
+ * ```ts
+ * import { ModelManager } from 'react-native-nitro-mlx'
+ *
+ * // Download a model
+ * await ModelManager.download('mlx-community/Qwen3-0.6B-4bit', progress => {
+ * console.log(`Downloading: ${(progress * 100).toFixed(0)}%`)
+ * })
+ *
+ * // Check if downloaded
+ * const isReady = await ModelManager.isDownloaded('mlx-community/Qwen3-0.6B-4bit')
+ *
+ * // List all downloaded models
+ * const models = await ModelManager.getDownloadedModels()
+ * ```
+ */
+ export const ModelManager = {
+ /**
+ * Download a model from HuggingFace.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param progressCallback - Callback invoked with download progress (0-1)
+ * @returns Path to the downloaded model directory
+ */
+ download(modelId, progressCallback) {
+ return getInstance().download(modelId, progressCallback);
+ },
+ /**
+ * Check if a model is already downloaded.
+ * @param modelId - HuggingFace model ID
+ * @returns True if the model is fully downloaded
+ */
+ isDownloaded(modelId) {
+ return getInstance().isDownloaded(modelId);
+ },
+ /**
+ * Get a list of all downloaded model IDs.
+ * @returns Array of model IDs that are available locally
+ */
+ getDownloadedModels() {
+ return getInstance().getDownloadedModels();
+ },
+ /**
+ * Delete a downloaded model to free up disk space.
+ * @param modelId - HuggingFace model ID
+ */
+ deleteModel(modelId) {
+ return getInstance().deleteModel(modelId);
+ },
+ /**
+ * Get the local filesystem path for a downloaded model.
+ * @param modelId - HuggingFace model ID
+ * @returns Absolute path to the model directory
+ */
+ getModelPath(modelId) {
+ return getInstance().getModelPath(modelId);
+ },
+ /** Enable debug logging to console */
+ get debug() {
+ return getInstance().debug;
+ },
+ set debug(value) {
+ getInstance().debug = value;
+ }
+ };
+ //# sourceMappingURL=modelManager.js.map
package/lib/module/modelManager.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":["NitroModules","instance","getInstance","createHybridObject","ModelManager","download","modelId","progressCallback","isDownloaded","getDownloadedModels","deleteModel","getModelPath","debug","value"],"sourceRoot":"../../src","sources":["modelManager.ts"],"mappings":";;AAAA,SAASA,YAAY,QAAQ,4BAA4B;AAGzD,IAAIC,QAAiC,GAAG,IAAI;AAE5C,SAASC,WAAWA,CAAA,EAAqB;EACvC,IAAI,CAACD,QAAQ,EAAE;IACbA,QAAQ,GAAGD,YAAY,CAACG,kBAAkB,CAAmB,cAAc,CAAC;EAC9E;EACA,OAAOF,QAAQ;AACjB;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,MAAMG,YAAY,GAAG;EAC1B;AACF;AACA;AACA;AACA;AACA;EACEC,QAAQA,CACNC,OAAe,EACfC,gBAA4C,EAC3B;IACjB,OAAOL,WAAW,CAAC,CAAC,CAACG,QAAQ,CAACC,OAAO,EAAEC,gBAAgB,CAAC;EAC1D,CAAC;EAED;AACF;AACA;AACA;AACA;EACEC,YAAYA,CAACF,OAAe,EAAoB;IAC9C,OAAOJ,WAAW,CAAC,CAAC,CAACM,YAAY,CAACF,OAAO,CAAC;EAC5C,CAAC;EAED;AACF;AACA;AACA;EACEG,mBAAmBA,CAAA,EAAsB;IACvC,OAAOP,WAAW,CAAC,CAAC,CAACO,mBAAmB,CAAC,CAAC;EAC5C,CAAC;EAED;AACF;AACA;AACA;EACEC,WAAWA,CAACJ,OAAe,EAAiB;IAC1C,OAAOJ,WAAW,CAAC,CAAC,CAACQ,WAAW,CAACJ,OAAO,CAAC;EAC3C,CAAC;EAED;AACF;AACA;AACA;AACA;EACEK,YAAYA,CAACL,OAAe,EAAmB;IAC7C,OAAOJ,WAAW,CAAC,CAAC,CAACS,YAAY,CAACL,OAAO,CAAC;EAC5C,CAAC;EAED;EACA,IAAIM,KAAKA,CAAA,EAAY;IACnB,OAAOV,WAAW,CAAC,CAAC,CAACU,KAAK;EAC5B,CAAC;EAED,IAAIA,KAAKA,CAACC,KAAc,EAAE;IACxBX,WAAW,CAAC,CAAC,CAACU,KAAK,GAAGC,KAAK;EAC7B;AACF,CAAC","ignoreList":[]}
package/lib/module/models.js ADDED
@@ -0,0 +1,41 @@
+ "use strict";
+
+ export let MLXModel = /*#__PURE__*/function (MLXModel) {
+ // Llama 3.2 (Meta) - 1B and 3B variants
+ MLXModel["Llama_3_2_1B_Instruct_4bit"] = "mlx-community/Llama-3.2-1B-Instruct-4bit";
+ MLXModel["Llama_3_2_1B_Instruct_8bit"] = "mlx-community/Llama-3.2-1B-Instruct-8bit";
+ MLXModel["Llama_3_2_3B_Instruct_4bit"] = "mlx-community/Llama-3.2-3B-Instruct-4bit";
+ MLXModel["Llama_3_2_3B_Instruct_8bit"] = "mlx-community/Llama-3.2-3B-Instruct-8bit";
+ // Qwen 2.5 (Alibaba) - 0.5B, 1.5B, 3B variants
+ MLXModel["Qwen2_5_0_5B_Instruct_4bit"] = "mlx-community/Qwen2.5-0.5B-Instruct-4bit";
+ MLXModel["Qwen2_5_0_5B_Instruct_8bit"] = "mlx-community/Qwen2.5-0.5B-Instruct-8bit";
+ MLXModel["Qwen2_5_1_5B_Instruct_4bit"] = "mlx-community/Qwen2.5-1.5B-Instruct-4bit";
+ MLXModel["Qwen2_5_1_5B_Instruct_8bit"] = "mlx-community/Qwen2.5-1.5B-Instruct-8bit";
+ MLXModel["Qwen2_5_3B_Instruct_4bit"] = "mlx-community/Qwen2.5-3B-Instruct-4bit";
+ MLXModel["Qwen2_5_3B_Instruct_8bit"] = "mlx-community/Qwen2.5-3B-Instruct-8bit";
+ // Qwen 3 - 1.7B variant
+ MLXModel["Qwen3_1_7B_4bit"] = "mlx-community/Qwen3-1.7B-4bit";
+ MLXModel["Qwen3_1_7B_8bit"] = "mlx-community/Qwen3-1.7B-8bit";
+ // Gemma 3 (Google) - 1B variant
+ MLXModel["Gemma_3_1B_IT_4bit"] = "mlx-community/gemma-3-1b-it-4bit";
+ MLXModel["Gemma_3_1B_IT_8bit"] = "mlx-community/gemma-3-1b-it-8bit";
+ // Phi 3.5 Mini (Microsoft) - ~3.8B but runs well on mobile
+ MLXModel["Phi_3_5_Mini_Instruct_4bit"] = "mlx-community/Phi-3.5-mini-instruct-4bit";
+ MLXModel["Phi_3_5_Mini_Instruct_8bit"] = "mlx-community/Phi-3.5-mini-instruct-8bit";
+ // Phi 4 Mini (Microsoft)
+ MLXModel["Phi_4_Mini_Instruct_4bit"] = "mlx-community/Phi-4-mini-instruct-4bit";
+ MLXModel["Phi_4_Mini_Instruct_8bit"] = "mlx-community/Phi-4-mini-instruct-8bit";
+ // SmolLM (HuggingFace) - 1.7B
+ MLXModel["SmolLM_1_7B_Instruct_4bit"] = "mlx-community/SmolLM-1.7B-Instruct-4bit";
+ MLXModel["SmolLM_1_7B_Instruct_8bit"] = "mlx-community/SmolLM-1.7B-Instruct-8bit";
+ // SmolLM2 (HuggingFace) - 1.7B
+ MLXModel["SmolLM2_1_7B_Instruct_4bit"] = "mlx-community/SmolLM2-1.7B-Instruct-4bit";
+ MLXModel["SmolLM2_1_7B_Instruct_8bit"] = "mlx-community/SmolLM2-1.7B-Instruct-8bit";
+ // OpenELM (Apple) - 1.1B and 3B
+ MLXModel["OpenELM_1_1B_4bit"] = "mlx-community/OpenELM-1_1B-4bit";
+ MLXModel["OpenELM_1_1B_8bit"] = "mlx-community/OpenELM-1_1B-8bit";
+ MLXModel["OpenELM_3B_4bit"] = "mlx-community/OpenELM-3B-4bit";
+ MLXModel["OpenELM_3B_8bit"] = "mlx-community/OpenELM-3B-8bit";
+ return MLXModel;
+ }({});
+ //# sourceMappingURL=models.js.map
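
The `MLXModel` members above are plain strings holding mlx-community HuggingFace repo IDs, so they can be passed anywhere the package's `LLM` API expects a model ID. A minimal usage sketch (the `loadSmallModel` helper is hypothetical, not part of the package):

```ts
import { LLM, MLXModel } from 'react-native-nitro-mlx'

// Hypothetical helper: load one of the bundled model IDs, then run a one-shot generation.
async function loadSmallModel(): Promise<void> {
  await LLM.load(MLXModel.Qwen2_5_0_5B_Instruct_4bit, progress => {
    console.log(`Loading: ${(progress * 100).toFixed(0)}%`)
  })
  const reply = await LLM.generate('Summarize MLX in one sentence.')
  console.log(reply)
}
```
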
package/lib/module/models.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":["MLXModel"],"sourceRoot":"../../src","sources":["models.ts"],"mappings":";;AAAA,WAAYA,QAAQ,0BAARA,QAAQ;EAClB;EADUA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAOlB;EAPUA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAelB;EAfUA,QAAQ;EAARA,QAAQ;EAmBlB;EAnBUA,QAAQ;EAARA,QAAQ;EAuBlB;EAvBUA,QAAQ;EAARA,QAAQ;EA2BlB;EA3BUA,QAAQ;EAARA,QAAQ;EA+BlB;EA/BUA,QAAQ;EAARA,QAAQ;EAmClB;EAnCUA,QAAQ;EAARA,QAAQ;EAuClB;EAvCUA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAARA,QAAQ;EAAA,OAARA,QAAQ;AAAA","ignoreList":[]}
package/lib/module/package.json ADDED
@@ -0,0 +1 @@
+ {"type":"module"}
package/lib/module/specs/LLM.nitro.js ADDED
@@ -0,0 +1,4 @@
+ "use strict";
+
+ export {};
+ //# sourceMappingURL=LLM.nitro.js.map
package/lib/module/specs/LLM.nitro.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":[],"sourceRoot":"../../../src","sources":["specs/LLM.nitro.ts"],"mappings":"","ignoreList":[]}
package/lib/module/specs/ModelManager.nitro.js ADDED
@@ -0,0 +1,4 @@
+ "use strict";
+
+ export {};
+ //# sourceMappingURL=ModelManager.nitro.js.map
package/lib/module/specs/ModelManager.nitro.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"names":[],"sourceRoot":"../../../src","sources":["specs/ModelManager.nitro.ts"],"mappings":"","ignoreList":[]}
package/lib/typescript/package.json ADDED
@@ -0,0 +1 @@
+ {"type":"module"}
package/lib/typescript/src/index.d.ts ADDED
@@ -0,0 +1,6 @@
+ export { LLM } from './llm';
+ export { ModelManager } from './modelManager';
+ export { MLXModel } from './models';
+ export type { GenerationStats, LLM as LLMSpec } from './specs/LLM.nitro';
+ export type { ModelManager as ModelManagerSpec } from './specs/ModelManager.nitro';
+ //# sourceMappingURL=index.d.ts.map
package/lib/typescript/src/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,GAAG,EAAE,MAAM,OAAO,CAAA;AAC3B,OAAO,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAA;AAC7C,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AAEnC,YAAY,EAAE,eAAe,EAAE,GAAG,IAAI,OAAO,EAAE,MAAM,mBAAmB,CAAA;AACxE,YAAY,EAAE,YAAY,IAAI,gBAAgB,EAAE,MAAM,4BAA4B,CAAA"}
package/lib/typescript/src/llm.d.ts ADDED
@@ -0,0 +1,69 @@
+ import type { GenerationStats } from './specs/LLM.nitro';
+ /**
+ * LLM text generation using MLX on Apple Silicon.
+ *
+ * @example
+ * ```ts
+ * import { LLM } from 'react-native-nitro-mlx'
+ *
+ * // Load a model
+ * await LLM.load('mlx-community/Qwen3-0.6B-4bit', progress => {
+ * console.log(`Loading: ${(progress * 100).toFixed(0)}%`)
+ * })
+ *
+ * // Stream a response
+ * await LLM.stream('Hello!', token => {
+ * process.stdout.write(token)
+ * })
+ *
+ * // Get generation stats
+ * const stats = LLM.getLastGenerationStats()
+ * console.log(`${stats.tokensPerSecond} tokens/sec`)
+ * ```
+ */
+ export declare const LLM: {
+ /**
+ * Load a model into memory. Downloads the model from HuggingFace if not already cached.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param onProgress - Callback invoked with loading progress (0-1)
+ */
+ load(modelId: string, onProgress: (progress: number) => void): Promise<void>;
+ /**
+ * Generate a complete response for a prompt. Blocks until generation is complete.
+ * For streaming responses, use `stream()` instead.
+ * @param prompt - The input text to generate a response for
+ * @returns The complete generated text
+ */
+ generate(prompt: string): Promise<string>;
+ /**
+ * Stream a response token by token.
+ * @param prompt - The input text to generate a response for
+ * @param onToken - Callback invoked for each generated token
+ * @returns The complete generated text
+ */
+ stream(prompt: string, onToken: (token: string) => void): Promise<string>;
+ /**
+ * Stop the current generation. Safe to call even if not generating.
+ */
+ stop(): void;
+ /**
+ * Get statistics from the last generation.
+ * @returns Statistics including token count, tokens/sec, TTFT, and total time
+ */
+ getLastGenerationStats(): GenerationStats;
+ /** Whether a model is currently loaded and ready for generation */
+ readonly isLoaded: boolean;
+ /** Whether text is currently being generated */
+ readonly isGenerating: boolean;
+ /** The ID of the currently loaded model, or empty string if none */
+ readonly modelId: string;
+ /** Enable debug logging to console */
+ debug: boolean;
+ /**
+ * System prompt used when loading the model.
+ * Set this before calling `load()`. Changes require reloading the model.
+ * @default "You are a helpful assistant."
+ */
+ systemPrompt: string;
+ };
+ //# sourceMappingURL=llm.d.ts.map
package/lib/typescript/src/llm.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../../src/llm.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,eAAe,EAAkB,MAAM,mBAAmB,CAAA;AAWxE;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,eAAO,MAAM,GAAG;IACd;;;;OAIG;kBACW,MAAM,cAAc,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAI5E;;;;;OAKG;qBACc,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAIzC;;;;;OAKG;mBACY,MAAM,WAAW,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,MAAM,CAAC;IAIzE;;OAEG;YACK,IAAI;IAIZ;;;OAGG;8BACuB,eAAe;IAIzC,mEAAmE;uBACnD,OAAO;IAIvB,gDAAgD;2BAC5B,OAAO;IAI3B,oEAAoE;sBACrD,MAAM;IAIrB,sCAAsC;WACzB,OAAO;IAQpB;;;;OAIG;kBACiB,MAAM;CAO3B,CAAA"}
package/lib/typescript/src/modelManager.d.ts ADDED
@@ -0,0 +1,53 @@
+ /**
+ * Manage MLX model downloads from HuggingFace.
+ *
+ * @example
+ * ```ts
+ * import { ModelManager } from 'react-native-nitro-mlx'
+ *
+ * // Download a model
+ * await ModelManager.download('mlx-community/Qwen3-0.6B-4bit', progress => {
+ * console.log(`Downloading: ${(progress * 100).toFixed(0)}%`)
+ * })
+ *
+ * // Check if downloaded
+ * const isReady = await ModelManager.isDownloaded('mlx-community/Qwen3-0.6B-4bit')
+ *
+ * // List all downloaded models
+ * const models = await ModelManager.getDownloadedModels()
+ * ```
+ */
+ export declare const ModelManager: {
+ /**
+ * Download a model from HuggingFace.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param progressCallback - Callback invoked with download progress (0-1)
+ * @returns Path to the downloaded model directory
+ */
+ download(modelId: string, progressCallback: (progress: number) => void): Promise<string>;
+ /**
+ * Check if a model is already downloaded.
+ * @param modelId - HuggingFace model ID
+ * @returns True if the model is fully downloaded
+ */
+ isDownloaded(modelId: string): Promise<boolean>;
+ /**
+ * Get a list of all downloaded model IDs.
+ * @returns Array of model IDs that are available locally
+ */
+ getDownloadedModels(): Promise<string[]>;
+ /**
+ * Delete a downloaded model to free up disk space.
+ * @param modelId - HuggingFace model ID
+ */
+ deleteModel(modelId: string): Promise<void>;
+ /**
+ * Get the local filesystem path for a downloaded model.
+ * @param modelId - HuggingFace model ID
+ * @returns Absolute path to the model directory
+ */
+ getModelPath(modelId: string): Promise<string>;
+ /** Enable debug logging to console */
+ debug: boolean;
+ };
+ //# sourceMappingURL=modelManager.d.ts.map
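
Read together with the `LLM` declarations earlier in this diff, these typings suggest a pre-download flow: check the cache, fetch if needed, then load for generation. The sketch below is illustrative only; the `ensureReady` helper is hypothetical and error handling (e.g., rejected promises on network failure) is assumed, not documented in the diff:

```ts
import { LLM, ModelManager, MLXModel } from 'react-native-nitro-mlx'

// Hypothetical helper: download a model ahead of time, then load it for generation.
async function ensureReady(modelId: string = MLXModel.Qwen2_5_1_5B_Instruct_4bit): Promise<void> {
  if (!(await ModelManager.isDownloaded(modelId))) {
    await ModelManager.download(modelId, p => console.log(`Download: ${(p * 100).toFixed(0)}%`))
  }
  await LLM.load(modelId, p => console.log(`Load: ${(p * 100).toFixed(0)}%`))
  console.log('Model path:', await ModelManager.getModelPath(modelId))
}
```
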
package/lib/typescript/src/modelManager.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"modelManager.d.ts","sourceRoot":"","sources":["../../../src/modelManager.ts"],"names":[],"mappings":"AAYA;;;;;;;;;;;;;;;;;;GAkBG;AACH,eAAO,MAAM,YAAY;IACvB;;;;;OAKG;sBAEQ,MAAM,oBACG,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,GAC3C,OAAO,CAAC,MAAM,CAAC;IAIlB;;;;OAIG;0BACmB,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC;IAI/C;;;OAGG;2BACoB,OAAO,CAAC,MAAM,EAAE,CAAC;IAIxC;;;OAGG;yBACkB,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAI3C;;;;OAIG;0BACmB,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAI9C,sCAAsC;WACzB,OAAO;CAOrB,CAAA"}
package/lib/typescript/src/models.d.ts ADDED
@@ -0,0 +1,29 @@
+ export declare enum MLXModel {
+ Llama_3_2_1B_Instruct_4bit = "mlx-community/Llama-3.2-1B-Instruct-4bit",
+ Llama_3_2_1B_Instruct_8bit = "mlx-community/Llama-3.2-1B-Instruct-8bit",
+ Llama_3_2_3B_Instruct_4bit = "mlx-community/Llama-3.2-3B-Instruct-4bit",
+ Llama_3_2_3B_Instruct_8bit = "mlx-community/Llama-3.2-3B-Instruct-8bit",
+ Qwen2_5_0_5B_Instruct_4bit = "mlx-community/Qwen2.5-0.5B-Instruct-4bit",
+ Qwen2_5_0_5B_Instruct_8bit = "mlx-community/Qwen2.5-0.5B-Instruct-8bit",
+ Qwen2_5_1_5B_Instruct_4bit = "mlx-community/Qwen2.5-1.5B-Instruct-4bit",
+ Qwen2_5_1_5B_Instruct_8bit = "mlx-community/Qwen2.5-1.5B-Instruct-8bit",
+ Qwen2_5_3B_Instruct_4bit = "mlx-community/Qwen2.5-3B-Instruct-4bit",
+ Qwen2_5_3B_Instruct_8bit = "mlx-community/Qwen2.5-3B-Instruct-8bit",
+ Qwen3_1_7B_4bit = "mlx-community/Qwen3-1.7B-4bit",
+ Qwen3_1_7B_8bit = "mlx-community/Qwen3-1.7B-8bit",
+ Gemma_3_1B_IT_4bit = "mlx-community/gemma-3-1b-it-4bit",
+ Gemma_3_1B_IT_8bit = "mlx-community/gemma-3-1b-it-8bit",
+ Phi_3_5_Mini_Instruct_4bit = "mlx-community/Phi-3.5-mini-instruct-4bit",
+ Phi_3_5_Mini_Instruct_8bit = "mlx-community/Phi-3.5-mini-instruct-8bit",
+ Phi_4_Mini_Instruct_4bit = "mlx-community/Phi-4-mini-instruct-4bit",
+ Phi_4_Mini_Instruct_8bit = "mlx-community/Phi-4-mini-instruct-8bit",
+ SmolLM_1_7B_Instruct_4bit = "mlx-community/SmolLM-1.7B-Instruct-4bit",
+ SmolLM_1_7B_Instruct_8bit = "mlx-community/SmolLM-1.7B-Instruct-8bit",
+ SmolLM2_1_7B_Instruct_4bit = "mlx-community/SmolLM2-1.7B-Instruct-4bit",
+ SmolLM2_1_7B_Instruct_8bit = "mlx-community/SmolLM2-1.7B-Instruct-8bit",
+ OpenELM_1_1B_4bit = "mlx-community/OpenELM-1_1B-4bit",
+ OpenELM_1_1B_8bit = "mlx-community/OpenELM-1_1B-8bit",
+ OpenELM_3B_4bit = "mlx-community/OpenELM-3B-4bit",
+ OpenELM_3B_8bit = "mlx-community/OpenELM-3B-8bit"
+ }
+ //# sourceMappingURL=models.d.ts.map
package/lib/typescript/src/models.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"models.d.ts","sourceRoot":"","sources":["../../../src/models.ts"],"names":[],"mappings":"AAAA,oBAAY,QAAQ;IAElB,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IAGvE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IACvE,wBAAwB,2CAA2C;IACnE,wBAAwB,2CAA2C;IAGnE,eAAe,kCAAkC;IACjD,eAAe,kCAAkC;IAGjD,kBAAkB,qCAAqC;IACvD,kBAAkB,qCAAqC;IAGvD,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IAGvE,wBAAwB,2CAA2C;IACnE,wBAAwB,2CAA2C;IAGnE,yBAAyB,4CAA4C;IACrE,yBAAyB,4CAA4C;IAGrE,0BAA0B,6CAA6C;IACvE,0BAA0B,6CAA6C;IAGvE,iBAAiB,oCAAoC;IACrD,iBAAiB,oCAAoC;IACrD,eAAe,kCAAkC;IACjD,eAAe,kCAAkC;CAClD"}
package/lib/typescript/src/specs/LLM.nitro.d.ts ADDED
@@ -0,0 +1,61 @@
+ import type { HybridObject } from 'react-native-nitro-modules';
+ /**
+ * Statistics from the last text generation.
+ */
+ export interface GenerationStats {
+ /** Total number of tokens generated */
+ tokenCount: number;
+ /** Generation speed in tokens per second */
+ tokensPerSecond: number;
+ /** Time in milliseconds until the first token was generated */
+ timeToFirstToken: number;
+ /** Total generation time in milliseconds */
+ totalTime: number;
+ }
+ /**
+ * Low-level LLM interface for text generation using MLX.
+ * @internal Use the `LLM` export from `react-native-nitro-mlx` instead.
+ */
+ export interface LLM extends HybridObject<{
+ ios: 'swift';
+ }> {
+ /**
+ * Load a model into memory. Downloads from HuggingFace if not already cached.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param onProgress - Callback invoked with loading progress (0-1)
+ */
+ load(modelId: string, onProgress: (progress: number) => void): Promise<void>;
+ /**
+ * Generate a complete response for a prompt.
+ * @param prompt - The input text to generate a response for
+ * @returns The generated text
+ */
+ generate(prompt: string): Promise<string>;
+ /**
+ * Stream a response token by token.
+ * @param prompt - The input text to generate a response for
+ * @param onToken - Callback invoked for each generated token
+ * @returns The complete generated text
+ */
+ stream(prompt: string, onToken: (token: string) => void): Promise<string>;
+ /**
+ * Stop the current generation.
+ */
+ stop(): void;
+ /**
+ * Get statistics from the last generation.
+ * @returns Statistics including token count, speed, and timing
+ */
+ getLastGenerationStats(): GenerationStats;
+ /** Whether a model is currently loaded */
+ readonly isLoaded: boolean;
+ /** Whether text is currently being generated */
+ readonly isGenerating: boolean;
+ /** The ID of the currently loaded model */
+ readonly modelId: string;
+ /** Enable debug logging */
+ debug: boolean;
+ /** System prompt used when loading the model */
+ systemPrompt: string;
+ }
+ //# sourceMappingURL=LLM.nitro.d.ts.map
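
For reference, a short sketch of how the `GenerationStats` fields declared above might be reported after a streamed generation. The millisecond units follow the interface comments; `formatStats` and `runAndReport` are hypothetical helpers, not part of the package:

```ts
import { LLM, type GenerationStats } from 'react-native-nitro-mlx'

// Hypothetical helper: format the object returned by getLastGenerationStats().
function formatStats(stats: GenerationStats): string {
  return [
    `${stats.tokenCount} tokens`,
    `${stats.tokensPerSecond.toFixed(1)} tok/s`,
    `TTFT ${stats.timeToFirstToken.toFixed(0)} ms`,
    `total ${(stats.totalTime / 1000).toFixed(2)} s`,
  ].join(', ')
}

// Hypothetical usage: stream a prompt, then log the stats of that run.
async function runAndReport(prompt: string): Promise<void> {
  await LLM.stream(prompt, token => console.log(token))
  console.log(formatStats(LLM.getLastGenerationStats()))
}
```
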
package/lib/typescript/src/specs/LLM.nitro.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"LLM.nitro.d.ts","sourceRoot":"","sources":["../../../../src/specs/LLM.nitro.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAA;AAE9D;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,uCAAuC;IACvC,UAAU,EAAE,MAAM,CAAA;IAClB,4CAA4C;IAC5C,eAAe,EAAE,MAAM,CAAA;IACvB,+DAA+D;IAC/D,gBAAgB,EAAE,MAAM,CAAA;IACxB,4CAA4C;IAC5C,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,GAAI,SAAQ,YAAY,CAAC;IAAE,GAAG,EAAE,OAAO,CAAA;CAAE,CAAC;IACzD;;;;OAIG;IACH,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,UAAU,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAE5E;;;;OAIG;IACH,QAAQ,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAEzC;;;;;OAKG;IACH,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAEzE;;OAEG;IACH,IAAI,IAAI,IAAI,CAAA;IAEZ;;;OAGG;IACH,sBAAsB,IAAI,eAAe,CAAA;IAEzC,0CAA0C;IAC1C,QAAQ,CAAC,QAAQ,EAAE,OAAO,CAAA;IAC1B,gDAAgD;IAChD,QAAQ,CAAC,YAAY,EAAE,OAAO,CAAA;IAC9B,2CAA2C;IAC3C,QAAQ,CAAC,OAAO,EAAE,MAAM,CAAA;IAExB,2BAA2B;IAC3B,KAAK,EAAE,OAAO,CAAA;IACd,gDAAgD;IAChD,YAAY,EAAE,MAAM,CAAA;CACrB"}
package/lib/typescript/src/specs/ModelManager.nitro.d.ts ADDED
@@ -0,0 +1,41 @@
+ import type { HybridObject } from 'react-native-nitro-modules';
+ /**
+ * Low-level interface for managing MLX model downloads.
+ * @internal Use the `ModelManager` export from `react-native-nitro-mlx` instead.
+ */
+ export interface ModelManager extends HybridObject<{
+ ios: 'swift';
+ }> {
+ /**
+ * Download a model from HuggingFace.
+ * @param modelId - HuggingFace model ID (e.g., 'mlx-community/Qwen3-0.6B-4bit')
+ * @param progressCallback - Callback invoked with download progress (0-1)
+ * @returns Path to the downloaded model directory
+ */
+ download(modelId: string, progressCallback: (progress: number) => void): Promise<string>;
+ /**
+ * Check if a model is already downloaded.
+ * @param modelId - HuggingFace model ID
+ * @returns True if the model is downloaded
+ */
+ isDownloaded(modelId: string): Promise<boolean>;
+ /**
+ * Get a list of all downloaded model IDs.
+ * @returns Array of downloaded model IDs
+ */
+ getDownloadedModels(): Promise<string[]>;
+ /**
+ * Delete a downloaded model.
+ * @param modelId - HuggingFace model ID
+ */
+ deleteModel(modelId: string): Promise<void>;
+ /**
+ * Get the local filesystem path for a downloaded model.
+ * @param modelId - HuggingFace model ID
+ * @returns Path to the model directory
+ */
+ getModelPath(modelId: string): Promise<string>;
+ /** Enable debug logging */
+ debug: boolean;
+ }
+ //# sourceMappingURL=ModelManager.nitro.d.ts.map
package/lib/typescript/src/specs/ModelManager.nitro.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"ModelManager.nitro.d.ts","sourceRoot":"","sources":["../../../../src/specs/ModelManager.nitro.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAA;AAE9D;;;GAGG;AACH,MAAM,WAAW,YAAa,SAAQ,YAAY,CAAC;IAAE,GAAG,EAAE,OAAO,CAAA;CAAE,CAAC;IAClE;;;;;OAKG;IACH,QAAQ,CAAC,OAAO,EAAE,MAAM,EAAE,gBAAgB,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAExF;;;;OAIG;IACH,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;IAE/C;;;OAGG;IACH,mBAAmB,IAAI,OAAO,CAAC,MAAM,EAAE,CAAC,CAAA;IAExC;;;OAGG;IACH,WAAW,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAE3C;;;;OAIG;IACH,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAE9C,2BAA2B;IAC3B,KAAK,EAAE,OAAO,CAAA;CACf"}
package/nitrogen/generated/.gitattributes ADDED
@@ -0,0 +1 @@
+ ** linguist-generated=true