@inferrlm/react-native-mlx 0.4.1 → 0.4.2-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -66,6 +66,45 @@ class HybridLLM: HybridLLMSpec {
66
66
  allocatedMB, cacheMB, peakMB)
67
67
  }
68
68
 
69
/// Collects every EOS token id advertised by a model directory.
///
/// mlx-swift-lm only reads the top-level `eos_token_id` from
/// `config.json`; some models nest it inside a wrapper section
/// (`text_config`, `language_config`, `llm_config`), and
/// `generation_config.json` may list additional ids. Gather them
/// all so generation stops reliably.
///
/// - Parameter modelDir: Directory containing the model's JSON configs.
/// - Returns: The union of every id found; empty when `config.json`
///   is missing/unparseable or no id is present anywhere.
private func parseEosTokenIds(from modelDir: URL) -> Set<Int> {
    let configURL = modelDir.appendingPathComponent("config.json")
    guard let data = try? Data(contentsOf: configURL),
          let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any]
    else { return [] }

    var allIds = Set<Int>()

    // Top-level eos_token_id (scalar or array form).
    if let ids = extractEosIds(from: json) {
        allIds.formUnion(ids)
    }

    // Multimodal/wrapper configs nest the LM config under one of these keys.
    for key in ["text_config", "language_config", "llm_config"] {
        if let nested = json[key] as? [String: Any],
           let ids = extractEosIds(from: nested) {
            allIds.formUnion(ids)
        }
    }

    // generation_config.json often carries extra chat-template stop ids.
    // Union — not assignment — so ids already gathered from config.json
    // are kept rather than silently discarded.
    let genConfigURL = modelDir.appendingPathComponent("generation_config.json")
    if let genData = try? Data(contentsOf: genConfigURL),
       let genJson = try? JSONSerialization.jsonObject(with: genData) as? [String: Any],
       let ids = extractEosIds(from: genJson) {
        allIds.formUnion(ids)
    }

    return allIds
}
97
+
98
/// Pulls the EOS token id(s) out of a decoded JSON dictionary.
///
/// Accepts both shapes that appear in model configs: a scalar
/// (`"eos_token_id": 2`) and an array (`"eos_token_id": [2, 151645]`).
///
/// - Parameter dict: A dictionary produced by `JSONSerialization`.
/// - Returns: The ids found, or `nil` when the key is absent, has an
///   unexpected type, or is an empty array.
private func extractEosIds(from dict: [String: Any]) -> Set<Int>? {
    switch dict["eos_token_id"] {
    case let single as Int:
        return [single]
    case let many as [Int] where !many.isEmpty:
        return Set(many)
    default:
        return nil
    }
}
107
+
69
108
  private func buildToolSchema(from tool: ToolDefinition) -> ToolSpec {
70
109
  var properties: [String: [String: Any]] = [:]
71
110
  var required: [String] = []
@@ -125,6 +164,33 @@ class HybridLLM: HybridLLMSpec {
125
164
 
126
165
  try Task.checkCancellation()
127
166
 
167
+ /*
168
+ mlx-swift-lm only reads top-level eos_token_id from config.json.
169
+ Models like Qwen3.5 nest it inside text_config, leaving the stop
170
+ set empty. Parse it ourselves and patch the container.
171
+ Also add common chat stop tokens as extraEOSTokens.
172
+ */
173
+ let containerEos = await loadedContainer.configuration.eosTokenIds
174
+ let containerExtra = await loadedContainer.configuration.extraEOSTokens
175
+ log("EOS state after load - ids: \(containerEos), extra: \(containerExtra)")
176
+
177
+ let parsed = self.parseEosTokenIds(from: modelDir)
178
+ let chatStopTokens: Set<String> = ["<|endoftext|>", "<|im_end|>", "<|im_start|>"]
179
+ let needsIdPatch = containerEos.isEmpty && !parsed.isEmpty
180
+ let missingExtra = chatStopTokens.subtracting(containerExtra)
181
+
182
+ if needsIdPatch || !missingExtra.isEmpty {
183
+ await loadedContainer.update { ctx in
184
+ if needsIdPatch {
185
+ ctx.configuration.eosTokenIds = parsed
186
+ }
187
+ ctx.configuration.extraEOSTokens.formUnion(chatStopTokens)
188
+ }
189
+ let updated = await loadedContainer.configuration.eosTokenIds
190
+ let updatedExtra = await loadedContainer.configuration.extraEOSTokens
191
+ log("EOS patched - ids: \(updated), extra: \(updatedExtra)")
192
+ }
193
+
128
194
  let memoryAfterContainer = self.getMemoryUsage()
129
195
  let gpuAfterContainer = self.getGPUMemoryUsage()
130
196
  log("Model loaded - Host: \(memoryAfterContainer), GPU: \(gpuAfterContainer)")
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@inferrlm/react-native-mlx",
3
3
  "description": "MLX Swift integration for React Native - InferrLM fork with enhanced features",
4
- "version": "0.4.1",
4
+ "version": "0.4.2-alpha.1",
5
5
  "main": "./lib/module/index.js",
6
6
  "module": "./lib/module/index.js",
7
7
  "types": "./lib/typescript/src/index.d.ts",