@inferrlm/react-native-mlx 0.4.2-alpha.0 → 0.4.2-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -72,16 +72,27 @@ class HybridLLM: HybridLLMSpec {
       let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any]
     else { return [] }

+    var allIds = Set<Int>()
+
     if let ids = extractEosIds(from: json) {
-      return ids
+      allIds.formUnion(ids)
+    }
+
+    for key in ["text_config", "language_config", "llm_config"] {
+      if let nested = json[key] as? [String: Any],
+        let ids = extractEosIds(from: nested) {
+        allIds.formUnion(ids)
+      }
     }

-    if let textConfig = json["text_config"] as? [String: Any],
-      let ids = extractEosIds(from: textConfig) {
-      return ids
+    let genConfigURL = modelDir.appendingPathComponent("generation_config.json")
+    if let genData = try? Data(contentsOf: genConfigURL),
+      let genJson = try? JSONSerialization.jsonObject(with: genData) as? [String: Any],
+      let ids = extractEosIds(from: genJson) {
+      allIds = ids
     }

-    return []
+    return allIds
   }

   private func extractEosIds(from dict: [String: Any]) -> Set<Int>? {
@@ -157,16 +168,27 @@ class HybridLLM: HybridLLMSpec {
       mlx-swift-lm only reads top-level eos_token_id from config.json.
       Models like Qwen3.5 nest it inside text_config, leaving the stop
       set empty. Parse it ourselves and patch the container.
+      Also add common chat stop tokens as extraEOSTokens.
       */
       let containerEos = await loadedContainer.configuration.eosTokenIds
-      if containerEos.isEmpty {
-        let parsed = self.parseEosTokenIds(from: modelDir)
-        if !parsed.isEmpty {
-          log("Patching eosTokenIds from config: \(parsed)")
-          await loadedContainer.update { ctx in
+      let containerExtra = await loadedContainer.configuration.extraEOSTokens
+      log("EOS state after load - ids: \(containerEos), extra: \(containerExtra)")
+
+      let parsed = self.parseEosTokenIds(from: modelDir)
+      let chatStopTokens: Set<String> = ["<|endoftext|>", "<|im_end|>", "<|im_start|>"]
+      let needsIdPatch = containerEos.isEmpty && !parsed.isEmpty
+      let missingExtra = chatStopTokens.subtracting(containerExtra)
+
+      if needsIdPatch || !missingExtra.isEmpty {
+        await loadedContainer.update { ctx in
+          if needsIdPatch {
             ctx.configuration.eosTokenIds = parsed
           }
+          ctx.configuration.extraEOSTokens.formUnion(chatStopTokens)
         }
+        let updated = await loadedContainer.configuration.eosTokenIds
+        let updatedExtra = await loadedContainer.configuration.extraEOSTokens
+        log("EOS patched - ids: \(updated), extra: \(updatedExtra)")
       }

       let memoryAfterContainer = self.getMemoryUsage()
@@ -772,6 +794,9 @@ class HybridLLM: HybridLLMSpec {

   func clearHistory() throws {
     messageHistory = []
-    log("Message history cleared")
+    if let container = self.container {
+      self.session = ChatSession(container, instructions: self.systemPrompt)
+    }
+    log("History and session cleared")
   }
 }
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@inferrlm/react-native-mlx",
   "description": "MLX Swift integration for React Native - InferrLM fork with enhanced features",
-  "version": "0.4.2-alpha.0",
+  "version": "0.4.2-alpha.2",
   "main": "./lib/module/index.js",
   "module": "./lib/module/index.js",
   "types": "./lib/typescript/src/index.d.ts",