fount-vlm-nell-02 0.3.11 (fount_vlm_nell_02-0.3.11-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (258)
  1. fount_vlm_nell_02-0.3.11.dist-info/METADATA +418 -0
  2. fount_vlm_nell_02-0.3.11.dist-info/RECORD +258 -0
  3. fount_vlm_nell_02-0.3.11.dist-info/WHEEL +5 -0
  4. fount_vlm_nell_02-0.3.11.dist-info/entry_points.txt +5 -0
  5. fount_vlm_nell_02-0.3.11.dist-info/licenses/LICENSE +21 -0
  6. fount_vlm_nell_02-0.3.11.dist-info/top_level.txt +1 -0
  7. mlx_vlm/__init__.py +16 -0
  8. mlx_vlm/__main__.py +24 -0
  9. mlx_vlm/chat.py +234 -0
  10. mlx_vlm/chat_ui.py +508 -0
  11. mlx_vlm/convert.py +284 -0
  12. mlx_vlm/deprecation.py +52 -0
  13. mlx_vlm/evals/__init__.py +0 -0
  14. mlx_vlm/evals/math_vista.py +565 -0
  15. mlx_vlm/evals/mmmu.py +528 -0
  16. mlx_vlm/evals/mmstar.py +343 -0
  17. mlx_vlm/evals/ocrbench.py +453 -0
  18. mlx_vlm/evals/utils.py +37 -0
  19. mlx_vlm/generate.py +1457 -0
  20. mlx_vlm/lora.py +207 -0
  21. mlx_vlm/models/__init__.py +0 -0
  22. mlx_vlm/models/aya_vision/__init__.py +2 -0
  23. mlx_vlm/models/aya_vision/aya_vision.py +188 -0
  24. mlx_vlm/models/aya_vision/config.py +52 -0
  25. mlx_vlm/models/aya_vision/language.py +202 -0
  26. mlx_vlm/models/aya_vision/vision.py +340 -0
  27. mlx_vlm/models/base.py +356 -0
  28. mlx_vlm/models/cache.py +238 -0
  29. mlx_vlm/models/deepseek_vl_v2/__init__.py +2 -0
  30. mlx_vlm/models/deepseek_vl_v2/config.py +159 -0
  31. mlx_vlm/models/deepseek_vl_v2/conversation.py +264 -0
  32. mlx_vlm/models/deepseek_vl_v2/deepseek_vl_v2.py +418 -0
  33. mlx_vlm/models/deepseek_vl_v2/language.py +539 -0
  34. mlx_vlm/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +536 -0
  35. mlx_vlm/models/deepseek_vl_v2/vision.py +322 -0
  36. mlx_vlm/models/deepseekocr/__init__.py +2 -0
  37. mlx_vlm/models/deepseekocr/config.py +173 -0
  38. mlx_vlm/models/deepseekocr/conversation.py +264 -0
  39. mlx_vlm/models/deepseekocr/deepseekocr.py +371 -0
  40. mlx_vlm/models/deepseekocr/language.py +547 -0
  41. mlx_vlm/models/deepseekocr/processing_deepseekocr.py +655 -0
  42. mlx_vlm/models/deepseekocr/sam.py +489 -0
  43. mlx_vlm/models/deepseekocr/vision.py +263 -0
  44. mlx_vlm/models/deepseekocr_2/__init__.py +12 -0
  45. mlx_vlm/models/deepseekocr_2/config.py +216 -0
  46. mlx_vlm/models/deepseekocr_2/deepseekocr_2.py +297 -0
  47. mlx_vlm/models/deepseekocr_2/processing_deepseekocr.py +624 -0
  48. mlx_vlm/models/deepseekocr_2/vision.py +439 -0
  49. mlx_vlm/models/ernie4_5_moe_vl/__init__.py +5 -0
  50. mlx_vlm/models/ernie4_5_moe_vl/config.py +139 -0
  51. mlx_vlm/models/ernie4_5_moe_vl/ernie4_5_moe_vl.py +337 -0
  52. mlx_vlm/models/ernie4_5_moe_vl/language.py +770 -0
  53. mlx_vlm/models/ernie4_5_moe_vl/processor.py +686 -0
  54. mlx_vlm/models/ernie4_5_moe_vl/vision.py +322 -0
  55. mlx_vlm/models/fastvlm/__init__.py +2 -0
  56. mlx_vlm/models/fastvlm/config.py +79 -0
  57. mlx_vlm/models/fastvlm/fastvlm.py +198 -0
  58. mlx_vlm/models/fastvlm/language.py +49 -0
  59. mlx_vlm/models/fastvlm/vision.py +692 -0
  60. mlx_vlm/models/florence2/__init__.py +2 -0
  61. mlx_vlm/models/florence2/config.py +84 -0
  62. mlx_vlm/models/florence2/florence2.py +383 -0
  63. mlx_vlm/models/florence2/language.py +452 -0
  64. mlx_vlm/models/florence2/processing_florence2.py +30 -0
  65. mlx_vlm/models/florence2/vision.py +552 -0
  66. mlx_vlm/models/gemma3/__init__.py +2 -0
  67. mlx_vlm/models/gemma3/config.py +52 -0
  68. mlx_vlm/models/gemma3/gemma3.py +194 -0
  69. mlx_vlm/models/gemma3/language.py +293 -0
  70. mlx_vlm/models/gemma3/vision.py +215 -0
  71. mlx_vlm/models/gemma3n/__init__.py +2 -0
  72. mlx_vlm/models/gemma3n/audio.py +1038 -0
  73. mlx_vlm/models/gemma3n/config.py +130 -0
  74. mlx_vlm/models/gemma3n/gemma3n.py +322 -0
  75. mlx_vlm/models/gemma3n/language.py +631 -0
  76. mlx_vlm/models/gemma3n/vision.py +994 -0
  77. mlx_vlm/models/glm4v/__init__.py +3 -0
  78. mlx_vlm/models/glm4v/config.py +79 -0
  79. mlx_vlm/models/glm4v/glm4v.py +188 -0
  80. mlx_vlm/models/glm4v/language.py +574 -0
  81. mlx_vlm/models/glm4v/processing.py +220 -0
  82. mlx_vlm/models/glm4v/vision.py +406 -0
  83. mlx_vlm/models/glm4v_moe/__init__.py +3 -0
  84. mlx_vlm/models/glm4v_moe/config.py +81 -0
  85. mlx_vlm/models/glm4v_moe/glm4v_moe.py +176 -0
  86. mlx_vlm/models/glm4v_moe/language.py +674 -0
  87. mlx_vlm/models/glm4v_moe/processing.py +229 -0
  88. mlx_vlm/models/glm4v_moe/vision.py +405 -0
  89. mlx_vlm/models/glm_ocr/__init__.py +3 -0
  90. mlx_vlm/models/glm_ocr/config.py +93 -0
  91. mlx_vlm/models/glm_ocr/glm_ocr.py +180 -0
  92. mlx_vlm/models/glm_ocr/language.py +585 -0
  93. mlx_vlm/models/glm_ocr/processing.py +208 -0
  94. mlx_vlm/models/glm_ocr/vision.py +342 -0
  95. mlx_vlm/models/hunyuan_vl/__init__.py +7 -0
  96. mlx_vlm/models/hunyuan_vl/config.py +136 -0
  97. mlx_vlm/models/hunyuan_vl/hunyuan_vl.py +181 -0
  98. mlx_vlm/models/hunyuan_vl/language.py +509 -0
  99. mlx_vlm/models/hunyuan_vl/processing_hunyuan_vl.py +607 -0
  100. mlx_vlm/models/hunyuan_vl/vision.py +322 -0
  101. mlx_vlm/models/idefics2/__init__.py +2 -0
  102. mlx_vlm/models/idefics2/config.py +65 -0
  103. mlx_vlm/models/idefics2/idefics2.py +321 -0
  104. mlx_vlm/models/idefics2/language.py +161 -0
  105. mlx_vlm/models/idefics2/vision.py +244 -0
  106. mlx_vlm/models/idefics3/__init__.py +4 -0
  107. mlx_vlm/models/idefics3/config.py +54 -0
  108. mlx_vlm/models/idefics3/idefics3.py +221 -0
  109. mlx_vlm/models/idefics3/language.py +157 -0
  110. mlx_vlm/models/idefics3/vision.py +265 -0
  111. mlx_vlm/models/internvl_chat/__init__.py +3 -0
  112. mlx_vlm/models/internvl_chat/config.py +89 -0
  113. mlx_vlm/models/internvl_chat/internvl_chat.py +115 -0
  114. mlx_vlm/models/internvl_chat/language.py +187 -0
  115. mlx_vlm/models/internvl_chat/processor.py +395 -0
  116. mlx_vlm/models/internvl_chat/vision.py +265 -0
  117. mlx_vlm/models/interpolate.py +183 -0
  118. mlx_vlm/models/jina_vlm/__init__.py +3 -0
  119. mlx_vlm/models/jina_vlm/config.py +142 -0
  120. mlx_vlm/models/jina_vlm/image_processor.py +430 -0
  121. mlx_vlm/models/jina_vlm/jina_vlm.py +280 -0
  122. mlx_vlm/models/jina_vlm/language.py +272 -0
  123. mlx_vlm/models/jina_vlm/processing_jinavlm.py +266 -0
  124. mlx_vlm/models/jina_vlm/vision.py +202 -0
  125. mlx_vlm/models/kernels.py +447 -0
  126. mlx_vlm/models/kimi_vl/__init__.py +4 -0
  127. mlx_vlm/models/kimi_vl/config.py +84 -0
  128. mlx_vlm/models/kimi_vl/kimi_vl.py +127 -0
  129. mlx_vlm/models/kimi_vl/language.py +460 -0
  130. mlx_vlm/models/kimi_vl/processing_kimi_vl.py +560 -0
  131. mlx_vlm/models/kimi_vl/vision.py +485 -0
  132. mlx_vlm/models/lfm2_vl/__init__.py +2 -0
  133. mlx_vlm/models/lfm2_vl/config.py +94 -0
  134. mlx_vlm/models/lfm2_vl/language.py +49 -0
  135. mlx_vlm/models/lfm2_vl/lfm2_vl.py +223 -0
  136. mlx_vlm/models/lfm2_vl/processing_lfm2_vl.py +320 -0
  137. mlx_vlm/models/lfm2_vl/vision.py +223 -0
  138. mlx_vlm/models/llama4/__init__.py +2 -0
  139. mlx_vlm/models/llama4/config.py +83 -0
  140. mlx_vlm/models/llama4/language.py +334 -0
  141. mlx_vlm/models/llama4/llama4.py +146 -0
  142. mlx_vlm/models/llama4/vision.py +526 -0
  143. mlx_vlm/models/llava/__init__.py +2 -0
  144. mlx_vlm/models/llava/config.py +61 -0
  145. mlx_vlm/models/llava/language.py +200 -0
  146. mlx_vlm/models/llava/llava.py +132 -0
  147. mlx_vlm/models/llava/vision.py +233 -0
  148. mlx_vlm/models/llava_bunny/__init__.py +2 -0
  149. mlx_vlm/models/llava_bunny/config.py +85 -0
  150. mlx_vlm/models/llava_bunny/language.py +194 -0
  151. mlx_vlm/models/llava_bunny/llava_bunny.py +217 -0
  152. mlx_vlm/models/llava_bunny/vision.py +278 -0
  153. mlx_vlm/models/llava_next/__init__.py +2 -0
  154. mlx_vlm/models/llava_next/config.py +60 -0
  155. mlx_vlm/models/llava_next/language.py +192 -0
  156. mlx_vlm/models/llava_next/llava_next.py +138 -0
  157. mlx_vlm/models/llava_next/vision.py +217 -0
  158. mlx_vlm/models/mistral3/__init__.py +2 -0
  159. mlx_vlm/models/mistral3/config.py +59 -0
  160. mlx_vlm/models/mistral3/language.py +269 -0
  161. mlx_vlm/models/mistral3/mistral3.py +383 -0
  162. mlx_vlm/models/mllama/__init__.py +4 -0
  163. mlx_vlm/models/mllama/config.py +74 -0
  164. mlx_vlm/models/mllama/language.py +377 -0
  165. mlx_vlm/models/mllama/mllama.py +210 -0
  166. mlx_vlm/models/mllama/vision.py +458 -0
  167. mlx_vlm/models/molmo/__init__.py +5 -0
  168. mlx_vlm/models/molmo/config.py +93 -0
  169. mlx_vlm/models/molmo/language.py +208 -0
  170. mlx_vlm/models/molmo/molmo.py +108 -0
  171. mlx_vlm/models/molmo/processing_molmo.py +763 -0
  172. mlx_vlm/models/molmo/vision.py +408 -0
  173. mlx_vlm/models/molmo2/__init__.py +6 -0
  174. mlx_vlm/models/molmo2/config.py +137 -0
  175. mlx_vlm/models/molmo2/language.py +206 -0
  176. mlx_vlm/models/molmo2/molmo2.py +330 -0
  177. mlx_vlm/models/molmo2/processing.py +773 -0
  178. mlx_vlm/models/molmo2/vision.py +286 -0
  179. mlx_vlm/models/moondream2/__init__.py +11 -0
  180. mlx_vlm/models/moondream2/config.py +92 -0
  181. mlx_vlm/models/moondream2/image_crops.py +269 -0
  182. mlx_vlm/models/moondream2/language.py +267 -0
  183. mlx_vlm/models/moondream2/moondream2.py +522 -0
  184. mlx_vlm/models/moondream2/processing_moondream.py +144 -0
  185. mlx_vlm/models/moondream2/vision.py +200 -0
  186. mlx_vlm/models/multi_modality/__init__.py +4 -0
  187. mlx_vlm/models/multi_modality/config.py +108 -0
  188. mlx_vlm/models/multi_modality/language.py +191 -0
  189. mlx_vlm/models/multi_modality/multi_modality.py +338 -0
  190. mlx_vlm/models/multi_modality/sam.py +543 -0
  191. mlx_vlm/models/multi_modality/vision.py +450 -0
  192. mlx_vlm/models/paddleocr_vl/__init__.py +3 -0
  193. mlx_vlm/models/paddleocr_vl/config.py +93 -0
  194. mlx_vlm/models/paddleocr_vl/language.py +522 -0
  195. mlx_vlm/models/paddleocr_vl/paddleocr_vl.py +207 -0
  196. mlx_vlm/models/paddleocr_vl/processing_paddleocr_vl.py +425 -0
  197. mlx_vlm/models/paddleocr_vl/vision.py +358 -0
  198. mlx_vlm/models/paligemma/__init__.py +4 -0
  199. mlx_vlm/models/paligemma/config.py +50 -0
  200. mlx_vlm/models/paligemma/language.py +253 -0
  201. mlx_vlm/models/paligemma/paligemma.py +140 -0
  202. mlx_vlm/models/paligemma/vision.py +218 -0
  203. mlx_vlm/models/phi3_v/__init__.py +5 -0
  204. mlx_vlm/models/phi3_v/config.py +55 -0
  205. mlx_vlm/models/phi3_v/language.py +2 -0
  206. mlx_vlm/models/phi3_v/phi3_v.py +239 -0
  207. mlx_vlm/models/phi3_v/processing_phi3_v.py +704 -0
  208. mlx_vlm/models/phi3_v/vision.py +294 -0
  209. mlx_vlm/models/pixtral/__init__.py +4 -0
  210. mlx_vlm/models/pixtral/config.py +69 -0
  211. mlx_vlm/models/pixtral/language.py +195 -0
  212. mlx_vlm/models/pixtral/pixtral.py +208 -0
  213. mlx_vlm/models/pixtral/vision.py +293 -0
  214. mlx_vlm/models/qwen2_5_vl/__init__.py +2 -0
  215. mlx_vlm/models/qwen2_5_vl/config.py +90 -0
  216. mlx_vlm/models/qwen2_5_vl/language.py +541 -0
  217. mlx_vlm/models/qwen2_5_vl/qwen2_5_vl.py +184 -0
  218. mlx_vlm/models/qwen2_5_vl/vision.py +414 -0
  219. mlx_vlm/models/qwen2_vl/__init__.py +2 -0
  220. mlx_vlm/models/qwen2_vl/config.py +86 -0
  221. mlx_vlm/models/qwen2_vl/language.py +539 -0
  222. mlx_vlm/models/qwen2_vl/qwen2_vl.py +180 -0
  223. mlx_vlm/models/qwen2_vl/vision.py +308 -0
  224. mlx_vlm/models/qwen3_omni_moe/__init__.py +29 -0
  225. mlx_vlm/models/qwen3_omni_moe/audio.py +317 -0
  226. mlx_vlm/models/qwen3_omni_moe/code2wav.py +542 -0
  227. mlx_vlm/models/qwen3_omni_moe/config.py +264 -0
  228. mlx_vlm/models/qwen3_omni_moe/language.py +622 -0
  229. mlx_vlm/models/qwen3_omni_moe/omni_utils.py +69 -0
  230. mlx_vlm/models/qwen3_omni_moe/qwen3_omni_moe.py +706 -0
  231. mlx_vlm/models/qwen3_omni_moe/talker.py +873 -0
  232. mlx_vlm/models/qwen3_omni_moe/thinker.py +366 -0
  233. mlx_vlm/models/qwen3_omni_moe/vision.py +419 -0
  234. mlx_vlm/models/qwen3_vl/__init__.py +2 -0
  235. mlx_vlm/models/qwen3_vl/config.py +103 -0
  236. mlx_vlm/models/qwen3_vl/language.py +596 -0
  237. mlx_vlm/models/qwen3_vl/qwen3_vl.py +166 -0
  238. mlx_vlm/models/qwen3_vl/vision.py +441 -0
  239. mlx_vlm/models/qwen3_vl_moe/__init__.py +2 -0
  240. mlx_vlm/models/qwen3_vl_moe/config.py +108 -0
  241. mlx_vlm/models/qwen3_vl_moe/language.py +656 -0
  242. mlx_vlm/models/qwen3_vl_moe/qwen3_vl_moe.py +184 -0
  243. mlx_vlm/models/qwen3_vl_moe/vision.py +442 -0
  244. mlx_vlm/models/smolvlm/__init__.py +4 -0
  245. mlx_vlm/models/smolvlm/config.py +59 -0
  246. mlx_vlm/models/smolvlm/smolvlm.py +60 -0
  247. mlx_vlm/prompt_utils.py +565 -0
  248. mlx_vlm/sample_utils.py +39 -0
  249. mlx_vlm/server.py +1107 -0
  250. mlx_vlm/smolvlm_video_generate.py +109 -0
  251. mlx_vlm/tokenizer_utils.py +371 -0
  252. mlx_vlm/trainer/__init__.py +9 -0
  253. mlx_vlm/trainer/lora.py +70 -0
  254. mlx_vlm/trainer/trainer.py +299 -0
  255. mlx_vlm/trainer/utils.py +160 -0
  256. mlx_vlm/utils.py +1339 -0
  257. mlx_vlm/version.py +1 -0
  258. mlx_vlm/video_generate.py +611 -0
@@ -0,0 +1,337 @@
+"""ERNIE 4.5 VL MoE model for MLX."""
+
+from typing import Optional
+
+import mlx.core as mx
+import mlx.nn as nn
+import numpy as np
+from transformers import AutoImageProcessor, AutoProcessor, AutoTokenizer
+
+from ..base import InputEmbeddingsFeatures
+from .config import ModelConfig
+from .language import LanguageModel
+from .processor import Ernie4_5_VLProcessor, Ernie4_5_VLTokenizer, ImageProcessor
+from .vision import VisionModel
+
+# Register custom processor classes for ernie4_5_moe_vl model type
+MODEL_TYPE = "ernie4_5_moe_vl"
+try:
+    AutoImageProcessor.register(MODEL_TYPE, slow_image_processor_class=ImageProcessor)
+    AutoTokenizer.register(MODEL_TYPE, slow_tokenizer_class=Ernie4_5_VLTokenizer)
+    AutoProcessor.register(MODEL_TYPE, Ernie4_5_VLProcessor)
+except Exception:
+    pass  # Already registered or registration not needed
+
+
+class TokenType:
+    """Token type definition."""
+
+    text = 0
+    image = 1
+    video = 2
+
+
+class VariableResolutionResamplerModel(nn.Module):
+    """Compresses vision features using spatial and temporal convolutions."""
+
+    def __init__(
+        self,
+        in_dim: int,
+        out_dim: int,
+        spatial_conv_size: int,
+        temporal_conv_size: int,
+        config: ModelConfig,
+    ):
+        super().__init__()
+        self.in_dim = in_dim
+        self.out_dim = out_dim
+        self.config = config
+        self.spatial_conv_size = spatial_conv_size
+        self.temporal_conv_size = temporal_conv_size
+        self.use_temporal_conv = config.use_temporal_conv
+
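+        # Each output token concatenates a spatial_conv_size x spatial_conv_size
+        # block of patch features channel-wise (pixel-shuffle-style), so the
+        # resampler works at width in_dim * spatial_conv_size**2 and, when
+        # frames are paired, at temporal_conv_size times that.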
+        self.spatial_dim = in_dim * spatial_conv_size * spatial_conv_size
+        self.temporal_dim = (
+            in_dim * spatial_conv_size * spatial_conv_size * temporal_conv_size
+        )
+
+        self.spatial_linear = nn.Sequential(
+            nn.Linear(self.spatial_dim, self.spatial_dim),
+            nn.GELU(),
+            nn.Linear(self.spatial_dim, self.spatial_dim),
+            nn.LayerNorm(self.spatial_dim, eps=1e-6),
+        )
+
+        if self.use_temporal_conv:
+            self.temporal_linear = nn.Sequential(
+                nn.Linear(self.temporal_dim, self.spatial_dim),
+                nn.GELU(),
+                nn.Linear(self.spatial_dim, self.spatial_dim),
+                nn.LayerNorm(self.spatial_dim, eps=1e-6),
+            )
+
+        self.mlp = nn.Linear(self.spatial_dim, out_dim)
+        self.after_norm = nn.RMSNorm(out_dim)
+
+    def spatial_conv_reshape(self, x: mx.array) -> mx.array:
+        S, C = x.shape
+        x = x.reshape(-1, C * (self.spatial_conv_size**2))
+        return x
+
+    def __call__(
+        self,
+        x: mx.array,
+        grid_thw: mx.array,
+    ) -> mx.array:
+        def fwd_spatial(x):
+            x = self.spatial_conv_reshape(x)
+            x = self.spatial_linear(x)
+            return x
+
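+        # fwd_placeholder implements the "temporal conv" as a gather: for each
+        # image or video it collects even-timestep token rows and odd-timestep
+        # token rows, then concatenates the two along the feature axis, halving
+        # the token count across time. A still image (grid_t == 1) is paired
+        # with itself.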
+        def fwd_placeholder(x, grid_thw):
+            grid_thw_np = np.array(grid_thw.tolist(), dtype=np.int64)
+            grid_t = grid_thw_np[:, 0]
+            grid_hw = grid_thw_np[:, 1:]
+            grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_conv_size**2)
+
+            tokens_per_img_or_vid = grid_thw_np.prod(-1) // (self.spatial_conv_size**2)
+            batch_offset = np.empty(tokens_per_img_or_vid.size, dtype=np.int64)
+            batch_offset[0] = 0
+            batch_offset[1:] = tokens_per_img_or_vid.cumsum()[:-1]
+
+            assert (
+                self.temporal_conv_size == 2
+            ), f"Only temporal_conv_size == 2 is supported, got: {self.temporal_conv_size}"
+
+            slice_offsets = []
+            for temporal_size, spatial_size, b_offset in zip(
+                grid_t, grid_hw_after_conv, batch_offset
+            ):
+                for temp_offset in range(0, temporal_size, 2):
+                    slice_offsets.append(
+                        np.arange(
+                            b_offset + temp_offset * spatial_size,
+                            b_offset + (temp_offset + 1) * spatial_size,
+                        )
+                    )
+            slice_offsets = np.concatenate(slice_offsets, axis=-1).astype(np.int32)
+
+            slice_offsets2 = []
+            for temporal_size, spatial_size, b_offset in zip(
+                grid_t, grid_hw_after_conv, batch_offset
+            ):
+                for temp_offset in range(
+                    1 if temporal_size > 1 else 0, temporal_size, 2
+                ):
+                    slice_offsets2.append(
+                        np.arange(
+                            b_offset + temp_offset * spatial_size,
+                            b_offset + (temp_offset + 1) * spatial_size,
+                        )
+                    )
+            slice_offsets2 = np.concatenate(slice_offsets2, axis=-1).astype(np.int32)
+
+            x_timestep_1 = x[mx.array(slice_offsets), :]
+            x_timestep_2 = x[mx.array(slice_offsets2), :]
+            x = mx.concatenate([x_timestep_1, x_timestep_2], axis=-1)
+            return x
+
+        def fwd_temporal(x):
+            x = self.temporal_linear(x)
+            return x
+
+        def fwd_mlp(x):
+            x = self.mlp(x)
+            x = self.after_norm(x)
+            return x
+
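+        # Pipeline: spatial merge -> optional temporal pairing + temporal MLP
+        # -> projection to the language model's hidden size with RMSNorm.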
+        x = fwd_spatial(x)
+        if self.use_temporal_conv:
+            x = fwd_placeholder(x, grid_thw)
+            x = fwd_temporal(x)
+        x = fwd_mlp(x)
+        return x
+
+
+class Model(nn.Module):
+    """ERNIE 4.5 VL MoE model."""
+
+    def __init__(self, config: ModelConfig):
+        super().__init__()
+        self.config = config
+        self.vision_tower = VisionModel(config.vision_config)
+        self.resampler_model = VariableResolutionResamplerModel(
+            config.pixel_hidden_size,
+            config.hidden_size,
+            config.spatial_conv_size,
+            config.temporal_conv_size,
+            config=config,
+        )
+        self.language_model = LanguageModel(config.text_config, config)
+
+    def get_input_embeddings(
+        self,
+        input_ids: Optional[mx.array] = None,
+        pixel_values: Optional[mx.array] = None,
+        **kwargs,
+    ):
+        image_grid_thw = kwargs.get("image_grid_thw", None)
+        video_grid_thw = kwargs.get("video_grid_thw", None)
+        grid_thw = image_grid_thw if image_grid_thw is not None else video_grid_thw
+
+        if pixel_values is None:
+            return InputEmbeddingsFeatures(
+                inputs_embeds=self.language_model.model.embed_tokens(input_ids)
+            )
+
+        dtype = self.vision_tower.patch_embed.proj.weight.dtype
+        pixel_values = pixel_values.astype(dtype)
+
+        inputs_embeds = self.language_model.model.embed_tokens(input_ids)
+        hidden_states = self.vision_tower(
+            pixel_values, grid_thw, output_hidden_states=False
+        )
+        image_features = self.resampler_model(hidden_states, grid_thw)
+        final_inputs_embeds = self._merge_input_ids_with_image_features(
+            image_features,
+            inputs_embeds,
+            input_ids,
+        )
+        return InputEmbeddingsFeatures(inputs_embeds=final_inputs_embeds)
+
+    def _merge_input_ids_with_image_features(
+        self,
+        image_features: mx.array,
+        inputs_embeds: mx.array,
+        input_ids: mx.array,
+    ) -> mx.array:
+        image_token_id = self.config.image_token_id
+        video_token_id = self.config.video_token_id
+
+        image_positions = input_ids == image_token_id
+        if mx.sum(image_positions) == 0:
+            image_positions = input_ids == video_token_id
+
+        if mx.sum(image_positions) == 0:
+            return inputs_embeds
+
+        batch_size, seq_len = input_ids.shape
+        batch_outputs = []
+        feature_start_idx = 0
+
+        for batch_idx in range(batch_size):
+            image_mask = image_positions[batch_idx]
+            num_positions = int(mx.sum(image_mask).item())
+
+            if num_positions > 0:
+                batch_features = image_features[
+                    feature_start_idx : feature_start_idx + num_positions
+                ]
+
+                if batch_features.shape[0] != num_positions:
+                    raise ValueError(
+                        f"Number of image token positions ({num_positions}) does not match "
+                        f"number of image features ({batch_features.shape[0]}) for batch {batch_idx}"
+                    )
+
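+                # Vectorized scatter: for the i-th vision token in the sequence,
+                # cumsum - 1 equals i, so the gather maps it to row i of
+                # batch_features; mx.where then swaps those rows in while text
+                # positions keep their original embeddings.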
+                cumsum = mx.cumsum(image_mask.astype(mx.int32))
+                feature_indices = mx.where(
+                    image_mask, cumsum - 1, mx.zeros_like(cumsum)
+                )
+                gathered_features = batch_features[feature_indices]
+
+                image_mask_expanded = mx.expand_dims(image_mask, axis=-1)
+                batch_output = mx.where(
+                    image_mask_expanded, gathered_features, inputs_embeds[batch_idx]
+                )
+
+                feature_start_idx += num_positions
+            else:
+                batch_output = inputs_embeds[batch_idx]
+
+            batch_outputs.append(batch_output)
+
+        return mx.stack(batch_outputs, axis=0)
+
+    @property
+    def layers(self):
+        return self.language_model.model.layers
+
+    def _build_token_type_ids(
+        self, input_ids: mx.array, pixel_values: Optional[mx.array] = None
+    ) -> Optional[mx.array]:
+        if pixel_values is None:
+            return None
+
+        image_token_id = self.config.image_token_id
+        video_token_id = self.config.video_token_id
+
+        is_image = input_ids == image_token_id
+        is_video = input_ids == video_token_id
+        is_vision = is_image | is_video
+
+        if mx.sum(is_vision) == 0:
+            return None
+
+        token_type_ids = mx.where(
+            is_vision, mx.ones_like(input_ids), mx.zeros_like(input_ids)
+        )
+        return token_type_ids
+
+    def __call__(
+        self,
+        input_ids: mx.array,
+        pixel_values: Optional[mx.array] = None,
+        mask: Optional[mx.array] = None,
+        cache=None,
+        **kwargs,
+    ):
+        token_type_ids = self._build_token_type_ids(input_ids, pixel_values)
+
+        inputs_embeds_features = self.get_input_embeddings(
+            input_ids, pixel_values, **kwargs
+        )
+
+        logits = self.language_model(
+            input_ids,
+            inputs_embeds_features.inputs_embeds,
+            mask=mask,
+            cache=cache,
+            pixel_values=pixel_values,
+            token_type_ids=token_type_ids,
+            **kwargs,
+        )
+
+        return logits
+
+    def sanitize(self, weights):
+        import re
+
+        def transform_key(key):
+            if "vision_tower" not in key and "vision_model" in key:
+                key = key.replace("vision_model", "vision_tower")
+
+            if "language_model" not in key:
+                if (
+                    "model.layers" in key
+                    or "model.embed_tokens" in key
+                    or "model.norm" in key
+                ):
+                    key = key.replace("model.", "language_model.model.")
+                elif "lm_head" in key:
+                    key = key.replace("lm_head", "language_model.lm_head")
+
+            if "model.resampler_model" in key:
+                key = key.replace("model.resampler_model", "resampler_model")
+
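+            # HF checkpoints index nn.Sequential children numerically
+            # ("spatial_linear.0.weight"); MLX keeps them under ".layers", so
+            # rewrite "spatial_linear.0" -> "spatial_linear.layers.0".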
+            key = re.sub(
+                r"(spatial_linear|temporal_linear)\.(\d+)", r"\1.layers.\2", key
+            )
+
+            return key
+
+        weights = {transform_key(k): v for k, v in weights.items()}
+        weights = self.vision_tower.sanitize(weights)
+        weights = self.language_model.sanitize(weights)
+
+        return weights
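
For context, the Model class above is normally driven through the package's loading and generation helpers rather than instantiated by hand. A minimal usage sketch follows; it assumes this package keeps upstream mlx-vlm's load, apply_chat_template, and generate helpers (all present in the file list above) with their usual signatures, and the checkpoint path and image file are hypothetical.

    from mlx_vlm import load, generate
    from mlx_vlm.prompt_utils import apply_chat_template

    # Hypothetical path; substitute a real converted ERNIE 4.5 VL MoE checkpoint.
    model, processor = load("path/to/ernie4_5_moe_vl-mlx")

    # Build a chat prompt with one image placeholder, then run generation.
    prompt = apply_chat_template(processor, model.config, "Describe this image.", num_images=1)
    output = generate(model, processor, prompt, image=["example.jpg"], max_tokens=128)
    print(output)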