nexaai-1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl → nexaai-1.0.19rc8-cp310-cp310-macosx_14_0_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of nexaai might be problematic.

Files changed (196)
  1. nexaai/_stub.cpython-310-darwin.so +0 -0
  2. nexaai/_version.py +1 -1
  3. nexaai/binds/libnexa_bridge.dylib +0 -0
  4. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/METADATA +1 -1
  5. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/RECORD +7 -196
  6. nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
  7. nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
  8. nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
  9. nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
  10. nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
  11. nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
  12. nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
  13. nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
  14. nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
  15. nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
  16. nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
  17. nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
  18. nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
  19. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
  20. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
  21. nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
  22. nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
  23. nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
  24. nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
  25. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
  26. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
  27. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
  28. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
  29. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
  30. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
  31. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
  32. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
  33. nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
  34. nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
  35. nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
  36. nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
  37. nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
  38. nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
  39. nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
  40. nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
  41. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
  42. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
  43. nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
  44. nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
  45. nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
  46. nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
  47. nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
  48. nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
  49. nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
  50. nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
  51. nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
  52. nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
  53. nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
  54. nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
  55. nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
  56. nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
  57. nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
  58. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
  59. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
  60. nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
  61. nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
  62. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
  63. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
  64. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
  65. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
  66. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
  67. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
  68. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
  69. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
  70. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
  71. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
  72. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
  73. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
  74. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
  75. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
  76. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
  77. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
  78. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
  79. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
  80. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
  81. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
  82. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
  83. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
  84. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
  85. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
  86. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
  87. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
  88. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
  89. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
  90. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
  91. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
  92. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
  93. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
  94. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
  95. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
  96. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
  97. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
  98. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
  99. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
  100. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
  101. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
  102. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
  103. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
  104. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
  105. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
  106. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
  107. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
  108. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
  109. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
  110. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
  111. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
  112. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
  113. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
  114. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
  115. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
  116. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
  117. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
  118. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
  119. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
  120. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
  121. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
  122. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
  123. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
  124. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
  125. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
  126. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
  127. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
  128. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
  129. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
  130. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
  131. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
  132. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
  133. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
  134. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
  135. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
  136. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
  137. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
  138. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
  139. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
  140. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
  141. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
  142. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
  143. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
  144. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
  145. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
  146. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
  147. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
  148. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
  149. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
  150. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
  151. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
  152. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
  153. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
  154. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
  155. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
  156. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
  157. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
  158. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
  159. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
  160. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
  161. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
  162. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
  163. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
  164. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
  165. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
  166. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
  167. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
  168. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
  169. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
  170. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
  171. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
  172. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
  173. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
  174. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
  175. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
  176. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
  177. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
  178. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
  179. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
  180. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
  181. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
  182. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
  183. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
  184. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
  185. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
  186. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
  187. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
  188. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
  189. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
  190. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
  191. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
  192. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
  193. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
  194. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
  195. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/WHEEL +0 -0
  196. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/top_level.txt +0 -0
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py
@@ -1,210 +0,0 @@
- import math
- import mlx.core as mx
- import mlx.nn as nn
-
- def _gather_sort(x, indices):
-     *_, M = indices.shape
-     indices = indices.flatten()
-     order = mx.argsort(indices)
-     inv_order = mx.argsort(order)
-     return x.flatten(0, -3)[order // M], indices[order], inv_order
-
-
- def _scatter_unsort(x, inv_order, shape=None):
-     x = x[inv_order]
-     if shape is not None:
-         x = mx.unflatten(x, 0, shape)
-     return x
-
-
- class QuantizedSwitchLinear(nn.Module):
-     def __init__(
-         self,
-         input_dims: int,
-         output_dims: int,
-         num_experts: int,
-         bias: bool = True,
-         group_size: int = 64,
-         bits: int = 4,
-     ):
-         super().__init__()
-
-         scale = math.sqrt(1 / input_dims)
-         self.weight, self.scales, self.biases = mx.quantize(
-             mx.random.uniform(
-                 low=-scale,
-                 high=scale,
-                 shape=(num_experts, output_dims, input_dims),
-             ),
-             group_size=group_size,
-             bits=bits,
-         )
-
-         if bias:
-             self.bias = mx.zeros((num_experts, output_dims))
-
-         self.group_size = group_size
-         self.bits = bits
-
-         # Freeze this model's parameters
-         self.freeze()
-
-     def unfreeze(self, *args, **kwargs):
-         """Wrap unfreeze so that we unfreeze any layers we might contain but
-         our parameters will remain frozen."""
-         super().unfreeze(*args, **kwargs)
-         self.freeze(recurse=False)
-
-     @property
-     def input_dims(self):
-         return self.scales.shape[2] * self.group_size
-
-     @property
-     def output_dims(self):
-         return self.weight.shape[1]
-
-     @property
-     def num_experts(self):
-         return self.weight.shape[0]
-
-     def __call__(self, x, indices, sorted_indices=False):
-         x = mx.gather_qmm(
-             x,
-             self["weight"],
-             self["scales"],
-             self["biases"],
-             rhs_indices=indices,
-             transpose=True,
-             group_size=self.group_size,
-             bits=self.bits,
-             sorted_indices=sorted_indices,
-         )
-         if "bias" in self:
-             x = x + mx.expand_dims(self["bias"][indices], -2)
-         return x
-
-
- class SwitchLinear(nn.Module):
-     def __init__(
-         self, input_dims: int, output_dims: int, num_experts: int, bias: bool = True
-     ):
-         super().__init__()
-         scale = math.sqrt(1 / input_dims)
-         self.weight = mx.random.uniform(
-             low=-scale,
-             high=scale,
-             shape=(num_experts, output_dims, input_dims),
-         )
-
-         if bias:
-             self.bias = mx.zeros((num_experts, output_dims))
-
-     @property
-     def input_dims(self):
-         return self.weight.shape[2]
-
-     @property
-     def output_dims(self):
-         return self.weight.shape[1]
-
-     @property
-     def num_experts(self):
-         return self.weight.shape[0]
-
-     def __call__(self, x, indices, sorted_indices=False):
-         x = mx.gather_mm(
-             x,
-             self["weight"].swapaxes(-1, -2),
-             lhs_indices=None,
-             rhs_indices=indices,
-         )
-         if "bias" in self:
-             x = x + mx.expand_dims(self["bias"][indices], -2)
-         return x
-
-     def to_quantized(self, group_size: int = 64, bits: int = 4):
-         num_experts, output_dims, input_dims = self.weight.shape
-         ql = QuantizedSwitchLinear(
-             input_dims, output_dims, num_experts, False, group_size, bits
-         )
-         ql.weight, ql.scales, ql.biases = mx.quantize(self.weight, group_size, bits)
-         if "bias" in self:
-             ql.bias = self.bias
-         return ql
-
-
- class SwitchGLU(nn.Module):
-     def __init__(
-         self,
-         input_dims: int,
-         hidden_dims: int,
-         num_experts: int,
-         activation=nn.SiLU(),
-         bias: bool = False,
-     ):
-         super().__init__()
-
-         self.gate_proj = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
-         self.up_proj = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
-         self.down_proj = SwitchLinear(hidden_dims, input_dims, num_experts, bias=bias)
-         self.activation = activation
-
-     def __call__(self, x, indices) -> mx.array:
-         x = mx.expand_dims(x, (-2, -3))
-
-         # When we have many tokens, then sort them to make sure that the access
-         # of different experts is in order.
-         do_sort = indices.size >= 64
-         idx = indices
-         inv_order = None
-         if do_sort:
-             x, idx, inv_order = _gather_sort(x, indices)
-
-         x_up = self.up_proj(x, idx, sorted_indices=do_sort)
-         x_gate = self.gate_proj(x, idx, sorted_indices=do_sort)
-         x = self.down_proj(
-             self.activation(x_gate) * x_up,
-             idx,
-             sorted_indices=do_sort,
-         )
-
-         if do_sort:
-             x = _scatter_unsort(x, inv_order, indices.shape)
-
-         return x.squeeze(-2)
-
-
- class SwitchMLP(nn.Module):
-     def __init__(
-         self,
-         input_dims: int,
-         hidden_dims: int,
-         num_experts: int,
-         activation=nn.GELU(approx="precise"),
-         bias: bool = False,
-     ):
-         super().__init__()
-
-         self.fc1 = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
-         self.fc2 = SwitchLinear(hidden_dims, input_dims, num_experts, bias=bias)
-         self.activation = activation
-
-     def __call__(self, x, indices) -> mx.array:
-         x = mx.expand_dims(x, (-2, -3))
-
-         # When we have many tokens, then sort them to make sure that the access
-         # of different experts is in order.
-         do_sort = indices.size >= 64
-         idx = indices
-         inv_order = None
-         if do_sort:
-             x, idx, inv_order = _gather_sort(x, indices)
-
-         x = self.fc1(x, idx, sorted_indices=do_sort)
-         x = self.activation(x)
-         x = self.fc2(x, idx, sorted_indices=do_sort)
-
-         if do_sort:
-             x = _scatter_unsort(x, inv_order, indices.shape)
-
-         return x.squeeze(-2)
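
For readers assessing this removal: switch_layers.py implemented the expert-routed linear layers behind the qwen3vl_moe port, where a router assigns each token to its top-k experts and SwitchGLU/SwitchMLP evaluate only those experts (sorting the token-to-expert indices first when there are many tokens so that expert weights are accessed contiguously). A minimal, hypothetical usage sketch, assuming MLX is installed and the removed module were still importable as switch_layers:

import mlx.core as mx
from switch_layers import SwitchGLU  # hypothetical import of the removed module

num_experts, top_k, dims, hidden = 8, 2, 64, 256
glu = SwitchGLU(dims, hidden, num_experts)

x = mx.random.normal((1, 10, dims))                           # 10 token embeddings
indices = mx.random.randint(0, num_experts, (1, 10, top_k))   # stand-in for router output
y = glu(x, indices)                                           # one activation per selected expert
print(y.shape)  # (1, 10, 2, 64); a real MoE block mixes these along axis -2 with router scores

The QuantizedSwitchLinear variant performs the same gathered matmul on quantized weights via mx.gather_qmm, which is what SwitchLinear.to_quantized converts into.
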
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py
@@ -1,8 +0,0 @@
- from .smolvlm import (
-     LanguageModel,
-     Model,
-     ModelConfig,
-     TextConfig,
-     VisionConfig,
-     VisionModel,
- )
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py
@@ -1,62 +0,0 @@
- import mlx.core as mx
- import numpy as np
-
- from ..idefics3 import LanguageModel
- from ..idefics3 import Model as Idefics3Model
- from ..idefics3 import ModelConfig, TextConfig, VisionConfig, VisionModel
-
-
- class Model(Idefics3Model):
-     def _prepare_inputs_for_multimodal(self, image_features, inputs_embeds, input_ids):
-         # Assumes bs == 1
-
-         B, T, D_text = inputs_embeds.shape
-         N, S, D_img = image_features.shape
-
-         image_offset = 0
-         cur_embeds = inputs_embeds[0]
-
-         # Find positions of <image> tokens in the text
-         image_token_index = self.config.image_token_index
-         image_positions = np.where(input_ids == image_token_index)[1].tolist()
-         num_image_tokens = len(image_positions)
-
-         # If no <image> => text-only
-         if num_image_tokens == 0:
-             empty_slice = image_features[0][:0, :]  # shape (0, D)
-             return mx.concatenate([cur_embeds, empty_slice], axis=0)
-
-         # Typically, if each image is S embeddings, we expect the total # of <image> tokens
-         # in this sample to be multiple of S => each group of S tokens = 1 image
-         if num_image_tokens % S != 0:
-             raise ValueError(
-                 f"Input has {num_image_tokens} <image> tokens, not a multiple of S={S}. "
-                 "Cannot map them to blocks of shape (S, D)."
-             )
-
-         chunks = [image_positions[i : i + S] for i in range(0, num_image_tokens, S)]
-
-         segments = []
-         text_start = 0
-
-         # For each chunk (each chunk => 1 image)
-         for chunk in chunks:
-             cur_block = image_features[image_offset]
-             image_offset += 1
-
-             # We'll iterate over the S positions in ascending order
-             for i_s, pos in enumerate(chunk):
-                 if pos > text_start:
-                     segments.append(cur_embeds[text_start:pos])
-                 # Then add one row from cur_block => shape (1, D)
-                 row_of_block = cur_block[i_s : i_s + 1, :]
-                 segments.append(row_of_block)
-                 text_start = pos + 1
-
-         # leftover text after the final <image> token
-         if text_start < T:
-             segments.append(cur_embeds[text_start:])
-
-         # cat them into a single (T_b, D) tensor
-         merged_sample = mx.concatenate(segments, axis=0)
-         return mx.expand_dims(merged_sample, axis=0)
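
For context, the removed smolvlm.py overrides the Idefics3 model's input preparation: it locates the <image> placeholder tokens in the prompt and splices the corresponding rows of the image-feature block into the text embeddings, one feature row per placeholder. A small self-contained sketch of the same splice logic (toy shapes and a made-up placeholder id, plain NumPy rather than the removed class):

import numpy as np

S, D = 3, 4                                    # feature rows per image, embedding dim
input_ids = np.array([[5, 9, 9, 9, 7]])        # 9 stands in for the <image> token id
inputs_embeds = np.random.rand(1, 5, D)        # text embeddings, batch size 1
image_features = np.random.rand(1, S, D)       # one image -> S feature rows

positions = np.where(input_ids == 9)[1].tolist()
segments, text_start = [], 0
for i_s, pos in enumerate(positions):
    if pos > text_start:
        segments.append(inputs_embeds[0, text_start:pos])   # keep text before the placeholder
    segments.append(image_features[0, i_s:i_s + 1])         # substitute one image-feature row
    text_start = pos + 1
if text_start < input_ids.shape[1]:
    segments.append(inputs_embeds[0, text_start:])          # trailing text after the last placeholder

merged = np.concatenate(segments, axis=0)
print(merged.shape)  # (5, 4): same sequence length, placeholder rows replaced by image rows
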
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py
@@ -1,209 +0,0 @@
- # Copied from transformers. Removed video-related code.
-
- from typing import Optional, Union
-
- import numpy as np
-
- from transformers.feature_extraction_utils import BatchFeature
- from transformers.image_utils import ImageInput
- from transformers.processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
- from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
-
-
- class Qwen2_5_VLImagesKwargs(ImagesKwargs):
-     min_pixels: Optional[int]
-     max_pixels: Optional[int]
-     patch_size: Optional[int]
-     temporal_patch_size: Optional[int]
-     merge_size: Optional[int]
-
-
- class Qwen2_5_VLProcessorKwargs(ProcessingKwargs, total=False):
-     images_kwargs: Qwen2_5_VLImagesKwargs
-     _defaults = {
-         "text_kwargs": {
-             "padding": False,
-             "return_mm_token_type_ids": False,
-         },
-     }
-
-
- class Qwen2_5_VLProcessor(ProcessorMixin):
-     r"""
-     Constructs a Qwen2.5-VL processor which wraps a Qwen2.5-VL image processor and a Qwen2 tokenizer into a single processor.
-     [`Qwen2_5_VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
-     [`~Qwen2_5_VLProcessor.__call__`] and [`~Qwen2_5_VLProcessor.decode`] for more information.
-     Args:
-         image_processor ([`Qwen2VLImageProcessor`], *optional*):
-             The image processor is a required input.
-         tokenizer ([`Qwen2TokenizerFast`], *optional*):
-             The tokenizer is a required input.
-         chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
-             in a chat into a tokenizable string.
-     """
-
-     attributes = ["image_processor", "tokenizer"]
-
-     image_processor_class = "AutoImageProcessor"
-     tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
-
-     def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
-         self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
-         self.image_token_id = (
-             tokenizer.image_token_id
-             if getattr(tokenizer, "image_token_id", None)
-             else tokenizer.convert_tokens_to_ids(self.image_token)
-         )
-         super().__init__(image_processor, tokenizer, chat_template=chat_template)
-
-     def __call__(
-         self,
-         images: ImageInput = None,
-         text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
-         **kwargs: Unpack[Qwen2_5_VLProcessorKwargs],
-     ) -> BatchFeature:
-         """
-         Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
-         and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
-         the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to
-         Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
-
-         Args:
-             images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
-                 The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
-                 tensor. Both channels-first and channels-last formats are supported.
-             text (`str`, `list[str]`, `list[list[str]]`):
-                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
-                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
-                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
-             return_tensors (`str` or [`~utils.TensorType`], *optional*):
-                 If set, will return tensors of a particular framework. Acceptable values are:
-                 - `'tf'`: Return TensorFlow `tf.constant` objects.
-                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
-                 - `'np'`: Return NumPy `np.ndarray` objects.
-                 - `'jax'`: Return JAX `jnp.ndarray` objects.
-
-         Returns:
-             [`BatchFeature`]: A [`BatchFeature`] with the following fields:
-
-             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
-             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
-               `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
-               `None`).
-             - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
-             - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
-         """
-         output_kwargs = self._merge_kwargs(
-             Qwen2_5_VLProcessorKwargs,
-             tokenizer_init_kwargs=self.tokenizer.init_kwargs,
-             **kwargs,
-         )
-
-         image_inputs = {}
-         if images is not None:
-             image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
-             image_grid_thw = image_inputs["image_grid_thw"]
-
-         if not isinstance(text, list):
-             text = [text]
-
-         text = text.copy()  # below lines change text in-place
-         if images is not None:
-             merge_length = self.image_processor.merge_size**2
-             index = 0
-             for i in range(len(text)):
-                 while self.image_token in text[i]:
-                     num_image_tokens = image_grid_thw[index].prod() // merge_length
-                     text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
-                     index += 1
-                 text[i] = text[i].replace("<|placeholder|>", self.image_token)
-
-         return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
-         return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
-         text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
-         self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
-
-         if return_mm_token_type_ids:
-             array_ids = np.array(text_inputs["input_ids"])
-             mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
-             mm_token_type_ids[array_ids == self.image_token_id] = 1
-             text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
-
-         return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
-
-     def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
-         """
-         Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
-         Args:
-             image_sizes (`list[list[int]]`, *optional*):
-                 The input sizes formatted as (height, width) per each image.
-         Returns:
-             `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
-             input modalities, along with other useful data.
-         """
-
-         vision_data = {}
-         if image_sizes is not None:
-             images_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get("images_kwargs", {})
-             images_kwargs.update(kwargs)
-             merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
-
-             num_image_patches = [
-                 self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
-                 for image_size in image_sizes
-             ]
-             num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
-             vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
-
-         return MultiModalData(**vision_data)
-
-     def batch_decode(self, *args, **kwargs):
-         """
-         This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
-         refer to the docstring of this method for more information.
-         """
-         return self.tokenizer.batch_decode(*args, **kwargs)
-
-     def decode(self, *args, **kwargs):
-         """
-         This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
-         the docstring of this method for more information.
-         """
-         return self.tokenizer.decode(*args, **kwargs)
-
-     def post_process_image_text_to_text(
-         self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
-     ):
-         """
-         Post-process the output of the model to decode the text.
-
-         Args:
-             generated_outputs (`torch.Tensor` or `np.ndarray`):
-                 The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
-                 or `(sequence_length,)`.
-             skip_special_tokens (`bool`, *optional*, defaults to `True`):
-                 Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
-             clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
-                 Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
-             **kwargs:
-                 Additional arguments to be passed to the tokenizer's `batch_decode method`.
-
-         Returns:
-             `list[str]`: The decoded text.
-         """
-         return self.tokenizer.batch_decode(
-             generated_outputs,
-             skip_special_tokens=skip_special_tokens,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             **kwargs,
-         )
-
-     @property
-     def model_input_names(self):
-         tokenizer_input_names = self.tokenizer.model_input_names
-         image_processor_input_names = self.image_processor.model_input_names
-         names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
-         return names_from_processor
-
-
- __all__ = ["Qwen2_5_VLProcessor"]
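
The central step in the removed Qwen2_5_VLProcessor.__call__ is the placeholder expansion: each <|image_pad|> marker in the prompt is replaced by one token per merged vision patch, where the count is image_grid_thw.prod() // merge_size**2, and only then is the text tokenized. A standalone sketch of that expansion with made-up grid values (no tokenizer or image processor required):

import numpy as np

image_token = "<|image_pad|>"
merge_size = 2
image_grid_thw = np.array([[1, 8, 8]])   # made-up grid: t=1, 8x8 patches for one image

text = [f"Describe this image: {image_token}"]
merge_length = merge_size ** 2
index = 0
for i in range(len(text)):
    while image_token in text[i]:
        # one placeholder per merged patch, mirroring the loop in the removed __call__
        num_image_tokens = int(image_grid_thw[index].prod()) // merge_length
        text[i] = text[i].replace(image_token, "<|placeholder|>" * num_image_tokens, 1)
        index += 1
    text[i] = text[i].replace("<|placeholder|>", image_token)

print(text[0].count(image_token))  # 16 == (1 * 8 * 8) / merge_size**2
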