nexaai 1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl → 1.0.19rc9-cp310-cp310-macosx_14_0_universal2.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of nexaai has been flagged as possibly problematic.

Files changed (200)
  1. nexaai/_stub.cpython-310-darwin.so +0 -0
  2. nexaai/_version.py +1 -1
  3. nexaai/binds/libnexa_bridge.dylib +0 -0
  4. nexaai/mlx_backend/vlm/generate_qwen3_vl.py +14 -31
  5. nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +15 -32
  6. nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +7 -23
  7. nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +8 -24
  8. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/METADATA +1 -1
  9. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/RECORD +11 -200
  10. nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
  11. nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
  12. nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
  13. nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
  14. nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
  15. nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
  16. nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
  17. nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
  18. nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
  19. nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
  20. nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
  21. nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
  22. nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
  23. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
  24. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
  25. nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
  26. nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
  27. nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
  28. nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
  29. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
  30. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
  31. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
  32. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
  33. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
  34. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
  35. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
  36. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
  37. nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
  38. nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
  39. nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
  40. nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
  41. nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
  42. nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
  43. nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
  44. nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
  45. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
  46. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
  47. nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
  48. nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
  49. nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
  50. nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
  51. nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
  52. nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
  53. nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
  54. nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
  55. nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
  56. nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
  57. nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
  58. nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
  59. nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
  60. nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
  61. nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
  62. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
  63. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
  64. nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
  65. nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
  66. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
  67. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
  68. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
  69. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
  70. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
  71. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
  72. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
  73. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
  74. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
  75. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
  76. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
  77. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
  78. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
  79. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
  80. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
  81. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
  82. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
  83. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
  84. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
  85. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
  86. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
  87. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
  88. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
  89. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
  90. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
  91. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
  92. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
  93. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
  94. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
  95. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
  96. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
  97. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
  98. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
  99. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
  100. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
  101. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
  102. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
  103. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
  104. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
  105. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
  106. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
  107. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
  108. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
  109. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
  110. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
  111. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
  112. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
  113. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
  114. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
  115. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
  116. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
  117. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
  118. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
  119. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
  120. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
  121. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
  122. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
  123. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
  124. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
  125. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
  126. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
  127. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
  128. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
  129. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
  130. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
  131. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
  132. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
  133. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
  134. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
  135. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
  136. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
  137. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
  138. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
  139. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
  140. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
  141. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
  142. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
  143. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
  144. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
  145. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
  146. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
  147. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
  148. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
  149. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
  150. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
  151. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
  152. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
  153. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
  154. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
  155. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
  156. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
  157. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
  158. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
  159. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
  160. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
  161. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
  162. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
  163. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
  164. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
  165. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
  166. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
  167. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
  168. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
  169. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
  170. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
  171. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
  172. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
  173. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
  174. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
  175. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
  176. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
  177. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
  178. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
  179. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
  180. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
  181. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
  182. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
  183. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
  184. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
  185. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
  186. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
  187. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
  188. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
  189. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
  190. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
  191. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
  192. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
  193. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
  194. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
  195. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
  196. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
  197. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
  198. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
  199. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/WHEEL +0 -0
  200. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/top_level.txt +0 -0
nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py
@@ -1,294 +0,0 @@
- import argparse
- import json
- import sys
- import os
- import mlx.core as mx
- import mlx.nn as nn
- import time
- from PIL import Image
- import requests
- import numpy as np
- from pathlib import Path
- from huggingface_hub import snapshot_download
-
- # Add current directory to path for imports
- curr_dir = os.path.dirname(os.path.abspath(__file__))
- sys.path.append(curr_dir)
- sys.path.append(os.path.dirname(curr_dir))
-
- # Add the qwen3vl model directory to path
- qwen3vl_dir = os.path.join(curr_dir, "modeling", "models", "qwen3_vl")
- sys.path.append(qwen3vl_dir)
-
- # Import required modules for quantized loading
- from transformers import AutoTokenizer
-
- # Try relative imports first, fallback to sys.path approach for Nuitka compatibility
- try:
-     from .modeling.models.qwen3_vl.llm_common.generate import nexa_generate_step
-     from .modeling.models.qwen3_vl.llm_common.cache import make_prompt_cache
-     from .modeling.models.qwen3_vl.qwen3vl import (
-         VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
-     )
-     from .modeling.models.qwen3_vl.processor import Qwen3VLProcessor
- except ImportError:
-     # Fallback for Nuitka compiled environment - use sys.path approach
-     from llm_common.generate import nexa_generate_step
-     from llm_common.cache import make_prompt_cache
-     from qwen3vl import VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
-     from processor import Qwen3VLProcessor
-
- from ml import ChatMessage
- from dataclasses import dataclass
- from typing import Any, Generator, List, Optional, Sequence, Tuple, Union
- from .generate import GenerationResult
-
- # Custom exception for context length exceeded
- class ContextLengthExceededError(Exception):
-     """Raised when input context length exceeds model's maximum context size"""
-     pass
-
- @dataclass
- class Qwen3VLBundledModel:
-     """Container for Qwen3-VL vision and language models."""
-     vision_model: VEGModel
-     llm_model: LLMModel
-
-
- def _ensure_list(x: Union[str, List[str], None]) -> Optional[List[str]]:
-     if x is None:
-         return None
-     return x if isinstance(x, list) else [x]
-
-
- def load_qwen3_vl(
-     path_or_repo: str,
-     adapter_path: Optional[str] = None,
-     lazy: bool = False,
-     revision: Optional[str] = None,
-     **kwargs,
- ) -> Tuple[Qwen3VLBundledModel, Qwen3VLProcessor]:
-     """Load Qwen3-VL quantized models and processor.
-
-     Parameters are aligned with .generate.load for compatibility.
-     """
-
-     model_path = Path(path_or_repo)
-     if not model_path.exists():
-         if "/" in path_or_repo:
-             model_path = Path(snapshot_download(
-                 repo_id=path_or_repo, repo_type="model", revision=revision))
-         else:
-             # Fallback to local modelfiles directory
-             model_path = Path(qwen3vl_dir) / "modelfiles"
-             if not model_path.exists():
-                 model_path = Path(curr_dir) / "modelfiles"
-
-     # Model configs (kept identical to main)
-     vision_config = VisionConfig(
-         hidden_size=1024,
-         intermediate_size=4096,
-         num_heads=16,
-         num_hidden_layers=24,
-         patch_size=16,
-         temporal_patch_size=2,
-         in_channels=3,
-         hidden_act="gelu",
-         spatial_merge_size=2,
-         out_hidden_size=2560,
-         num_position_embeddings=2304,
-         deepstack_visual_indexes=[5, 11, 17],
-     )
-
-     text_config = TextConfig(
-         model_type="qwen3vl",
-         hidden_size=2560,
-         num_hidden_layers=36,
-         intermediate_size=9728,
-         num_attention_heads=32,
-         num_key_value_heads=8,
-         rms_norm_eps=1e-6,
-         vocab_size=151936,
-         max_position_embeddings=32768,
-         rope_theta=5000000.0,
-         head_dim=128,
-         tie_word_embeddings=True,
-         attention_bias=False,
-         attention_dropout=0.0,
-         rope_scaling={"mrope_section": [24, 20, 20],
-                       "rope_type": "default", "type": "default"},
-     )
-
-     vision_model = VEGModel(vision_config)
-     llm_model = LLMModel(text_config)
-
-     # Try to load LLM model from available files in order of preference
-     preferred_order = [
-         ("qwen3vl-llm-4B-q4_0.safetensors", 4),
-         ("qwen3vl-llm-4B-q8_0.safetensors", 8),
-         ("qwen3vl-llm-4B-f32.safetensors", 32)
-     ]
-
-     llm_weights_path = None
-     quantization_bits = None
-
-     # Try loading in order of preference
-     for filename, bits in preferred_order:
-         candidate_path = model_path / filename
-         if candidate_path.exists():
-             llm_weights_path = candidate_path
-             quantization_bits = bits
-             break
-
-     if llm_weights_path is None:
-         # Fallback to original hardcoded path for backward compatibility
-         llm_weights_path = model_path / "qwen3vl-llm-4B-q4_0.safetensors"
-         quantization_bits = 4
-
-     vision_weights_path = model_path / "qwen3vl-vision-4B-f16.safetensors"
-
-     if not vision_weights_path.exists() or not llm_weights_path.exists():
-         raise FileNotFoundError(
-             f"Missing safetensors. Vision: {vision_weights_path}, LLM: {llm_weights_path}"
-         )
-
-     # Load weights (vision fp16, llm with detected quantization)
-     vision_model.set_dtype(mx.float16)
-     vision_model.load_weights(str(vision_weights_path), strict=True)
-
-     # Apply quantization if needed and load LLM weights
-     if quantization_bits in [4, 8]:
-         nn.quantize(llm_model, bits=quantization_bits, group_size=64,
-                     class_predicate=quant_predicate)
-
-     llm_model.load_weights(str(llm_weights_path), strict=True)
-
-     # Tokenizer and processor
-     tokenizer = AutoTokenizer.from_pretrained(path_or_repo)
-     processor = Qwen3VLProcessor(tokenizer=tokenizer)
-
-     return Qwen3VLBundledModel(vision_model=vision_model, llm_model=llm_model), processor
-
- def apply_chat_template_qwen3_vl(messages: Sequence[ChatMessage], num_images: int = 0, num_audios: int = 0, tools: Optional[str] = None, enable_thinking: bool = False) -> str:
-     """Apply chat template: serialize messages with content as a list of typed items."""
-
-     messages_dict = []
-     for i, msg in enumerate(messages):
-         content_items = [{"type": "text", "text": msg.content}]
-         messages_dict.append({"role": msg.role, "content": content_items})
-
-     result = json.dumps(messages_dict)
-
-     return result
-
-
- def stream_generate_qwen3_vl(
-     model: Qwen3VLBundledModel,
-     processor: Qwen3VLProcessor,
-     prompt: str,
-     image: Union[str, List[str]] = None,
-     audio: Union[str, List[str]] = None,
-     max_tokens: int = 512,
-     **kwargs,
-
- ) -> Generator[Any, None, None]:
-     """Stream generation yielding .generate.GenerationResult-compatible chunks."""
-
-     try:
-         messages = json.loads(prompt)
-     except json.JSONDecodeError as e:
-         raise
-
-     if image is not None:
-         image_list = image if isinstance(image, list) else [image]
-         pil_images = []
-         for i, p in enumerate(image_list):
-             try:
-                 img = Image.open(p)
-                 pil_images.append(img)
-             except Exception as e:
-                 continue
-
-         contents = [{"type": "image", "image": img} for img in pil_images]
-         if messages:
-             if "content" not in messages[-1] or not isinstance(messages[-1]["content"], list):
-                 messages[-1]["content"] = []
-             messages[-1]["content"].extend(contents)
-
-     raw_text, processed_images = processor.messages_to_text(
-         messages, add_generation_prompt=True)
-
-
-     inputs = processor.text_to_input_ids(
-         raw_text, images=processed_images, return_tensors="mlx")
-
-     input_ids = inputs["input_ids"]
-     pixel_values = inputs.get("pixel_values")
-     image_grid_thw = inputs.get("image_grid_thw")
-
-
-     # Check if input context exceeds KV cache size and raise error
-     max_kv_size = 4096  # This should match the max_kv_size used in make_prompt_cache and nexa_generate_step
-     if input_ids.size > max_kv_size:
-         error_msg = f"Input context length ({input_ids.size} tokens) exceeds maximum supported context size ({max_kv_size} tokens). Please reduce the input length."
-         raise ContextLengthExceededError(error_msg)
-
-     inputs_embeds, deepstack_visual_embeds, visual_pos_masks, cos, sin, rope_deltas = handle_multimodal_embeds(
-         model.vision_model, model.llm_model, input_ids, pixel_values, image_grid_thw
-     )
-
-
-     prompt_cache = make_prompt_cache(model.llm_model, max_kv_size=4096)
-     tokenizer = processor.tokenizer
-
-     # Rough prompt TPS estimation based on input size
-     prompt_start = time.perf_counter()
-     prompt_tps = input_ids.size / max(1e-6, (time.perf_counter() - prompt_start))
-
-     gen_count = 0
-     tic = time.perf_counter()
-
-
-     try:
-         for token, logprobs in nexa_generate_step(
-             model=model.llm_model,
-             prompt=None,
-             input_embeddings=inputs_embeds,
-             max_tokens=max_tokens,
-             max_kv_size=4096,
-             prompt_cache=prompt_cache,
-             visual_pos_masks=visual_pos_masks,
-             deepstack_visual_embeds=deepstack_visual_embeds,
-             cos=cos,
-             sin=sin,
-             rope_deltas=rope_deltas,
-         ):
-             if token == tokenizer.eos_token_id:
-                 break
-
-             text_piece = tokenizer.decode([token])
-             gen_count += 1
-
-             current_tps = gen_count / max(1e-6, (time.perf_counter() - tic))
-
-             yield GenerationResult(
-                 text=text_piece,
-                 token=token,
-                 logprobs=logprobs,
-                 prompt_tokens=int(input_ids.size),
-                 generation_tokens=gen_count,
-                 prompt_tps=float(prompt_tps),
-                 generation_tps=float(current_tps),
-                 peak_memory=float(mx.get_peak_memory() / 1e9),
-             )
-     except Exception as e:
-         import traceback
-         traceback.print_exc()
-         raise
-
-
- def quant_predicate(path: str, mod: nn.Module) -> bool:
-     """Quantization predicate to exclude certain layers from quantization."""
-     if path.endswith("lm_head") or "norm" in path.lower() or "embed" in path.lower():
-         return False
-     return isinstance(mod, (nn.Linear, nn.Embedding))
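For orientation, here is a minimal sketch of how the deleted module's public helpers fit together, inferred from the code above. The import path, checkpoint location, and image file are assumptions for illustration, not part of the package's documented API.

```python
import json

# Hypothetical import; the module above lived at
# nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py before removal.
from generate_qwen3_vl import load_qwen3_vl, stream_generate_qwen3_vl

# load_qwen3_vl resolves a local directory or HF repo id, picks the first
# available weights file (q4_0 -> q8_0 -> f32), quantizes the LLM to match,
# and returns (Qwen3VLBundledModel, Qwen3VLProcessor).
model, processor = load_qwen3_vl("./modelfiles")  # hypothetical local path

# The prompt is a JSON-serialized message list, the same shape that
# apply_chat_template_qwen3_vl produces from ChatMessage objects.
prompt = json.dumps([
    {"role": "user", "content": [{"type": "text", "text": "Describe this image."}]}
])

# Image paths are opened with PIL and appended to the last message; chunks
# arrive as GenerationResult objects carrying text and throughput stats.
for chunk in stream_generate_qwen3_vl(model, processor, prompt,
                                      image="example.jpg", max_tokens=128):
    print(chunk.text, end="", flush=True)
```

Note that this dense variant raises ContextLengthExceededError when the prompt exceeds the fixed 4096-token KV cache; the MoE variant below omits that guard.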
nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py
@@ -1,276 +0,0 @@
- import argparse
- import json
- import sys
- import os
- import mlx.core as mx
- import mlx.nn as nn
- import time
- from PIL import Image
- import requests
- import numpy as np
- from pathlib import Path
- from huggingface_hub import snapshot_download
-
- # Add current directory to path for imports
- curr_dir = os.path.dirname(os.path.abspath(__file__))
- sys.path.append(curr_dir)
- sys.path.append(os.path.dirname(curr_dir))
-
- # Add the qwen3vl model directory to path
- qwen3vl_dir = os.path.join(curr_dir, "modeling", "models", "qwen3vl_moe")
- sys.path.append(qwen3vl_dir)
-
- # Import required modules for quantized loading
- from transformers import AutoTokenizer
-
- # Try relative imports first, fallback to sys.path approach for Nuitka compatibility
- try:
-     from .modeling.models.qwen3_vl_moe.llm_common.generate import nexa_generate_step
-     from .modeling.models.qwen3_vl_moe.llm_common.cache import make_prompt_cache
-     from .modeling.models.qwen3_vl_moe.qwen3vl_moe import (
-         VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
-     )
-     from .modeling.models.qwen3_vl_moe.processor import Qwen3VLProcessor
- except ImportError:
-     # Fallback for Nuitka compiled environment - use sys.path approach
-     from llm_common.generate import nexa_generate_step
-     from llm_common.cache import make_prompt_cache
-     from qwen3vl_moe import VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
-     from processor import Qwen3VLProcessor
-
- from ml import ChatMessage
- from dataclasses import dataclass
- from typing import Any, Generator, List, Optional, Sequence, Tuple, Union
- from .generate import GenerationResult
-
- @dataclass
- class Qwen3VLBundledModel:
-     """Container for Qwen3-VL MoE vision and language models."""
-     vision_model: VEGModel
-     llm_model: LLMModel
-
-
- def _ensure_list(x: Union[str, List[str], None]) -> Optional[List[str]]:
-     if x is None:
-         return None
-     return x if isinstance(x, list) else [x]
-
-
- def load_qwen3_vl(
-     path_or_repo: str,
-     adapter_path: Optional[str] = None,
-     lazy: bool = False,
-     revision: Optional[str] = None,
-     **kwargs,
- ) -> Tuple[Qwen3VLBundledModel, Qwen3VLProcessor]:
-     """Load Qwen3-VL MoE quantized models and processor.
-
-     Parameters are aligned with .generate.load for compatibility.
-     """
-     model_path = Path(path_or_repo)
-     if not model_path.exists():
-         if "/" in path_or_repo:
-             model_path = Path(snapshot_download(
-                 repo_id=path_or_repo, repo_type="model", revision=revision))
-         else:
-             # Fallback to local modelfiles directory
-             model_path = Path(qwen3vl_dir) / "modelfiles"
-             if not model_path.exists():
-                 model_path = Path(curr_dir) / "modelfiles"
-
-     # Model configs - Updated to match Qwen3VL-MoE specifications
-     vision_config = VisionConfig(
-         hidden_size=1152,
-         intermediate_size=4304,
-         num_heads=16,
-         num_hidden_layers=27,
-         patch_size=16,
-         temporal_patch_size=2,
-         in_channels=3,
-         hidden_act="gelu_pytorch_tanh",
-         spatial_merge_size=2,
-         out_hidden_size=2048,
-         num_position_embeddings=2304,
-         deepstack_visual_indexes=[8, 16, 24],
-     )
-
-     text_config = TextConfig(
-         model_type="qwen3_vl_moe_text",
-         hidden_size=2048,
-         num_hidden_layers=48,
-         intermediate_size=6144,
-         num_attention_heads=32,
-         num_key_value_heads=4,
-         rms_norm_eps=1e-6,
-         vocab_size=152064,
-         max_position_embeddings=128000,
-         rope_theta=1000000.0,
-         head_dim=128,
-         tie_word_embeddings=False,
-         attention_bias=False,
-         attention_dropout=0.0,
-         rope_scaling={
-             "mrope_interleaved": True,
-             "mrope_section": [24, 20, 20],
-             "rope_type": "default"
-         },
-         # MoE specific parameters
-         num_experts=128,
-         num_experts_per_tok=8,
-         moe_intermediate_size=768,
-         shared_expert_intermediate_size=0,
-         norm_topk_prob=True,
-         decoder_sparse_step=1,
-         max_window_layers=48,
-         sliding_window=32768,
-         mlp_only_layers=[],
-         use_qk_norm=True,
-         layer_types=[],
-     )
-
-     vision_model = VEGModel(vision_config)
-     llm_model = LLMModel(text_config)
-
-     # Try to load LLM model from available files in order of preference
-     preferred_order = [
-         ("qwen3vl-moe-llm-30B-A3B-q4_0.safetensors", 4),
-         ("qwen3vl-moe-llm-30B-A3B-q8_0.safetensors", 8),
-         ("qwen3vl-moe-llm-30B-A3B-f32.safetensors", 32),
-     ]
-
-     llm_weights_path = None
-     quantization_bits = None
-
-     # Try loading in order of preference
-     for filename, bits in preferred_order:
-         candidate_path = model_path / filename
-         if candidate_path.exists():
-             llm_weights_path = candidate_path
-             quantization_bits = bits
-             break
-
-     if llm_weights_path is None:
-         # Fallback to original hardcoded path for backward compatibility
-         llm_weights_path = model_path / "qwen3vl-moe-llm-30B-A3B-q4_0.safetensors"
-         quantization_bits = 4
-
-     vision_weights_path = model_path / "qwen3vl-moe-vision-30B-A3B-f16.safetensors"
-
-     if not vision_weights_path.exists():
-         raise FileNotFoundError(
-             f"Missing vision weights: {vision_weights_path}"
-         )
-
-     # Load weights (vision fp16, llm with detected quantization)
-     vision_model.set_dtype(mx.float16)
-     vision_model.load_weights(str(vision_weights_path), strict=True)
-
-     # Apply quantization if needed and load LLM weights
-     if quantization_bits in [4, 8]:
-         nn.quantize(llm_model, bits=quantization_bits, group_size=64,
-                     class_predicate=quant_predicate)
-     # For f32 (32-bit), no quantization needed
-
-     llm_model.load_weights(str(llm_weights_path), strict=True)
-
-     # Tokenizer and processor
-     tokenizer = AutoTokenizer.from_pretrained(path_or_repo)
-     processor = Qwen3VLProcessor(tokenizer=tokenizer)
-
-     return Qwen3VLBundledModel(vision_model=vision_model, llm_model=llm_model), processor
-
- def apply_chat_template_qwen3_vl(messages: Sequence[ChatMessage], num_images: int = 0, num_audios: int = 0, tools: Optional[str] = None, enable_thinking: bool = False) -> str:
-     """Apply chat template: serialize messages with content as a list of typed items."""
-     messages_dict = []
-     for msg in messages:
-         content_items = [{"type": "text", "text": msg.content}]
-         messages_dict.append({"role": msg.role, "content": content_items})
-     return json.dumps(messages_dict)
-
-
- def stream_generate_qwen3_vl(
-     model: Qwen3VLBundledModel,
-     processor: Qwen3VLProcessor,
-     prompt: str,
-     image: Union[str, List[str]] = None,
-     audio: Union[str, List[str]] = None,
-     max_tokens: int = 512,
-     **kwargs,
-
- ) -> Generator[Any, None, None]:
-     """Stream generation yielding .generate.GenerationResult-compatible chunks."""
-     messages = json.loads(prompt)
-     if image is not None:
-         image_list = image if isinstance(image, list) else [image]
-         pil_images = []
-         for p in image_list:
-             try:
-                 pil_images.append(Image.open(p))
-             except Exception:
-                 continue
-         contents = [{"type": "image", "image": img} for img in pil_images]
-         if messages:
-             if "content" not in messages[-1] or not isinstance(messages[-1]["content"], list):
-                 messages[-1]["content"] = []
-             messages[-1]["content"].extend(contents)
-
-     raw_text, processed_images = processor.messages_to_text(
-         messages, add_generation_prompt=True)
-
-     inputs = processor.text_to_input_ids(
-         raw_text, images=processed_images, return_tensors="mlx")
-
-     input_ids = inputs["input_ids"]
-     pixel_values = inputs.get("pixel_values")
-     image_grid_thw = inputs.get("image_grid_thw")
-
-     inputs_embeds, deepstack_visual_embeds, visual_pos_masks, cos, sin, rope_deltas = handle_multimodal_embeds(
-         model.vision_model, model.llm_model, input_ids, pixel_values, image_grid_thw
-     )
-
-     prompt_cache = make_prompt_cache(model.llm_model, max_kv_size=4096)
-     tokenizer = processor.tokenizer
-
-     # Rough prompt TPS estimation based on input size
-     prompt_start = time.perf_counter()
-     prompt_tps = input_ids.size / max(1e-6, (time.perf_counter() - prompt_start))
-
-     gen_count = 0
-     tic = time.perf_counter()
-
-     for token, logprobs in nexa_generate_step(
-         model=model.llm_model,
-         prompt=None,
-         input_embeddings=inputs_embeds,
-         max_tokens=max_tokens,
-         max_kv_size=4096,
-         prompt_cache=prompt_cache,
-         visual_pos_masks=visual_pos_masks,
-         deepstack_visual_embeds=deepstack_visual_embeds,
-         cos=cos,
-         sin=sin,
-         rope_deltas=rope_deltas,
-     ):
-         if token == tokenizer.eos_token_id:
-             break
-
-         text_piece = tokenizer.decode([token])
-         gen_count += 1
-
-         yield GenerationResult(
-             text=text_piece,
-             token=token,
-             logprobs=logprobs,
-             prompt_tokens=int(input_ids.size),
-             generation_tokens=gen_count,
-             prompt_tps=float(prompt_tps),
-             generation_tps=float(
-                 gen_count / max(1e-6, (time.perf_counter() - tic))),
-             peak_memory=float(mx.get_peak_memory() / 1e9),
-         )
-
- def quant_predicate(path: str, mod: nn.Module) -> bool:
-     """Quantization predicate to exclude certain layers from quantization."""
-     if path.endswith("lm_head") or "norm" in path.lower() or "embed" in path.lower():
-         return False
-     return isinstance(mod, (nn.Linear, nn.Embedding))
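Both deleted files gate quantization through the class_predicate hook of mlx.nn.quantize, which walks the module tree and asks the predicate whether each submodule should be converted. Below is a self-contained sketch of that pattern; the TinyLM module is invented for the example, while the predicate logic mirrors the quant_predicate shown above.

```python
import mlx.nn as nn


class TinyLM(nn.Module):
    """Toy model with the layer kinds the predicate must distinguish."""

    def __init__(self):
        super().__init__()
        self.embed_tokens = nn.Embedding(1000, 64)
        self.proj = nn.Linear(64, 64)
        self.norm = nn.RMSNorm(64)
        self.lm_head = nn.Linear(64, 1000)

    def __call__(self, x):
        return self.lm_head(self.norm(self.proj(self.embed_tokens(x))))


def quant_predicate(path: str, mod: nn.Module) -> bool:
    # Same rule as in the deleted files: keep the output head, norms, and
    # embeddings in full precision; quantize the remaining Linear layers.
    if path.endswith("lm_head") or "norm" in path.lower() or "embed" in path.lower():
        return False
    return isinstance(mod, (nn.Linear, nn.Embedding))


model = TinyLM()
nn.quantize(model, bits=4, group_size=64, class_predicate=quant_predicate)

print(type(model.proj).__name__)     # QuantizedLinear
print(type(model.lm_head).__name__)  # Linear (excluded by the predicate)
```

Excluding the head, norms, and embeddings is a common accuracy-preserving choice: those layers are small relative to the attention and MLP projections but disproportionately sensitive to quantization error.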