nexaai-1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl → nexaai-1.0.19rc8-cp310-cp310-macosx_14_0_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nexaai has been flagged as possibly problematic.

Files changed (196)
  1. nexaai/_stub.cpython-310-darwin.so +0 -0
  2. nexaai/_version.py +1 -1
  3. nexaai/binds/libnexa_bridge.dylib +0 -0
  4. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/METADATA +1 -1
  5. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/RECORD +7 -196
  6. nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
  7. nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
  8. nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
  9. nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
  10. nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
  11. nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
  12. nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
  13. nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
  14. nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
  15. nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
  16. nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
  17. nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
  18. nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
  19. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
  20. nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
  21. nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
  22. nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
  23. nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
  24. nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
  25. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
  26. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
  27. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
  28. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
  29. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
  30. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
  31. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
  32. nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
  33. nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
  34. nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
  35. nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
  36. nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
  37. nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
  38. nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
  39. nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
  40. nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
  41. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
  42. nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
  43. nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
  44. nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
  45. nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
  46. nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
  47. nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
  48. nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
  49. nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
  50. nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
  51. nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
  52. nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
  53. nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
  54. nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
  55. nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
  56. nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
  57. nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
  58. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
  59. nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
  60. nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
  61. nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
  62. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
  63. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
  64. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
  65. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
  66. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
  67. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
  68. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
  69. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
  70. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
  71. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
  72. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
  73. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
  74. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
  75. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
  76. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
  77. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
  78. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
  79. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
  80. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
  81. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
  82. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
  83. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
  84. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
  85. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
  86. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
  87. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
  88. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
  89. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
  90. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
  91. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
  92. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
  93. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
  94. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
  95. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
  96. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
  97. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
  98. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
  99. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
  100. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
  101. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
  102. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
  103. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
  104. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
  105. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
  106. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
  107. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
  108. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
  109. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
  110. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
  111. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
  112. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
  113. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
  114. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
  115. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
  116. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
  117. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
  118. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
  119. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
  120. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
  121. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
  122. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
  123. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
  124. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
  125. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
  126. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
  127. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
  128. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
  129. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
  130. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
  131. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
  132. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
  133. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
  134. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
  135. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
  136. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
  137. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
  138. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
  139. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
  140. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
  141. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
  142. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
  143. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
  144. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
  145. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
  146. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
  147. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
  148. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
  149. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
  150. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
  151. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
  152. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
  153. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
  154. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
  155. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
  156. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
  157. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
  158. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
  159. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
  160. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
  161. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
  162. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
  163. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
  164. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
  165. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
  166. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
  167. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
  168. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
  169. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
  170. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
  171. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
  172. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
  173. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
  174. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
  175. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
  176. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
  177. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
  178. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
  179. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
  180. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
  181. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
  182. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
  183. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
  184. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
  185. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
  186. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
  187. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
  188. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
  189. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
  190. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
  191. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
  192. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
  193. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
  194. nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
  195. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/WHEEL +0 -0
  196. {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/top_level.txt +0 -0
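
Aside from the rebuilt native binaries (_stub.cpython-310-darwin.so, libnexa_bridge.dylib) and the version bump, every change above is a deletion: rc8 drops the entire bundled nexa_mlx/py-lib Python source tree. A quick way to reproduce this file-level comparison locally is to diff the two wheels' archives directly; the sketch below assumes both wheels have been downloaded from PyPI (the paths are placeholders) and relies only on wheels being plain zip archives.

import zipfile

# Placeholder paths -- point these at the downloaded wheel files.
OLD = "nexaai-1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl"
NEW = "nexaai-1.0.19rc8-cp310-cp310-macosx_14_0_universal2.whl"

with zipfile.ZipFile(OLD) as old_whl, zipfile.ZipFile(NEW) as new_whl:
    old_files = set(old_whl.namelist())
    new_files = set(new_whl.namelist())

# Files only in rc7 were removed in rc8 (the py-lib tree listed above);
# files only in rc8 were added.
for path in sorted(old_files - new_files):
    print("removed:", path)
for path in sorted(new_files - old_files):
    print("added:", path)

The hunk that follows shows one of the removed files in full: a 476-line pure-Python Qwen3-VL processor.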
@@ -1,476 +0,0 @@
- from typing import Any, Dict, List, Optional, Union
- import mlx.core as mx
- import numpy as np
- from PIL import Image
- import io
- import base64
-
-
- class Qwen3VLProcessor:
-     def __init__(self, tokenizer=None, image_processor=None):
-         self.tokenizer = tokenizer
-         self.image_processor = image_processor
-
-         # Vision tokens (following the official implementation)
-         self.image_token = "<|image_pad|>"
-         self.vision_start_token = "<|vision_start|>"
-         self.vision_end_token = "<|vision_end|>"
-
-         # Token IDs (will be set properly if tokenizer is provided)
-         if tokenizer:
-             self.image_token_id = getattr(tokenizer, 'image_token_id',
-                                           tokenizer.convert_tokens_to_ids(self.image_token))
-             self.vision_start_token_id = getattr(tokenizer, 'vision_start_token_id',
-                                                  tokenizer.convert_tokens_to_ids(self.vision_start_token))
-             self.vision_end_token_id = getattr(tokenizer, 'vision_end_token_id',
-                                                tokenizer.convert_tokens_to_ids(self.vision_end_token))
-         else:
-             # Fallback IDs for when no tokenizer is provided
-             self.image_token_id = 151655
-             self.vision_start_token_id = 151652
-             self.vision_end_token_id = 151653
-
-         # Image processing parameters (following Qwen3VL defaults)
-         self.min_pixels = 4096
-         self.max_pixels = 16777216
-         self.patch_size = 16
-         self.merge_size = 2
-         self.temporal_patch_size = 2
-
-         # Add the missing image_mean and image_std
-         self.image_mean = [0.5, 0.5, 0.5]
-         self.image_std = [0.5, 0.5, 0.5]
-
-     def _extract_patches(self, image_array: np.ndarray) -> np.ndarray:
-         """
-         Extract patches from image array to create proper tensor for Conv3d.
-
-         Args:
-             image_array: Shape (C, H, W)
-
-         Returns:
-             patches: Flattened tensor that can be reshaped to
-                 (num_patches, C, temporal_patch_size, patch_size, patch_size)
-         """
-         C, H, W = image_array.shape
-
-         # Calculate number of patches
-         patch_h = H // self.patch_size
-         patch_w = W // self.patch_size
-
-         # Extract spatial patches
-         # Reshape to (C, patch_h, patch_size, patch_w, patch_size)
-         patches = image_array.reshape(
-             C, patch_h, self.patch_size, patch_w, self.patch_size
-         )
-
-         # Rearrange to (patch_h, patch_w, C, patch_size, patch_size)
-         patches = patches.transpose(1, 3, 0, 2, 4)
-
-         # Reshape to (patch_h * patch_w, C, patch_size, patch_size)
-         num_patches = patch_h * patch_w
-         patches = patches.reshape(num_patches, C, self.patch_size, self.patch_size)
-
-         # Add temporal dimension by duplicating the patches
-         # Shape: (num_patches, C, temporal_patch_size, patch_size, patch_size)
-         patches = np.tile(patches[:, :, None, :, :], (1, 1, self.temporal_patch_size, 1, 1))
-
-         return patches
-
-     def _process_single_image(self, image: Union[str, Image.Image, np.ndarray]) -> Dict[str, Any]:
-         """Process a single image and return processed data."""
-         if isinstance(image, str):
-             if image.startswith('data:image'):
-                 image_data = base64.b64decode(image.split(',')[1])
-                 image = Image.open(io.BytesIO(image_data))
-             else:
-                 image = Image.open(image)
-         elif isinstance(image, np.ndarray):
-             image = Image.fromarray(image)
-
-         if image.mode != 'RGB':
-             image = image.convert('RGB')
-
-         # Resize image based on pixel constraints
-         width, height = image.size
-         pixels = width * height
-
-         if pixels < self.min_pixels:
-             scale = (self.min_pixels / pixels) ** 0.5
-             width = int(width * scale)
-             height = int(height * scale)
-         elif pixels > self.max_pixels:
-             scale = (self.max_pixels / pixels) ** 0.5
-             width = int(width * scale)
-             height = int(height * scale)
-
-         # Ensure dimensions are multiples of patch_size AND work with merge_size
-         # Use fraction-based rounding to match PyTorch behavior
-         import math
-
-         width_frac = (width / self.patch_size) % 1
-         height_frac = (height / self.patch_size) % 1
-
-         # Round up if fraction >= 0.3, otherwise round down
-         # This matches the observed PyTorch processor behavior
-         if width_frac >= 0.3:
-             width = math.ceil(width / self.patch_size) * self.patch_size
-         else:
-             width = (width // self.patch_size) * self.patch_size
-
-         if height_frac >= 0.3:
-             height = math.ceil(height / self.patch_size) * self.patch_size
-         else:
-             height = (height // self.patch_size) * self.patch_size
-
-         # CRITICAL: Ensure patch dimensions are even for 2x2 merging
-         # If either dimension is odd, add one more patch to make it even
-         h_patches = height // self.patch_size
-         w_patches = width // self.patch_size
-
-         if h_patches % 2 == 1:
-             height += self.patch_size  # Add one more patch row
-
-         if w_patches % 2 == 1:
-             width += self.patch_size  # Add one more patch column
-
-         if width == 0 or height == 0:
-             width = height = self.patch_size
-
-         image = image.resize((width, height), Image.Resampling.LANCZOS)
-
-         # Convert to array and normalize
-         image_array = np.array(image).astype(np.float32) / 255.0
-
-         # Qwen3VL normalization
-         mean = np.array(self.image_mean)
-         std = np.array(self.image_std)
-         image_array = (image_array - mean) / std
-
-         # Convert HWC to CHW
-         image_array = np.transpose(image_array, (2, 0, 1))
-
-         # Calculate grid dimensions
-         h_patches = height // self.patch_size
-         w_patches = width // self.patch_size
-
-         # Extract patches using the exact same method as PyTorch Conv3d unfold
-         C, H, W = image_array.shape
-
-         # Reshape to extract patches: (C, H//patch_size, patch_size, W//patch_size, patch_size)
-         patches = image_array.reshape(C, h_patches, self.patch_size, w_patches, self.patch_size)
-
-         # Rearrange to group patches: (h_patches, w_patches, C, patch_size, patch_size)
-         patches = patches.transpose(1, 3, 0, 2, 4)
-
-         # Flatten spatial patches: (h_patches * w_patches, C, patch_size, patch_size)
-         patches = patches.reshape(-1, C, self.patch_size, self.patch_size)
-
-         # Add temporal dimension: (num_patches, C, T, patch_size, patch_size)
-         patches_with_temporal = np.tile(patches[:, :, None, :, :], (1, 1, self.temporal_patch_size, 1, 1))
-
-         # Flatten each patch in the order: C, T, H, W to match PyTorch Conv3d
-         pixel_values = patches_with_temporal.reshape(patches_with_temporal.shape[0], -1)
-
-         # Apply spatial merging reordering to match PyTorch processor
-         # Group patches into merge_size x merge_size blocks and reorder
-         pixel_values = pixel_values.reshape(h_patches // self.merge_size, self.merge_size,
-                                             w_patches // self.merge_size, self.merge_size, -1)
-         # Rearrange to (h_blocks, w_blocks, merge_size*merge_size, feature_dim)
-         pixel_values = pixel_values.transpose(0, 2, 1, 3, 4)
-         pixel_values = pixel_values.reshape(h_patches // self.merge_size,
-                                             w_patches // self.merge_size,
-                                             self.merge_size * self.merge_size, -1)
-         # Flatten to (total_merged_patches, feature_dim)
-         pixel_values = pixel_values.reshape(-1, pixel_values.shape[-1])
-
-         return {
-             'pixel_values': pixel_values,  # Shape: (num_patches, 1536)
-             'grid_thw': [1, h_patches, w_patches]  # T=1 for images
-         }
-
-     def _insert_image_tokens(self, text: str, image_grid_thw: List[List[int]]) -> str:
-         """Insert the correct number of image tokens based on grid dimensions."""
-         if not image_grid_thw:
-             return text
-
-         merge_length = self.merge_size ** 2
-         index = 0
-
-         while self.image_token in text and index < len(image_grid_thw):
-             # Calculate number of tokens needed for this image
-             t, h, w = image_grid_thw[index]
-             num_image_tokens = (t * h * w) // merge_length
-
-             # Replace one image token with the calculated number of tokens
-             text = text.replace(self.image_token, self.image_token * num_image_tokens, 1)
-             index += 1
-
-         return text
-
-     def __call__(
-         self,
-         text: Union[str, List[str]] = None,
-         images: Union[Image.Image, List[Image.Image], str, List[str], np.ndarray, List[np.ndarray]] = None,
-         return_tensors: str = "mlx",
-         **kwargs
-     ) -> Dict[str, mx.array]:
-         """
-         Process text and images for Qwen3VL model.
-
-         Returns:
-             Dict containing:
-             - input_ids: Tokenized text with proper image tokens
-             - pixel_values: Processed image patches (if images provided)
-             - image_grid_thw: Grid dimensions for images (if images provided)
-         """
-         result = {}
-
-         # Process images first
-         grid_thw_list = None
-         if images is not None:
-             if not isinstance(images, list):
-                 images = [images]
-
-             # Check if images list is not empty
-             if len(images) > 0:
-                 if self.image_processor is not None:
-                     image_inputs = self.image_processor(images=images, return_tensors="np")
-                     result["pixel_values"] = mx.array(image_inputs["pixel_values"])
-                     result["image_grid_thw"] = mx.array(image_inputs["image_grid_thw"])
-                     grid_thw_list = image_inputs["image_grid_thw"].tolist()
-                 else:
-                     processed_patches = []
-                     grid_thw_list = []
-                     for image in images:
-                         processed = self._process_single_image(image)
-                         processed_patches.append(processed["pixel_values"])
-                         grid_thw_list.append(processed["grid_thw"])
-                     all_patches = np.concatenate(processed_patches, axis=0)
-                     result["pixel_values"] = mx.array(all_patches)
-                     result["image_grid_thw"] = mx.array(np.array(grid_thw_list))
-
-         # Process text
-         if text is not None:
-             if not isinstance(text, list):
-                 text = [text]
-             text = text.copy()
-             if grid_thw_list is not None:
-                 for i in range(len(text)):
-                     text[i] = self._insert_image_tokens(text[i], grid_thw_list)
-             if self.tokenizer:
-                 text_inputs = self.tokenizer(text, return_tensors="np", **kwargs)
-                 result["input_ids"] = mx.array(text_inputs["input_ids"])
-                 if "attention_mask" in text_inputs:
-                     result["attention_mask"] = mx.array(text_inputs["attention_mask"])
-             else:
-                 all_tokens = []
-                 for t in text:
-                     tokens = [hash(word) % 50000 for word in t.split()]
-                     all_tokens.append(tokens)
-                 max_len = max(len(tokens) for tokens in all_tokens)
-                 padded_tokens = []
-                 for tokens in all_tokens:
-                     padded = tokens + [0] * (max_len - len(tokens))
-                     padded_tokens.append(padded)
-                 result["input_ids"] = mx.array(np.array(padded_tokens))
-
-         return result
-
-     def _extract_images_and_text_from_messages(self, messages: List[Dict]) -> tuple:
-         """Extract images and text from message format."""
-         images = []
-         text_parts = []
-
-         for message in messages:
-             role = message.get("role", "user")
-             content = message.get("content", [])
-
-             if isinstance(content, str):
-                 # Simple text content
-                 text_parts.append({"role": role, "content": content})
-             elif isinstance(content, list):
-                 # Multi-modal content
-                 message_text_parts = []
-                 for item in content:
-                     if item.get("type") == "image":
-                         images.append(item.get("image"))
-                         message_text_parts.append("<|vision_start|><|image_pad|><|vision_end|>")
-                     elif item.get("type") == "text":
-                         message_text_parts.append(item.get("text", ""))
-
-                 combined_text = "".join(message_text_parts)
-                 text_parts.append({"role": role, "content": combined_text})
-
-         return images, text_parts
-
-     def apply_chat_template(
-         self,
-         messages: List[Dict],
-         add_generation_prompt: bool = True,
-         tokenize: bool = False,
-         **kwargs
-     ) -> str:
-         """Apply chat template to messages."""
-         # Handle multi-modal messages
-         if any(isinstance(msg.get("content"), list) for msg in messages):
-             _, text_messages = self._extract_images_and_text_from_messages(messages)
-             messages = text_messages
-
-         if not self.tokenizer:
-             # Fallback chat template
-             formatted_messages = []
-             for msg in messages:
-                 role = msg.get("role", "user")
-                 content = msg.get("content", "")
-                 formatted_messages.append(f"<|im_start|>{role}\n{content}<|im_end|>")
-
-             result = "\n".join(formatted_messages)
-             if add_generation_prompt:
-                 result += "\n<|im_start|>assistant\n"
-             return result
-
-         # Use tokenizer and manually remove system message to match ground truth
-         result = self.tokenizer.apply_chat_template(
-             messages,
-             add_generation_prompt=add_generation_prompt,
-             tokenize=tokenize,
-             **kwargs
-         )
-
-         # Remove system message to match ground truth format
-         system_prefix = '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n'
-         if result.startswith(system_prefix):
-             result = result[len(system_prefix):]
-
-         return result
-
-     def messages_to_text(
-         self,
-         messages: List[Dict],
-         add_generation_prompt: bool = True,
-         **kwargs
-     ) -> tuple:
-         """
-         Step 1: Convert multi-modal messages to text format.
-
-         Args:
-             messages: List of message dicts with role and content
-             add_generation_prompt: Whether to add generation prompt
-             **kwargs: Additional arguments
-
-         Returns:
-             Tuple of (text, images) where text is the formatted string and images is list of image objects
-         """
-         # Extract images and text from messages
-         images, text_messages = self._extract_images_and_text_from_messages(messages)
-
-         # Apply chat template
-         text = self.apply_chat_template(
-             text_messages,
-             add_generation_prompt=add_generation_prompt,
-             tokenize=False,
-             **kwargs
-         )
-
-         # Load images from URLs if needed
-         processed_images = []
-         for img in images:
-             if isinstance(img, str) and (img.startswith('http://') or img.startswith('https://')):
-                 # Load image from URL
-                 import requests
-                 from io import BytesIO
-                 try:
-                     response = requests.get(img, stream=True, timeout=10)
-                     img = Image.open(BytesIO(response.content))
-                 except Exception as e:
-                     raise ValueError(f"Failed to load image from URL {img}: {e}")
-             processed_images.append(img)
-
-         return text, processed_images
-
-     def text_to_input_ids(
-         self,
-         text: str,
-         images: List = None,
-         return_tensors: str = "mlx",
-         **kwargs
-     ) -> Dict[str, Any]:
-         """
-         Step 2: Process text and images into input_ids and pixel_values.
-
-         Args:
-             text: Formatted text string (from messages_to_text)
-             images: List of image objects
-             return_tensors: Format of returned tensors
-             **kwargs: Additional arguments
-
-         Returns:
-             Dict with input_ids, pixel_values, image_grid_thw
-         """
-         return self(
-             text=[text],
-             images=images,
-             return_tensors=return_tensors,
-             **kwargs
-         )
-
-     def process_messages(
-         self,
-         messages: List[Dict],
-         add_generation_prompt: bool = True,
-         return_tensors: str = "mlx",
-         **kwargs
-     ) -> Dict[str, Any]:
-         """
-         Process multi-modal messages end-to-end (combines messages_to_text + text_to_input_ids).
-
-         Args:
-             messages: List of message dicts with role and content
-             add_generation_prompt: Whether to add generation prompt
-             return_tensors: Format of returned tensors
-             **kwargs: Additional arguments
-
-         Returns:
-             Dict with input_ids, pixel_values, image_grid_thw
-         """
-         # Step 1: Convert messages to text
-         text, processed_images = self.messages_to_text(
-             messages,
-             add_generation_prompt=add_generation_prompt,
-             **kwargs
-         )
-
-         # Step 2: Convert text to input_ids
-         return self.text_to_input_ids(
-             text,
-             images=processed_images,
-             return_tensors=return_tensors,
-             **kwargs
-         )
-
-     def post_process_image_text_to_text(
-         self,
-         generated_outputs,
-         skip_special_tokens: bool = True,
-         **kwargs
-     ) -> List[str]:
-         """Decode generated token IDs back to text."""
-         if self.tokenizer:
-             if hasattr(generated_outputs, 'tolist'):
-                 generated_outputs = generated_outputs.tolist()
-
-             return self.tokenizer.batch_decode(
-                 generated_outputs,
-                 skip_special_tokens=skip_special_tokens,
-                 **kwargs
-             )
-         else:
-             # Fallback decoding
-             return ["[Decoded text - tokenizer not available]"] * len(generated_outputs)
-
-
- # Convenience function
- def create_qwen3vl_processor(tokenizer=None, image_processor=None):
-     """Create a Qwen3VL processor instance."""
-     return Qwen3VLProcessor(tokenizer=tokenizer, image_processor=image_processor)
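
For context on what was removed, below is a minimal sketch of how this processor was meant to be driven, inferred from its own docstrings. It exercises only the tokenizer-free fallback paths, so the hard-coded token IDs and the whitespace-hash tokenizer from the code above apply; the import path and image file are placeholders, and mlx, numpy, and Pillow must be installed.

import numpy as np
from PIL import Image

from processor import Qwen3VLProcessor  # assumes processor.py was copied onto sys.path

# Write a dummy 64x64 RGB image so the example is self-contained.
Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8)).save("example.jpg")

proc = Qwen3VLProcessor()  # no tokenizer: falls back to hard-coded Qwen token IDs

messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": "example.jpg"},
        {"type": "text", "text": "Describe this image."},
    ],
}]

# Step 1: collapse messages into a chat-template string and collect the images.
text, images = proc.messages_to_text(messages)

# Step 2: expand <|image_pad|> per image grid, tokenize, and patchify to MLX arrays.
inputs = proc.text_to_input_ids(text, images=images)
# inputs["pixel_values"] has shape (16, 1536) here: a 4x4 patch grid with
# 3 channels x 2 temporal frames x 16x16 pixels per patch.
# inputs["image_grid_thw"] holds one (T, H, W) grid per image, here [1, 4, 4].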