nexaai 1.0.19rc6__cp310-cp310-macosx_14_0_universal2.whl → 1.0.19rc7__cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nexaai might be problematic; see the registry's advisory page for more details.
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-base.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-cpu.so +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-metal.so +0 -0
- nexaai/binds/nexa_llama_cpp/libggml.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libllama.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libmtmd.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexa_mlx/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +12 -0
- nexaai/binds/nexa_mlx/py-lib/asr/interface.py +122 -0
- nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/common/utils.py +25 -0
- nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/cv/generate.py +195 -0
- nexaai/binds/nexa_mlx/py-lib/cv/interface.py +151 -0
- nexaai/binds/nexa_mlx/py-lib/cv/main.py +81 -0
- nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +1736 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +333 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +617 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/main.py +173 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +399 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +1 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +244 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +82 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +281 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +306 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +116 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +65 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +386 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +105 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +100 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +460 -0
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +274 -0
- nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/llm/generate.py +149 -0
- nexaai/binds/nexa_mlx/py-lib/llm/interface.py +764 -0
- nexaai/binds/nexa_mlx/py-lib/llm/main.py +68 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +174 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +287 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/main.py +127 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +330 -0
- nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +1 -0
- nexaai/binds/nexa_mlx/py-lib/sd/interface.py +362 -0
- nexaai/binds/nexa_mlx/py-lib/sd/main.py +286 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +306 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +116 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +65 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +385 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +105 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +100 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +460 -0
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +274 -0
- nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +12 -0
- nexaai/binds/nexa_mlx/py-lib/tts/interface.py +276 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +3 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +572 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +294 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +276 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +504 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/main.py +320 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +68 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +193 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +186 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +233 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +503 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +202 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +230 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +10 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +264 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +472 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +591 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +526 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +356 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +366 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +488 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +591 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +213 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +315 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +238 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +2 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +1038 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +139 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +322 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +629 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +1022 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +9 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +294 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +191 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +267 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +175 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +192 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +233 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +9 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +140 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +220 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +393 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +293 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +307 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +143 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +509 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +522 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +386 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +138 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +560 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +240 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +153 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +259 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +9 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +236 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +256 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +303 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +230 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +160 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +243 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +283 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +416 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +172 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +499 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +243 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +133 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +465 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +10 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +230 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +385 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +557 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +526 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +282 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +160 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +242 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +21 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +243 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +71 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +324 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +229 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +161 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +320 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +2 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +108 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +490 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +168 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +414 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +2 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +104 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +490 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +167 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +312 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +117 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +531 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +701 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +255 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +303 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +407 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +476 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +1223 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1309 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +8 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +62 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +209 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +215 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +474 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +39 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +344 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +9 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +70 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +296 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +160 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +928 -0
- nexaai/binds/nexa_nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexa_nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexa_nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexa_nexaml/libggml.dylib +0 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +276 -0
- nexaai/mlx_backend/vlm/interface.py +21 -4
- nexaai/mlx_backend/vlm/main.py +6 -2
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1309 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/utils/manifest_utils.py +222 -15
- nexaai/utils/model_manager.py +83 -7
- nexaai/utils/model_types.py +2 -0
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc7.dist-info}/METADATA +1 -1
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc7.dist-info}/RECORD +224 -24
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc7.dist-info}/WHEEL +0 -0
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc7.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,210 @@
+import math
+import mlx.core as mx
+import mlx.nn as nn
+
+def _gather_sort(x, indices):
+    *_, M = indices.shape
+    indices = indices.flatten()
+    order = mx.argsort(indices)
+    inv_order = mx.argsort(order)
+    return x.flatten(0, -3)[order // M], indices[order], inv_order
+
+
+def _scatter_unsort(x, inv_order, shape=None):
+    x = x[inv_order]
+    if shape is not None:
+        x = mx.unflatten(x, 0, shape)
+    return x
+
+
+class QuantizedSwitchLinear(nn.Module):
+    def __init__(
+        self,
+        input_dims: int,
+        output_dims: int,
+        num_experts: int,
+        bias: bool = True,
+        group_size: int = 64,
+        bits: int = 4,
+    ):
+        super().__init__()
+
+        scale = math.sqrt(1 / input_dims)
+        self.weight, self.scales, self.biases = mx.quantize(
+            mx.random.uniform(
+                low=-scale,
+                high=scale,
+                shape=(num_experts, output_dims, input_dims),
+            ),
+            group_size=group_size,
+            bits=bits,
+        )
+
+        if bias:
+            self.bias = mx.zeros((num_experts, output_dims))
+
+        self.group_size = group_size
+        self.bits = bits
+
+        # Freeze this model's parameters
+        self.freeze()
+
+    def unfreeze(self, *args, **kwargs):
+        """Wrap unfreeze so that we unfreeze any layers we might contain but
+        our parameters will remain frozen."""
+        super().unfreeze(*args, **kwargs)
+        self.freeze(recurse=False)
+
+    @property
+    def input_dims(self):
+        return self.scales.shape[2] * self.group_size
+
+    @property
+    def output_dims(self):
+        return self.weight.shape[1]
+
+    @property
+    def num_experts(self):
+        return self.weight.shape[0]
+
+    def __call__(self, x, indices, sorted_indices=False):
+        x = mx.gather_qmm(
+            x,
+            self["weight"],
+            self["scales"],
+            self["biases"],
+            rhs_indices=indices,
+            transpose=True,
+            group_size=self.group_size,
+            bits=self.bits,
+            sorted_indices=sorted_indices,
+        )
+        if "bias" in self:
+            x = x + mx.expand_dims(self["bias"][indices], -2)
+        return x
+
+
+class SwitchLinear(nn.Module):
+    def __init__(
+        self, input_dims: int, output_dims: int, num_experts: int, bias: bool = True
+    ):
+        super().__init__()
+        scale = math.sqrt(1 / input_dims)
+        self.weight = mx.random.uniform(
+            low=-scale,
+            high=scale,
+            shape=(num_experts, output_dims, input_dims),
+        )
+
+        if bias:
+            self.bias = mx.zeros((num_experts, output_dims))
+
+    @property
+    def input_dims(self):
+        return self.weight.shape[2]
+
+    @property
+    def output_dims(self):
+        return self.weight.shape[1]
+
+    @property
+    def num_experts(self):
+        return self.weight.shape[0]
+
+    def __call__(self, x, indices, sorted_indices=False):
+        x = mx.gather_mm(
+            x,
+            self["weight"].swapaxes(-1, -2),
+            lhs_indices=None,
+            rhs_indices=indices,
+        )
+        if "bias" in self:
+            x = x + mx.expand_dims(self["bias"][indices], -2)
+        return x
+
+    def to_quantized(self, group_size: int = 64, bits: int = 4):
+        num_experts, output_dims, input_dims = self.weight.shape
+        ql = QuantizedSwitchLinear(
+            input_dims, output_dims, num_experts, False, group_size, bits
+        )
+        ql.weight, ql.scales, ql.biases = mx.quantize(self.weight, group_size, bits)
+        if "bias" in self:
+            ql.bias = self.bias
+        return ql
+
+
+class SwitchGLU(nn.Module):
+    def __init__(
+        self,
+        input_dims: int,
+        hidden_dims: int,
+        num_experts: int,
+        activation=nn.SiLU(),
+        bias: bool = False,
+    ):
+        super().__init__()
+
+        self.gate_proj = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
+        self.up_proj = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
+        self.down_proj = SwitchLinear(hidden_dims, input_dims, num_experts, bias=bias)
+        self.activation = activation
+
+    def __call__(self, x, indices) -> mx.array:
+        x = mx.expand_dims(x, (-2, -3))
+
+        # When we have many tokens, then sort them to make sure that the access
+        # of different experts is in order.
+        do_sort = indices.size >= 64
+        idx = indices
+        inv_order = None
+        if do_sort:
+            x, idx, inv_order = _gather_sort(x, indices)
+
+        x_up = self.up_proj(x, idx, sorted_indices=do_sort)
+        x_gate = self.gate_proj(x, idx, sorted_indices=do_sort)
+        x = self.down_proj(
+            self.activation(x_gate) * x_up,
+            idx,
+            sorted_indices=do_sort,
+        )
+
+        if do_sort:
+            x = _scatter_unsort(x, inv_order, indices.shape)
+
+        return x.squeeze(-2)
+
+
+class SwitchMLP(nn.Module):
+    def __init__(
+        self,
+        input_dims: int,
+        hidden_dims: int,
+        num_experts: int,
+        activation=nn.GELU(approx="precise"),
+        bias: bool = False,
+    ):
+        super().__init__()
+
+        self.fc1 = SwitchLinear(input_dims, hidden_dims, num_experts, bias=bias)
+        self.fc2 = SwitchLinear(hidden_dims, input_dims, num_experts, bias=bias)
+        self.activation = activation
+
+    def __call__(self, x, indices) -> mx.array:
+        x = mx.expand_dims(x, (-2, -3))
+
+        # When we have many tokens, then sort them to make sure that the access
+        # of different experts is in order.
+        do_sort = indices.size >= 64
+        idx = indices
+        inv_order = None
+        if do_sort:
+            x, idx, inv_order = _gather_sort(x, indices)
+
+        x = self.fc1(x, idx, sorted_indices=do_sort)
+        x = self.activation(x)
+        x = self.fc2(x, idx, sorted_indices=do_sort)
+
+        if do_sort:
+            x = _scatter_unsort(x, inv_order, indices.shape)
+
+        return x.squeeze(-2)
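The hunk above adds MLX mixture-of-experts "switch" layers: `SwitchLinear`/`QuantizedSwitchLinear` run per-token expert matmuls via `mx.gather_mm`/`mx.gather_qmm`, and `SwitchGLU`/`SwitchMLP` sort tokens by expert (once 64 or more are routed) so expert weights are accessed contiguously. A minimal sketch of how such a layer might be driven by a router, assuming `SwitchGLU` from the hunk is in scope; the gating network, dimensions, and top-k value here are illustrative, not part of the release:

```python
import mlx.core as mx
import mlx.nn as nn

# Hypothetical wiring, for illustration only.
dims, hidden_dims, num_experts, top_k = 64, 256, 8, 2

router = nn.Linear(dims, num_experts)        # toy gating network
experts = SwitchGLU(dims, hidden_dims, num_experts)

x = mx.random.normal((1, 10, dims))          # (batch, tokens, dims)
logits = router(x)                           # (1, 10, num_experts)
# Pick the top_k experts per token; these indices drive the gather_mm calls.
indices = mx.argpartition(-logits, kth=top_k - 1, axis=-1)[..., :top_k]

y = experts(x, indices)                      # (1, 10, top_k, dims)
# A real MoE block would weight y by the softmaxed router scores and sum
# over the top_k axis.
```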
@@ -0,0 +1,62 @@
+import mlx.core as mx
+import numpy as np
+
+from ..idefics3 import LanguageModel
+from ..idefics3 import Model as Idefics3Model
+from ..idefics3 import ModelConfig, TextConfig, VisionConfig, VisionModel
+
+
+class Model(Idefics3Model):
+    def _prepare_inputs_for_multimodal(self, image_features, inputs_embeds, input_ids):
+        # Assumes bs == 1
+
+        B, T, D_text = inputs_embeds.shape
+        N, S, D_img = image_features.shape
+
+        image_offset = 0
+        cur_embeds = inputs_embeds[0]
+
+        # Find positions of <image> tokens in the text
+        image_token_index = self.config.image_token_index
+        image_positions = np.where(input_ids == image_token_index)[1].tolist()
+        num_image_tokens = len(image_positions)
+
+        # If no <image> => text-only
+        if num_image_tokens == 0:
+            empty_slice = image_features[0][:0, :]  # shape (0, D)
+            return mx.concatenate([cur_embeds, empty_slice], axis=0)
+
+        # Typically, if each image is S embeddings, we expect the total # of <image> tokens
+        # in this sample to be multiple of S => each group of S tokens = 1 image
+        if num_image_tokens % S != 0:
+            raise ValueError(
+                f"Input has {num_image_tokens} <image> tokens, not a multiple of S={S}. "
+                "Cannot map them to blocks of shape (S, D)."
+            )
+
+        chunks = [image_positions[i : i + S] for i in range(0, num_image_tokens, S)]
+
+        segments = []
+        text_start = 0
+
+        # For each chunk (each chunk => 1 image)
+        for chunk in chunks:
+            cur_block = image_features[image_offset]
+            image_offset += 1
+
+            # We'll iterate over the S positions in ascending order
+            for i_s, pos in enumerate(chunk):
+                if pos > text_start:
+                    segments.append(cur_embeds[text_start:pos])
+                # Then add one row from cur_block => shape (1, D)
+                row_of_block = cur_block[i_s : i_s + 1, :]
+                segments.append(row_of_block)
+                text_start = pos + 1
+
+        # leftover text after the final <image> token
+        if text_start < T:
+            segments.append(cur_embeds[text_start:])
+
+        # cat them into a single (T_b, D) tensor
+        merged_sample = mx.concatenate(segments, axis=0)
+        return mx.expand_dims(merged_sample, axis=0)
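The 62-line hunk above (it matches the `+62` smolvlm.py entry in the file list) subclasses the Idefics3 model and splices image features into the text embeddings: each run of S consecutive `<image>` tokens is replaced, row by row, by the S rows of one image-feature block, with batch size 1 assumed. A NumPy-only toy version of that splice, with hypothetical shapes and a made-up image-token id:

```python
import numpy as np

# Illustrative values only; IMG stands in for config.image_token_index.
IMG, S, D = 99, 3, 4
input_ids = np.array([[1, IMG, IMG, IMG, 2, 3]])           # (1, T)
cur_embeds = np.arange(6 * D, dtype=np.float32).reshape(6, D)
image_block = -np.ones((S, D), dtype=np.float32)           # one (S, D) image

positions = np.where(input_ids == IMG)[1].tolist()
segments, text_start = [], 0
for i_s, pos in enumerate(positions):
    if pos > text_start:
        segments.append(cur_embeds[text_start:pos])         # text before token
    segments.append(image_block[i_s : i_s + 1])             # one image row
    text_start = pos + 1
segments.append(cur_embeds[text_start:])                    # trailing text

merged = np.concatenate(segments, axis=0)
print(merged.shape)   # (6, 4): image rows sit where the <image> tokens were
```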
@@ -0,0 +1,209 @@
+# Copied from transformers. Removed video-related code.
+
+from typing import Optional, Union
+
+import numpy as np
+
+from transformers.feature_extraction_utils import BatchFeature
+from transformers.image_utils import ImageInput
+from transformers.processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
+from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
+
+
+class Qwen2_5_VLImagesKwargs(ImagesKwargs):
+    min_pixels: Optional[int]
+    max_pixels: Optional[int]
+    patch_size: Optional[int]
+    temporal_patch_size: Optional[int]
+    merge_size: Optional[int]
+
+
+class Qwen2_5_VLProcessorKwargs(ProcessingKwargs, total=False):
+    images_kwargs: Qwen2_5_VLImagesKwargs
+    _defaults = {
+        "text_kwargs": {
+            "padding": False,
+            "return_mm_token_type_ids": False,
+        },
+    }
+
+
+class Qwen2_5_VLProcessor(ProcessorMixin):
+    r"""
+    Constructs a Qwen2.5-VL processor which wraps a Qwen2.5-VL image processor and a Qwen2 tokenizer into a single processor.
+    [`Qwen2_5_VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
+    [`~Qwen2_5_VLProcessor.__call__`] and [`~Qwen2_5_VLProcessor.decode`] for more information.
+    Args:
+        image_processor ([`Qwen2VLImageProcessor`], *optional*):
+            The image processor is a required input.
+        tokenizer ([`Qwen2TokenizerFast`], *optional*):
+            The tokenizer is a required input.
+        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
+            in a chat into a tokenizable string.
+    """
+
+    attributes = ["image_processor", "tokenizer"]
+
+    image_processor_class = "AutoImageProcessor"
+    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
+
+    def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
+        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
+        self.image_token_id = (
+            tokenizer.image_token_id
+            if getattr(tokenizer, "image_token_id", None)
+            else tokenizer.convert_tokens_to_ids(self.image_token)
+        )
+        super().__init__(image_processor, tokenizer, chat_template=chat_template)
+
+    def __call__(
+        self,
+        images: ImageInput = None,
+        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
+        **kwargs: Unpack[Qwen2_5_VLProcessorKwargs],
+    ) -> BatchFeature:
+        """
+        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
+        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
+        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to
+        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
+
+        Args:
+            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
+                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                tensor. Both channels-first and channels-last formats are supported.
+            text (`str`, `list[str]`, `list[list[str]]`):
+                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+            return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors of a particular framework. Acceptable values are:
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return NumPy `np.ndarray` objects.
+                - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+        Returns:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+              `None`).
+            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
+        """
+        output_kwargs = self._merge_kwargs(
+            Qwen2_5_VLProcessorKwargs,
+            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+            **kwargs,
+        )
+
+        image_inputs = {}
+        if images is not None:
+            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
+            image_grid_thw = image_inputs["image_grid_thw"]
+
+        if not isinstance(text, list):
+            text = [text]
+
+        text = text.copy()  # below lines change text in-place
+        if images is not None:
+            merge_length = self.image_processor.merge_size**2
+            index = 0
+            for i in range(len(text)):
+                while self.image_token in text[i]:
+                    num_image_tokens = image_grid_thw[index].prod() // merge_length
+                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
+                    index += 1
+                text[i] = text[i].replace("<|placeholder|>", self.image_token)
+
+        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
+        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
+        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
+        self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
+
+        if return_mm_token_type_ids:
+            array_ids = np.array(text_inputs["input_ids"])
+            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
+            mm_token_type_ids[array_ids == self.image_token_id] = 1
+            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
+
+        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
+
+    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
+        """
+        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
+        Args:
+            image_sizes (`list[list[int]]`, *optional*):
+                The input sizes formatted as (height, width) per each image.
+        Returns:
+            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
+            input modalities, along with other useful data.
+        """
+
+        vision_data = {}
+        if image_sizes is not None:
+            images_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get("images_kwargs", {})
+            images_kwargs.update(kwargs)
+            merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
+
+            num_image_patches = [
+                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
+                for image_size in image_sizes
+            ]
+            num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
+            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
+
+        return MultiModalData(**vision_data)
+
+    def batch_decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+        refer to the docstring of this method for more information.
+        """
+        return self.tokenizer.batch_decode(*args, **kwargs)
+
+    def decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+        the docstring of this method for more information.
+        """
+        return self.tokenizer.decode(*args, **kwargs)
+
+    def post_process_image_text_to_text(
+        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
+    ):
+        """
+        Post-process the output of the model to decode the text.
+
+        Args:
+            generated_outputs (`torch.Tensor` or `np.ndarray`):
+                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
+                or `(sequence_length,)`.
+            skip_special_tokens (`bool`, *optional*, defaults to `True`):
+                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
+            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
+            **kwargs:
+                Additional arguments to be passed to the tokenizer's `batch_decode method`.
+
+        Returns:
+            `list[str]`: The decoded text.
+        """
+        return self.tokenizer.batch_decode(
+            generated_outputs,
+            skip_special_tokens=skip_special_tokens,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    @property
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+        return names_from_processor
+
+
+__all__ = ["Qwen2_5_VLProcessor"]
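In the processor above, each `<|image_pad|>` occurrence in the prompt is expanded before tokenization to `image_grid_thw.prod() // merge_size**2` copies of the image token, i.e. one token per merge_size x merge_size block of vision patches. A quick sanity check of that arithmetic with made-up numbers (not taken from the diff):

```python
import numpy as np

# Hypothetical example: a 1008x1008 image at patch size 14 yields a
# (t, h, w) patch grid of (1, 72, 72); merge_size 2 is illustrative.
image_grid_thw = np.array([1, 72, 72])
merge_size = 2

merge_length = merge_size ** 2
num_image_tokens = int(image_grid_thw.prod() // merge_length)
print(num_image_tokens)   # 1296 image-pad tokens inserted into the prompt
```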