nexaai 1.0.29__cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nexaai/__init__.py +99 -0
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +4 -0
- nexaai/asr.py +68 -0
- nexaai/asr_impl/__init__.py +0 -0
- nexaai/asr_impl/mlx_asr_impl.py +93 -0
- nexaai/asr_impl/pybind_asr_impl.py +127 -0
- nexaai/base.py +39 -0
- nexaai/binds/__init__.py +7 -0
- nexaai/binds/asr_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/common_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/cpu_gpu/libggml-base.dylib +0 -0
- nexaai/binds/cpu_gpu/libggml-cpu.so +0 -0
- nexaai/binds/cpu_gpu/libggml-metal.so +0 -0
- nexaai/binds/cpu_gpu/libggml.dylib +0 -0
- nexaai/binds/cpu_gpu/libmtmd.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_cpu_gpu.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_plugin.dylib +0 -0
- nexaai/binds/cv_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/diarize_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/embedder_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/llm_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/metal/libnexa_plugin.dylib +0 -0
- nexaai/binds/metal/py-lib/ml.py +888 -0
- nexaai/binds/metal/py-lib/mlx_audio/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/binds/metal/py-lib/mlx_audio/server.py +525 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/generate.py +174 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/utils.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/audio_player.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/convert.py +71 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/generate.py +449 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/base.py +84 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/utils.py +337 -0
- nexaai/binds/metal/py-lib/mlx_audio/utils.py +237 -0
- nexaai/binds/metal/py-lib/mlx_audio/version.py +1 -0
- nexaai/binds/metal/py-lib/profiling.py +239 -0
- nexaai/binds/nexaml/libfftw3.3.dylib +0 -0
- nexaai/binds/nexaml/libfftw3f.3.dylib +0 -0
- nexaai/binds/nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexaml/libggml.dylib +0 -0
- nexaai/binds/nexaml/libmp3lame.0.dylib +0 -0
- nexaai/binds/nexaml/libmpg123.0.dylib +0 -0
- nexaai/binds/nexaml/libnexa-mm-process.dylib +0 -0
- nexaai/binds/nexaml/libnexa-sampling.dylib +0 -0
- nexaai/binds/nexaml/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexaml/libnexaproc.dylib +0 -0
- nexaai/binds/nexaml/libomp.dylib +0 -0
- nexaai/binds/nexaml/libqwen3-vl.dylib +0 -0
- nexaai/binds/nexaml/libqwen3vl-vision.dylib +0 -0
- nexaai/binds/rerank_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/vlm_bind.cpython-310-darwin.so +0 -0
- nexaai/common.py +106 -0
- nexaai/cv.py +95 -0
- nexaai/cv_impl/__init__.py +0 -0
- nexaai/cv_impl/mlx_cv_impl.py +91 -0
- nexaai/cv_impl/pybind_cv_impl.py +124 -0
- nexaai/diarize.py +80 -0
- nexaai/diarize_impl/__init__.py +1 -0
- nexaai/diarize_impl/pybind_diarize_impl.py +125 -0
- nexaai/embedder.py +73 -0
- nexaai/embedder_impl/__init__.py +0 -0
- nexaai/embedder_impl/mlx_embedder_impl.py +118 -0
- nexaai/embedder_impl/pybind_embedder_impl.py +96 -0
- nexaai/image_gen.py +141 -0
- nexaai/image_gen_impl/__init__.py +0 -0
- nexaai/image_gen_impl/mlx_image_gen_impl.py +292 -0
- nexaai/image_gen_impl/pybind_image_gen_impl.py +85 -0
- nexaai/llm.py +98 -0
- nexaai/llm_impl/__init__.py +0 -0
- nexaai/llm_impl/mlx_llm_impl.py +271 -0
- nexaai/llm_impl/pybind_llm_impl.py +238 -0
- nexaai/log.py +92 -0
- nexaai/mlx_backend/asr/__init__.py +12 -0
- nexaai/mlx_backend/asr/interface.py +122 -0
- nexaai/mlx_backend/common/__init__.py +0 -0
- nexaai/mlx_backend/common/utils.py +25 -0
- nexaai/mlx_backend/cv/__init__.py +0 -0
- nexaai/mlx_backend/cv/generate.py +195 -0
- nexaai/mlx_backend/cv/interface.py +162 -0
- nexaai/mlx_backend/cv/main.py +81 -0
- nexaai/mlx_backend/cv/modeling/pp_ocr_v4.py +1736 -0
- nexaai/mlx_backend/embedding/__init__.py +0 -0
- nexaai/mlx_backend/embedding/generate.py +333 -0
- nexaai/mlx_backend/embedding/interface.py +617 -0
- nexaai/mlx_backend/embedding/main.py +173 -0
- nexaai/mlx_backend/embedding/modeling/__init__.py +0 -0
- nexaai/mlx_backend/embedding/modeling/nexa_jina_v2.py +399 -0
- nexaai/mlx_backend/image_gen/__init__.py +1 -0
- nexaai/mlx_backend/image_gen/generate_sd.py +244 -0
- nexaai/mlx_backend/image_gen/interface.py +82 -0
- nexaai/mlx_backend/image_gen/main.py +281 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/__init__.py +306 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/clip.py +116 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/config.py +65 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/model_io.py +386 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/sampler.py +105 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/tokenizer.py +100 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/unet.py +460 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/vae.py +274 -0
- nexaai/mlx_backend/llm/__init__.py +0 -0
- nexaai/mlx_backend/llm/generate.py +149 -0
- nexaai/mlx_backend/llm/interface.py +764 -0
- nexaai/mlx_backend/llm/main.py +68 -0
- nexaai/mlx_backend/ml.py +888 -0
- nexaai/mlx_backend/mlx_audio/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/mlx_backend/mlx_audio/server.py +525 -0
- nexaai/mlx_backend/mlx_audio/sts/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/mlx_backend/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/mlx_backend/mlx_audio/stt/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/generate.py +174 -0
- nexaai/mlx_backend/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/mlx_backend/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/mlx_backend/mlx_audio/stt/utils.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/audio_player.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/convert.py +71 -0
- nexaai/mlx_backend/mlx_audio/tts/generate.py +449 -0
- nexaai/mlx_backend/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/mlx_backend/mlx_audio/tts/models/base.py +84 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/mlx_backend/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/default_speaker.json +461 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/mlx_backend/mlx_audio/tts/utils.py +337 -0
- nexaai/mlx_backend/mlx_audio/utils.py +237 -0
- nexaai/mlx_backend/mlx_audio/version.py +1 -0
- nexaai/mlx_backend/profiling.py +239 -0
- nexaai/mlx_backend/rerank/__init__.py +0 -0
- nexaai/mlx_backend/rerank/generate.py +174 -0
- nexaai/mlx_backend/rerank/interface.py +287 -0
- nexaai/mlx_backend/rerank/main.py +127 -0
- nexaai/mlx_backend/rerank/modeling/__init__.py +0 -0
- nexaai/mlx_backend/rerank/modeling/nexa_jina_rerank.py +330 -0
- nexaai/mlx_backend/sd/__init__.py +1 -0
- nexaai/mlx_backend/sd/interface.py +362 -0
- nexaai/mlx_backend/sd/main.py +286 -0
- nexaai/mlx_backend/sd/modeling/__init__.py +306 -0
- nexaai/mlx_backend/sd/modeling/clip.py +116 -0
- nexaai/mlx_backend/sd/modeling/config.py +65 -0
- nexaai/mlx_backend/sd/modeling/model_io.py +385 -0
- nexaai/mlx_backend/sd/modeling/sampler.py +105 -0
- nexaai/mlx_backend/sd/modeling/tokenizer.py +100 -0
- nexaai/mlx_backend/sd/modeling/unet.py +460 -0
- nexaai/mlx_backend/sd/modeling/vae.py +274 -0
- nexaai/mlx_backend/tts/__init__.py +12 -0
- nexaai/mlx_backend/tts/interface.py +276 -0
- nexaai/mlx_backend/vlm/__init__.py +3 -0
- nexaai/mlx_backend/vlm/generate.py +572 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +374 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +259 -0
- nexaai/mlx_backend/vlm/interface.py +559 -0
- nexaai/mlx_backend/vlm/main.py +365 -0
- nexaai/mlx_backend/vlm/modeling/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/convert.py +68 -0
- nexaai/mlx_backend/vlm/modeling/models/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/aya_vision.py +193 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/interpolate.py +186 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/language.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/vision.py +503 -0
- nexaai/mlx_backend/vlm/modeling/models/base.py +202 -0
- nexaai/mlx_backend/vlm/modeling/models/cache.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/conversation.py +264 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +472 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/language.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/vision.py +356 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/florence2.py +366 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/language.py +488 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/vision.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/gemma3.py +213 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/language.py +315 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/vision.py +238 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/audio.py +1038 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/config.py +139 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/gemma3n.py +322 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/language.py +629 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/vision.py +1022 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/idefics2.py +294 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/language.py +191 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/vision.py +267 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/idefics3.py +175 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/language.py +192 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/vision.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/internvl_chat.py +140 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/language.py +220 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/processor.py +393 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/vision.py +293 -0
- nexaai/mlx_backend/vlm/modeling/models/kernels.py +307 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/kimi_vl.py +143 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/language.py +509 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/vision.py +522 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/language.py +386 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/llama4.py +138 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/vision.py +560 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/language.py +240 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/llava.py +153 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/vision.py +259 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/language.py +236 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/llava_bunny.py +256 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/vision.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/llava_next.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/vision.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/mistral3.py +283 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/language.py +416 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/mllama.py +172 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/vision.py +499 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/language.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/molmo.py +133 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/vision.py +465 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/multi_modality.py +385 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/sam.py +557 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/vision.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/language.py +282 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/paligemma.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/vision.py +242 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/language.py +21 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/phi3_v.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/su_rope.py +71 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/vision.py +324 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/language.py +229 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/pixtral.py +161 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/vision.py +320 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/config.py +108 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +168 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/vision.py +414 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/config.py +104 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/qwen2_vl.py +167 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/vision.py +312 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +1262 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1308 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/smolvlm.py +62 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_5_vl.py +209 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_vl.py +215 -0
- nexaai/mlx_backend/vlm/modeling/prompt_utils.py +474 -0
- nexaai/mlx_backend/vlm/modeling/sample_utils.py +39 -0
- nexaai/mlx_backend/vlm/modeling/tokenizer_utils.py +344 -0
- nexaai/mlx_backend/vlm/modeling/trainer/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/trainer/lora.py +70 -0
- nexaai/mlx_backend/vlm/modeling/trainer/trainer.py +296 -0
- nexaai/mlx_backend/vlm/modeling/trainer/utils.py +160 -0
- nexaai/mlx_backend/vlm/modeling/utils.py +928 -0
- nexaai/rerank.py +57 -0
- nexaai/rerank_impl/__init__.py +0 -0
- nexaai/rerank_impl/mlx_rerank_impl.py +94 -0
- nexaai/rerank_impl/pybind_rerank_impl.py +136 -0
- nexaai/runtime.py +68 -0
- nexaai/runtime_error.py +24 -0
- nexaai/tts.py +75 -0
- nexaai/tts_impl/__init__.py +0 -0
- nexaai/tts_impl/mlx_tts_impl.py +94 -0
- nexaai/tts_impl/pybind_tts_impl.py +43 -0
- nexaai/utils/decode.py +18 -0
- nexaai/utils/manifest_utils.py +531 -0
- nexaai/utils/model_manager.py +1745 -0
- nexaai/utils/model_types.py +49 -0
- nexaai/utils/progress_tracker.py +389 -0
- nexaai/utils/quantization_utils.py +245 -0
- nexaai/vlm.py +130 -0
- nexaai/vlm_impl/__init__.py +0 -0
- nexaai/vlm_impl/mlx_vlm_impl.py +259 -0
- nexaai/vlm_impl/pybind_vlm_impl.py +275 -0
- nexaai-1.0.29.dist-info/METADATA +35 -0
- nexaai-1.0.29.dist-info/RECORD +580 -0
- nexaai-1.0.29.dist-info/WHEEL +5 -0
- nexaai-1.0.29.dist-info/top_level.txt +1 -0
@@ -0,0 +1,374 @@ nexaai/mlx_backend/vlm/generate_qwen3_vl.py

import argparse
import json
import os
import mlx.core as mx
import mlx.nn as nn
import time
from PIL import Image
import requests
import numpy as np
from pathlib import Path
from huggingface_hub import snapshot_download
from dataclasses import dataclass
from typing import Any, Generator, List, Optional, Sequence, Tuple, Union

# Import required modules for quantized loading
from transformers import AutoTokenizer

# Import from the nested modeling structure
from .modeling.models.qwen3_vl.llm_common.generate import nexa_generate_step
from .modeling.models.qwen3_vl.llm_common.cache import make_prompt_cache
from .modeling.models.qwen3_vl.qwen3vl import (
    VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
)
from .modeling.models.qwen3_vl.processor import Qwen3VLProcessor
from .generate import GenerationResult
from ml import ChatMessage


# Custom exception for context length exceeded
class ContextLengthExceededError(Exception):
    """Raised when input context length exceeds model's maximum context size"""
    pass


@dataclass
class Qwen3VLBundledModel:
    """Container for Qwen3-VL vision and language models."""
    vision_model: VEGModel
    llm_model: LLMModel


def _ensure_list(x: Union[str, List[str], None]) -> Optional[List[str]]:
    if x is None:
        return None
    return x if isinstance(x, list) else [x]


def get_model_configs(model_name: str):
    """Get model configurations based on model name"""

    # 4B model configs (default)
    if model_name in ["qwen3vl", "qwen3vl-4b", "qwen3vl-4b-thinking"]:
        vision_config = VisionConfig(
            hidden_size=1024,
            intermediate_size=4096,
            num_heads=16,
            num_hidden_layers=24,
            patch_size=16,
            temporal_patch_size=2,
            in_channels=3,
            hidden_act="gelu",
            spatial_merge_size=2,
            out_hidden_size=2560,
            num_position_embeddings=2304,
            deepstack_visual_indexes=[5, 11, 17],
        )

        text_config = TextConfig(
            model_type="qwen3vl",
            hidden_size=2560,
            num_hidden_layers=36,
            intermediate_size=9728,
            num_attention_heads=32,
            num_key_value_heads=8,
            rms_norm_eps=1e-6,
            vocab_size=151936,
            max_position_embeddings=32768,
            rope_theta=5000000.0,
            head_dim=128,
            tie_word_embeddings=True,
            attention_bias=False,
            attention_dropout=0.0,
            rope_scaling={"mrope_section": [24, 20, 20],
                          "rope_type": "default", "type": "default"},
        )

    # 8B model configs
    elif model_name in ["qwen3vl-8b", "qwen3vl-8b-thinking"]:
        vision_config = VisionConfig(
            hidden_size=1152,
            intermediate_size=4304,
            num_heads=16,
            num_hidden_layers=27,
            patch_size=16,
            temporal_patch_size=2,
            in_channels=3,
            hidden_act="gelu",
            spatial_merge_size=2,
            out_hidden_size=4096,
            num_position_embeddings=2304,
            deepstack_visual_indexes=[8, 16, 24],
        )

        text_config = TextConfig(
            model_type="qwen3vl",
            hidden_size=4096,
            num_hidden_layers=36,
            intermediate_size=12288,
            num_attention_heads=32,
            num_key_value_heads=8,
            rms_norm_eps=1e-6,
            vocab_size=151936,
            max_position_embeddings=262144,
            rope_theta=5000000,
            head_dim=128,
            tie_word_embeddings=False,
            attention_bias=False,
            attention_dropout=0.0,
            rope_scaling={"mrope_section": [24, 20, 20], "rope_type": "default", "mrope_interleaved": True},
        )
    else:
        # Fallback to 4B config
        return get_model_configs("qwen3vl-4b")

    return vision_config, text_config


def get_weight_filenames(model_name: str, model_path: Path):
    """Get appropriate weight filenames based on model name and available files"""

    # Determine model size and type based on the actual file structure
    if "4b" in model_name:
        size_prefix = "4b"
    elif "8b" in model_name:
        size_prefix = "8b"
    else:
        size_prefix = "4b"

    # Determine model type
    if "thinking" in model_name:
        model_type = f"{size_prefix}_thinking"
    else:
        model_type = f"{size_prefix}_instruct"

    # Try different weight file patterns matching the actual file structure
    llm_patterns = [
        # New naming convention matching actual files
        f"qwen3vl-llm-{model_type}-q4_0.safetensors",
        f"qwen3vl-llm-{model_type}-q8_0.safetensors",
        f"qwen3vl-llm-{model_type}-f16.safetensors",
        # Legacy naming convention
        f"qwen3vl-llm-{size_prefix.upper()}-q4_0.safetensors",
        f"qwen3vl-llm-{size_prefix.upper()}-q8_0.safetensors",
        f"qwen3vl-llm-{size_prefix.upper()}-f16.safetensors",
        f"qwen3vl-llm-{size_prefix.upper()}-f32.safetensors",
    ]

    vision_patterns = [
        f"qwen3vl-vision-{model_type}-f16.safetensors",
        f"qwen3vl-vision-{size_prefix.upper()}-f16.safetensors",
    ]

    # Find LLM weights
    llm_weights_path = None
    quantization_bits = None

    for pattern in llm_patterns:
        candidate_path = model_path / pattern
        if candidate_path.exists():
            llm_weights_path = candidate_path
            if "q4_0" in pattern:
                quantization_bits = 4
            elif "q8_0" in pattern:
                quantization_bits = 8
            else:
                quantization_bits = 16
            break

    # Find vision weights
    vision_weights_path = None
    for pattern in vision_patterns:
        candidate_path = model_path / pattern
        if candidate_path.exists():
            vision_weights_path = candidate_path
            break

    return llm_weights_path, vision_weights_path, quantization_bits


# Update the load_qwen3_vl function signature and implementation:
def load_qwen3_vl(
    path_or_repo: str,
    adapter_path: Optional[str] = None,
    lazy: bool = False,
    revision: Optional[str] = None,
    model_name: Optional[str] = None,
    **kwargs,
) -> Tuple[Qwen3VLBundledModel, Qwen3VLProcessor]:
    """Load Qwen3-VL quantized models and processor with support for different model sizes."""

    model_path = Path(path_or_repo)
    if not model_path.exists():
        if "/" in path_or_repo:
            model_path = Path(snapshot_download(
                repo_id=path_or_repo, repo_type="model", revision=revision))
        else:
            # Fallback to local modelfiles directory relative to this file
            curr_dir = Path(__file__).parent
            model_path = curr_dir / "modeling" / "models" / "qwen3_vl" / "modelfiles"
            if not model_path.exists():
                model_path = curr_dir / "modelfiles"

    # Get model configurations based on model name
    if model_name:
        vision_config, text_config = get_model_configs(model_name)
    else:
        # Default to 4B config
        vision_config, text_config = get_model_configs("qwen3vl-4b")

    vision_model = VEGModel(vision_config)
    llm_model = LLMModel(text_config)

    # Get appropriate weight filenames
    llm_weights_path, vision_weights_path, quantization_bits = get_weight_filenames(
        model_name or "qwen3vl-4b", model_path
    )

    if not vision_weights_path or not llm_weights_path:
        raise FileNotFoundError(
            f"Missing safetensors. Vision: {vision_weights_path}, LLM: {llm_weights_path}"
        )

    # Load weights (vision fp16, llm with detected quantization)
    vision_model.set_dtype(mx.float16)
    vision_model.load_weights(str(vision_weights_path), strict=True)

    # Apply quantization if needed and load LLM weights
    if quantization_bits in [4, 8]:
        nn.quantize(llm_model, bits=quantization_bits, group_size=64,
                    class_predicate=quant_predicate)

    llm_model.load_weights(str(llm_weights_path), strict=True)

    try:
        tokenizer = AutoTokenizer.from_pretrained(str(model_path))
    except Exception:
        try:
            tokenizer = AutoTokenizer.from_pretrained(path_or_repo)
        except Exception:
            raise Exception("Failed to load tokenizer from the same path where model weights are loaded and original path_or_repo.")

    processor = Qwen3VLProcessor(tokenizer=tokenizer)

    return Qwen3VLBundledModel(vision_model=vision_model, llm_model=llm_model), processor


def apply_chat_template_qwen3_vl(messages: Sequence[ChatMessage], num_images: int = 0, num_audios: int = 0, tools: Optional[str] = None, enable_thinking: bool = False) -> str:
    """Apply chat template: serialize messages with content as a list of typed items."""

    messages_dict = []
    for i, msg in enumerate(messages):
        content_items = [{"type": "text", "text": msg.content}]
        messages_dict.append({"role": msg.role, "content": content_items})

    result = json.dumps(messages_dict)

    return result


def stream_generate_qwen3_vl(
    model: Qwen3VLBundledModel,
    processor: Qwen3VLProcessor,
    prompt: str,
    image: Union[str, List[str]] = None,
    audio: Union[str, List[str]] = None,
    max_tokens: int = 512,
    **kwargs,
) -> Generator[Any, None, None]:
    """Stream generation yielding .generate.GenerationResult-compatible chunks."""

    try:
        messages = json.loads(prompt)
    except json.JSONDecodeError as e:
        raise

    if image is not None:
        image_list = image if isinstance(image, list) else [image]
        pil_images = []
        for i, p in enumerate(image_list):
            try:
                img = Image.open(p)
                pil_images.append(img)
            except Exception as e:
                continue

        contents = [{"type": "image", "image": img} for img in pil_images]
        if messages:
            if "content" not in messages[-1] or not isinstance(messages[-1]["content"], list):
                messages[-1]["content"] = []
            messages[-1]["content"].extend(contents)

    raw_text, processed_images = processor.messages_to_text(
        messages, add_generation_prompt=True)

    inputs = processor.text_to_input_ids(
        raw_text, images=processed_images, return_tensors="mlx")

    input_ids = inputs["input_ids"]
    pixel_values = inputs.get("pixel_values")
    image_grid_thw = inputs.get("image_grid_thw")

    # Check if input context exceeds KV cache size and raise error
    max_kv_size = 4096  # This should match the max_kv_size used in make_prompt_cache and nexa_generate_step
    if input_ids.size > max_kv_size:
        error_msg = f"Input context length ({input_ids.size} tokens) exceeds maximum supported context size ({max_kv_size} tokens). Please reduce the input length."
        raise ContextLengthExceededError(error_msg)

    inputs_embeds, deepstack_visual_embeds, visual_pos_masks, cos, sin, rope_deltas = handle_multimodal_embeds(
        model.vision_model, model.llm_model, input_ids, pixel_values, image_grid_thw
    )

    prompt_cache = make_prompt_cache(model.llm_model, max_kv_size=4096)
    tokenizer = processor.tokenizer

    # Rough prompt TPS estimation based on input size
    prompt_start = time.perf_counter()
    prompt_tps = input_ids.size / max(1e-6, (time.perf_counter() - prompt_start))

    gen_count = 0
    tic = time.perf_counter()

    try:
        for token, logprobs in nexa_generate_step(
            model=model.llm_model,
            prompt=None,
            input_embeddings=inputs_embeds,
            max_tokens=max_tokens,
            max_kv_size=4096,
            prompt_cache=prompt_cache,
            visual_pos_masks=visual_pos_masks,
            deepstack_visual_embeds=deepstack_visual_embeds,
            cos=cos,
            sin=sin,
            rope_deltas=rope_deltas,
        ):
            if token == tokenizer.eos_token_id:
                break

            text_piece = tokenizer.decode([token])
            gen_count += 1

            current_tps = gen_count / max(1e-6, (time.perf_counter() - tic))

            yield GenerationResult(
                text=text_piece,
                token=token,
                logprobs=logprobs,
                prompt_tokens=int(input_ids.size),
                generation_tokens=gen_count,
                prompt_tps=float(prompt_tps),
                generation_tps=float(current_tps),
                peak_memory=float(mx.get_peak_memory() / 1e9),
            )
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise


def quant_predicate(path: str, mod: nn.Module) -> bool:
    """Quantization predicate to exclude certain layers from quantization."""
    if path.endswith("lm_head") or "norm" in path.lower() or "embed" in path.lower():
        return False
    return isinstance(mod, (nn.Linear, nn.Embedding))
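For orientation, the following is a minimal usage sketch and not part of the package diff. It assumes the wheel's nexaai.mlx_backend.vlm package and the bundled ml helper are importable, that ChatMessage is a simple role/content dataclass (the module only reads msg.role and msg.content), and that the model directory, image path, and prompt text are all illustrative placeholders.

# Hypothetical sketch: drive the loader and streaming generator defined above.
from nexaai.mlx_backend.vlm.generate_qwen3_vl import (
    apply_chat_template_qwen3_vl,
    load_qwen3_vl,
    stream_generate_qwen3_vl,
)
from ml import ChatMessage  # same bundled helper the module itself imports

# Load the 4B instruct variant from a local directory of safetensors files;
# get_weight_filenames picks q4_0/q8_0/f16 weights in that order of preference.
model, processor = load_qwen3_vl("/path/to/qwen3vl-4b", model_name="qwen3vl-4b")

# stream_generate_qwen3_vl expects the prompt as the JSON string produced here,
# not raw text: it json.loads the prompt back into a message list.
prompt = apply_chat_template_qwen3_vl(
    [ChatMessage(role="user", content="Describe this image.")]
)

for chunk in stream_generate_qwen3_vl(
    model, processor, prompt, image="example.jpg", max_tokens=128
):
    print(chunk.text, end="", flush=True)

Note that prompts longer than the hard-coded 4096-token KV cache raise ContextLengthExceededError rather than being truncated.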
@@ -0,0 +1,259 @@ nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py

import argparse
import json
import os
import mlx.core as mx
import mlx.nn as nn
import time
from PIL import Image
import requests
import numpy as np
from pathlib import Path
from huggingface_hub import snapshot_download
from dataclasses import dataclass
from typing import Any, Generator, List, Optional, Sequence, Tuple, Union

# Import required modules for quantized loading
from transformers import AutoTokenizer

# Import from the nested modeling structure
from .modeling.models.qwen3vl_moe.llm_common.generate import nexa_generate_step
from .modeling.models.qwen3vl_moe.llm_common.cache import make_prompt_cache
from .modeling.models.qwen3vl_moe.qwen3vl_moe import (
    VEGModel, LLMModel, ModelArgs, VisionConfig, TextConfig, handle_multimodal_embeds
)
from .modeling.models.qwen3vl_moe.processor import Qwen3VLProcessor
from .generate import GenerationResult
from ml import ChatMessage


@dataclass
class Qwen3VLBundledModel:
    """Container for Qwen3-VL MoE vision and language models."""
    vision_model: VEGModel
    llm_model: LLMModel


def _ensure_list(x: Union[str, List[str], None]) -> Optional[List[str]]:
    if x is None:
        return None
    return x if isinstance(x, list) else [x]


def load_qwen3_vl(
    path_or_repo: str,
    adapter_path: Optional[str] = None,
    lazy: bool = False,
    revision: Optional[str] = None,
    **kwargs,
) -> Tuple[Qwen3VLBundledModel, Qwen3VLProcessor]:
    """Load Qwen3-VL MoE quantized models and processor.

    Parameters are aligned with .generate.load for compatibility.
    """
    model_path = Path(path_or_repo)
    if not model_path.exists():
        if "/" in path_or_repo:
            model_path = Path(snapshot_download(
                repo_id=path_or_repo, repo_type="model", revision=revision))
        else:
            # Fallback to local modelfiles directory relative to this file
            curr_dir = Path(__file__).parent
            model_path = curr_dir / "modeling" / "models" / "qwen3vl_moe" / "modelfiles"
            if not model_path.exists():
                model_path = curr_dir / "modelfiles"

    # Model configs - updated to match Qwen3VL-MoE specifications
    vision_config = VisionConfig(
        hidden_size=1152,
        intermediate_size=4304,
        num_heads=16,
        num_hidden_layers=27,
        patch_size=16,
        temporal_patch_size=2,
        in_channels=3,
        hidden_act="gelu_pytorch_tanh",
        spatial_merge_size=2,
        out_hidden_size=2048,
        num_position_embeddings=2304,
        deepstack_visual_indexes=[8, 16, 24],
    )

    text_config = TextConfig(
        model_type="qwen3_vl_moe_text",
        hidden_size=2048,
        num_hidden_layers=48,
        intermediate_size=6144,
        num_attention_heads=32,
        num_key_value_heads=4,
        rms_norm_eps=1e-6,
        vocab_size=152064,
        max_position_embeddings=128000,
        rope_theta=1000000.0,
        head_dim=128,
        tie_word_embeddings=False,
        attention_bias=False,
        attention_dropout=0.0,
        rope_scaling={
            "mrope_interleaved": True,
            "mrope_section": [24, 20, 20],
            "rope_type": "default"
        },
        # MoE specific parameters
        num_experts=128,
        num_experts_per_tok=8,
        moe_intermediate_size=768,
        shared_expert_intermediate_size=0,
        norm_topk_prob=True,
        decoder_sparse_step=1,
        max_window_layers=48,
        sliding_window=32768,
        mlp_only_layers=[],
        use_qk_norm=True,
        layer_types=[],
    )

    vision_model = VEGModel(vision_config)
    llm_model = LLMModel(text_config)

    # Try to load LLM model from available files in order of preference
    preferred_order = [
        ("qwen3vl-moe-llm-30B-A3B-q4_0.safetensors", 4),
        ("qwen3vl-moe-llm-30B-A3B-q8_0.safetensors", 8),
        ("qwen3vl-moe-llm-30B-A3B-f32.safetensors", 32),
    ]

    llm_weights_path = None
    quantization_bits = None

    # Try loading in order of preference
    for filename, bits in preferred_order:
        candidate_path = model_path / filename
        if candidate_path.exists():
            llm_weights_path = candidate_path
            quantization_bits = bits
            break

    if llm_weights_path is None:
        # Fallback to original hardcoded path for backward compatibility
        llm_weights_path = model_path / "qwen3vl-moe-llm-30B-A3B-q4_0.safetensors"
        quantization_bits = 4

    vision_weights_path = model_path / "qwen3vl-moe-vision-30B-A3B-f16.safetensors"

    if not vision_weights_path.exists():
        raise FileNotFoundError(
            f"Missing vision weights: {vision_weights_path}"
        )

    # Load weights (vision fp16, llm with detected quantization)
    vision_model.set_dtype(mx.float16)
    vision_model.load_weights(str(vision_weights_path), strict=True)

    # Apply quantization if needed and load LLM weights
    if quantization_bits in [4, 8]:
        nn.quantize(llm_model, bits=quantization_bits, group_size=64,
                    class_predicate=quant_predicate)
    # For f32 (32-bit), no quantization needed

    llm_model.load_weights(str(llm_weights_path), strict=True)

    # Tokenizer and processor
    tokenizer = AutoTokenizer.from_pretrained(path_or_repo)
    processor = Qwen3VLProcessor(tokenizer=tokenizer)

    return Qwen3VLBundledModel(vision_model=vision_model, llm_model=llm_model), processor


def apply_chat_template_qwen3_vl(messages: Sequence[ChatMessage], num_images: int = 0, num_audios: int = 0, tools: Optional[str] = None, enable_thinking: bool = False) -> str:
    """Apply chat template: serialize messages with content as a list of typed items."""
    messages_dict = []
    for msg in messages:
        content_items = [{"type": "text", "text": msg.content}]
        messages_dict.append({"role": msg.role, "content": content_items})
    return json.dumps(messages_dict)


def stream_generate_qwen3_vl(
    model: Qwen3VLBundledModel,
    processor: Qwen3VLProcessor,
    prompt: str,
    image: Union[str, List[str]] = None,
    audio: Union[str, List[str]] = None,
    max_tokens: int = 512,
    **kwargs,
) -> Generator[Any, None, None]:
    """Stream generation yielding .generate.GenerationResult-compatible chunks."""
    messages = json.loads(prompt)
    if image is not None:
        image_list = image if isinstance(image, list) else [image]
        pil_images = []
        for p in image_list:
            try:
                pil_images.append(Image.open(p))
            except Exception:
                continue
        contents = [{"type": "image", "image": img} for img in pil_images]
        if messages:
            if "content" not in messages[-1] or not isinstance(messages[-1]["content"], list):
                messages[-1]["content"] = []
            messages[-1]["content"].extend(contents)

    raw_text, processed_images = processor.messages_to_text(
        messages, add_generation_prompt=True)

    inputs = processor.text_to_input_ids(
        raw_text, images=processed_images, return_tensors="mlx")

    input_ids = inputs["input_ids"]
    pixel_values = inputs.get("pixel_values")
    image_grid_thw = inputs.get("image_grid_thw")

    inputs_embeds, deepstack_visual_embeds, visual_pos_masks, cos, sin, rope_deltas = handle_multimodal_embeds(
        model.vision_model, model.llm_model, input_ids, pixel_values, image_grid_thw
    )

    prompt_cache = make_prompt_cache(model.llm_model, max_kv_size=4096)
    tokenizer = processor.tokenizer

    # Rough prompt TPS estimation based on input size
    prompt_start = time.perf_counter()
    prompt_tps = input_ids.size / max(1e-6, (time.perf_counter() - prompt_start))

    gen_count = 0
    tic = time.perf_counter()

    for token, logprobs in nexa_generate_step(
        model=model.llm_model,
        prompt=None,
        input_embeddings=inputs_embeds,
        max_tokens=max_tokens,
        max_kv_size=4096,
        prompt_cache=prompt_cache,
        visual_pos_masks=visual_pos_masks,
        deepstack_visual_embeds=deepstack_visual_embeds,
        cos=cos,
        sin=sin,
        rope_deltas=rope_deltas,
    ):
        if token == tokenizer.eos_token_id:
            break

        text_piece = tokenizer.decode([token])
        gen_count += 1

        yield GenerationResult(
            text=text_piece,
            token=token,
            logprobs=logprobs,
            prompt_tokens=int(input_ids.size),
            generation_tokens=gen_count,
            prompt_tps=float(prompt_tps),
            generation_tps=float(
                gen_count / max(1e-6, (time.perf_counter() - tic))),
            peak_memory=float(mx.get_peak_memory() / 1e9),
        )


def quant_predicate(path: str, mod: nn.Module) -> bool:
    """Quantization predicate to exclude certain layers from quantization."""
    if path.endswith("lm_head") or "norm" in path.lower() or "embed" in path.lower():
        return False
    return isinstance(mod, (nn.Linear, nn.Embedding))
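A matching sketch for the MoE module, again not part of the diff. Unlike the dense variant, this loader hardcodes the 30B-A3B configuration and weight filenames, so it takes no model_name argument; the directory path and prompt below are illustrative, and ChatMessage is assumed to be the same role/content dataclass as above.

# Hypothetical sketch: same entry points, MoE variant.
from nexaai.mlx_backend.vlm.generate_qwen3_vl_moe import (
    apply_chat_template_qwen3_vl,
    load_qwen3_vl,
    stream_generate_qwen3_vl,
)
from ml import ChatMessage

# The directory must contain qwen3vl-moe-llm-30B-A3B-{q4_0,q8_0,f32}.safetensors
# (tried in that order) plus qwen3vl-moe-vision-30B-A3B-f16.safetensors.
model, processor = load_qwen3_vl("/path/to/qwen3vl-moe-30B-A3B")

prompt = apply_chat_template_qwen3_vl(
    [ChatMessage(role="user", content="Summarize the attached chart.")]
)
for chunk in stream_generate_qwen3_vl(model, processor, prompt, image="chart.png"):
    print(chunk.text, end="", flush=True)

One difference worth noting: this module loads the tokenizer only from path_or_repo and performs no context-length check before generation, whereas the dense module falls back between the resolved model directory and path_or_repo and raises ContextLengthExceededError for prompts over 4096 tokens.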