nexaai 1.0.29__cp310-cp310-macosx_14_0_universal2.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- nexaai/__init__.py +99 -0
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +4 -0
- nexaai/asr.py +68 -0
- nexaai/asr_impl/__init__.py +0 -0
- nexaai/asr_impl/mlx_asr_impl.py +93 -0
- nexaai/asr_impl/pybind_asr_impl.py +127 -0
- nexaai/base.py +39 -0
- nexaai/binds/__init__.py +7 -0
- nexaai/binds/asr_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/common_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/cpu_gpu/libggml-base.dylib +0 -0
- nexaai/binds/cpu_gpu/libggml-cpu.so +0 -0
- nexaai/binds/cpu_gpu/libggml-metal.so +0 -0
- nexaai/binds/cpu_gpu/libggml.dylib +0 -0
- nexaai/binds/cpu_gpu/libmtmd.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_cpu_gpu.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_plugin.dylib +0 -0
- nexaai/binds/cv_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/diarize_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/embedder_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/llm_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/metal/libnexa_plugin.dylib +0 -0
- nexaai/binds/metal/py-lib/ml.py +888 -0
- nexaai/binds/metal/py-lib/mlx_audio/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/binds/metal/py-lib/mlx_audio/server.py +525 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/generate.py +174 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/utils.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/audio_player.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/convert.py +71 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/generate.py +449 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/base.py +84 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/utils.py +337 -0
- nexaai/binds/metal/py-lib/mlx_audio/utils.py +237 -0
- nexaai/binds/metal/py-lib/mlx_audio/version.py +1 -0
- nexaai/binds/metal/py-lib/profiling.py +239 -0
- nexaai/binds/nexaml/libfftw3.3.dylib +0 -0
- nexaai/binds/nexaml/libfftw3f.3.dylib +0 -0
- nexaai/binds/nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexaml/libggml.dylib +0 -0
- nexaai/binds/nexaml/libmp3lame.0.dylib +0 -0
- nexaai/binds/nexaml/libmpg123.0.dylib +0 -0
- nexaai/binds/nexaml/libnexa-mm-process.dylib +0 -0
- nexaai/binds/nexaml/libnexa-sampling.dylib +0 -0
- nexaai/binds/nexaml/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexaml/libnexaproc.dylib +0 -0
- nexaai/binds/nexaml/libomp.dylib +0 -0
- nexaai/binds/nexaml/libqwen3-vl.dylib +0 -0
- nexaai/binds/nexaml/libqwen3vl-vision.dylib +0 -0
- nexaai/binds/rerank_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/vlm_bind.cpython-310-darwin.so +0 -0
- nexaai/common.py +106 -0
- nexaai/cv.py +95 -0
- nexaai/cv_impl/__init__.py +0 -0
- nexaai/cv_impl/mlx_cv_impl.py +91 -0
- nexaai/cv_impl/pybind_cv_impl.py +124 -0
- nexaai/diarize.py +80 -0
- nexaai/diarize_impl/__init__.py +1 -0
- nexaai/diarize_impl/pybind_diarize_impl.py +125 -0
- nexaai/embedder.py +73 -0
- nexaai/embedder_impl/__init__.py +0 -0
- nexaai/embedder_impl/mlx_embedder_impl.py +118 -0
- nexaai/embedder_impl/pybind_embedder_impl.py +96 -0
- nexaai/image_gen.py +141 -0
- nexaai/image_gen_impl/__init__.py +0 -0
- nexaai/image_gen_impl/mlx_image_gen_impl.py +292 -0
- nexaai/image_gen_impl/pybind_image_gen_impl.py +85 -0
- nexaai/llm.py +98 -0
- nexaai/llm_impl/__init__.py +0 -0
- nexaai/llm_impl/mlx_llm_impl.py +271 -0
- nexaai/llm_impl/pybind_llm_impl.py +238 -0
- nexaai/log.py +92 -0
- nexaai/mlx_backend/asr/__init__.py +12 -0
- nexaai/mlx_backend/asr/interface.py +122 -0
- nexaai/mlx_backend/common/__init__.py +0 -0
- nexaai/mlx_backend/common/utils.py +25 -0
- nexaai/mlx_backend/cv/__init__.py +0 -0
- nexaai/mlx_backend/cv/generate.py +195 -0
- nexaai/mlx_backend/cv/interface.py +162 -0
- nexaai/mlx_backend/cv/main.py +81 -0
- nexaai/mlx_backend/cv/modeling/pp_ocr_v4.py +1736 -0
- nexaai/mlx_backend/embedding/__init__.py +0 -0
- nexaai/mlx_backend/embedding/generate.py +333 -0
- nexaai/mlx_backend/embedding/interface.py +617 -0
- nexaai/mlx_backend/embedding/main.py +173 -0
- nexaai/mlx_backend/embedding/modeling/__init__.py +0 -0
- nexaai/mlx_backend/embedding/modeling/nexa_jina_v2.py +399 -0
- nexaai/mlx_backend/image_gen/__init__.py +1 -0
- nexaai/mlx_backend/image_gen/generate_sd.py +244 -0
- nexaai/mlx_backend/image_gen/interface.py +82 -0
- nexaai/mlx_backend/image_gen/main.py +281 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/__init__.py +306 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/clip.py +116 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/config.py +65 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/model_io.py +386 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/sampler.py +105 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/tokenizer.py +100 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/unet.py +460 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/vae.py +274 -0
- nexaai/mlx_backend/llm/__init__.py +0 -0
- nexaai/mlx_backend/llm/generate.py +149 -0
- nexaai/mlx_backend/llm/interface.py +764 -0
- nexaai/mlx_backend/llm/main.py +68 -0
- nexaai/mlx_backend/ml.py +888 -0
- nexaai/mlx_backend/mlx_audio/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/mlx_backend/mlx_audio/server.py +525 -0
- nexaai/mlx_backend/mlx_audio/sts/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/mlx_backend/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/mlx_backend/mlx_audio/stt/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/generate.py +174 -0
- nexaai/mlx_backend/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/mlx_backend/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/mlx_backend/mlx_audio/stt/utils.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/audio_player.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/convert.py +71 -0
- nexaai/mlx_backend/mlx_audio/tts/generate.py +449 -0
- nexaai/mlx_backend/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/mlx_backend/mlx_audio/tts/models/base.py +84 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/mlx_backend/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/default_speaker.json +461 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/mlx_backend/mlx_audio/tts/utils.py +337 -0
- nexaai/mlx_backend/mlx_audio/utils.py +237 -0
- nexaai/mlx_backend/mlx_audio/version.py +1 -0
- nexaai/mlx_backend/profiling.py +239 -0
- nexaai/mlx_backend/rerank/__init__.py +0 -0
- nexaai/mlx_backend/rerank/generate.py +174 -0
- nexaai/mlx_backend/rerank/interface.py +287 -0
- nexaai/mlx_backend/rerank/main.py +127 -0
- nexaai/mlx_backend/rerank/modeling/__init__.py +0 -0
- nexaai/mlx_backend/rerank/modeling/nexa_jina_rerank.py +330 -0
- nexaai/mlx_backend/sd/__init__.py +1 -0
- nexaai/mlx_backend/sd/interface.py +362 -0
- nexaai/mlx_backend/sd/main.py +286 -0
- nexaai/mlx_backend/sd/modeling/__init__.py +306 -0
- nexaai/mlx_backend/sd/modeling/clip.py +116 -0
- nexaai/mlx_backend/sd/modeling/config.py +65 -0
- nexaai/mlx_backend/sd/modeling/model_io.py +385 -0
- nexaai/mlx_backend/sd/modeling/sampler.py +105 -0
- nexaai/mlx_backend/sd/modeling/tokenizer.py +100 -0
- nexaai/mlx_backend/sd/modeling/unet.py +460 -0
- nexaai/mlx_backend/sd/modeling/vae.py +274 -0
- nexaai/mlx_backend/tts/__init__.py +12 -0
- nexaai/mlx_backend/tts/interface.py +276 -0
- nexaai/mlx_backend/vlm/__init__.py +3 -0
- nexaai/mlx_backend/vlm/generate.py +572 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +374 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +259 -0
- nexaai/mlx_backend/vlm/interface.py +559 -0
- nexaai/mlx_backend/vlm/main.py +365 -0
- nexaai/mlx_backend/vlm/modeling/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/convert.py +68 -0
- nexaai/mlx_backend/vlm/modeling/models/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/aya_vision.py +193 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/interpolate.py +186 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/language.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/vision.py +503 -0
- nexaai/mlx_backend/vlm/modeling/models/base.py +202 -0
- nexaai/mlx_backend/vlm/modeling/models/cache.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/conversation.py +264 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +472 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/language.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/vision.py +356 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/florence2.py +366 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/language.py +488 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/vision.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/gemma3.py +213 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/language.py +315 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/vision.py +238 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/audio.py +1038 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/config.py +139 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/gemma3n.py +322 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/language.py +629 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/vision.py +1022 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/idefics2.py +294 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/language.py +191 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/vision.py +267 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/idefics3.py +175 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/language.py +192 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/vision.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/internvl_chat.py +140 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/language.py +220 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/processor.py +393 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/vision.py +293 -0
- nexaai/mlx_backend/vlm/modeling/models/kernels.py +307 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/kimi_vl.py +143 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/language.py +509 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/vision.py +522 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/language.py +386 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/llama4.py +138 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/vision.py +560 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/language.py +240 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/llava.py +153 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/vision.py +259 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/language.py +236 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/llava_bunny.py +256 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/vision.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/llava_next.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/vision.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/mistral3.py +283 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/language.py +416 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/mllama.py +172 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/vision.py +499 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/language.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/molmo.py +133 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/vision.py +465 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/multi_modality.py +385 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/sam.py +557 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/vision.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/language.py +282 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/paligemma.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/vision.py +242 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/language.py +21 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/phi3_v.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/su_rope.py +71 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/vision.py +324 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/language.py +229 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/pixtral.py +161 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/vision.py +320 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/config.py +108 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +168 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/vision.py +414 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/config.py +104 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/qwen2_vl.py +167 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/vision.py +312 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +1262 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1308 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/smolvlm.py +62 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_5_vl.py +209 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_vl.py +215 -0
- nexaai/mlx_backend/vlm/modeling/prompt_utils.py +474 -0
- nexaai/mlx_backend/vlm/modeling/sample_utils.py +39 -0
- nexaai/mlx_backend/vlm/modeling/tokenizer_utils.py +344 -0
- nexaai/mlx_backend/vlm/modeling/trainer/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/trainer/lora.py +70 -0
- nexaai/mlx_backend/vlm/modeling/trainer/trainer.py +296 -0
- nexaai/mlx_backend/vlm/modeling/trainer/utils.py +160 -0
- nexaai/mlx_backend/vlm/modeling/utils.py +928 -0
- nexaai/rerank.py +57 -0
- nexaai/rerank_impl/__init__.py +0 -0
- nexaai/rerank_impl/mlx_rerank_impl.py +94 -0
- nexaai/rerank_impl/pybind_rerank_impl.py +136 -0
- nexaai/runtime.py +68 -0
- nexaai/runtime_error.py +24 -0
- nexaai/tts.py +75 -0
- nexaai/tts_impl/__init__.py +0 -0
- nexaai/tts_impl/mlx_tts_impl.py +94 -0
- nexaai/tts_impl/pybind_tts_impl.py +43 -0
- nexaai/utils/decode.py +18 -0
- nexaai/utils/manifest_utils.py +531 -0
- nexaai/utils/model_manager.py +1745 -0
- nexaai/utils/model_types.py +49 -0
- nexaai/utils/progress_tracker.py +389 -0
- nexaai/utils/quantization_utils.py +245 -0
- nexaai/vlm.py +130 -0
- nexaai/vlm_impl/__init__.py +0 -0
- nexaai/vlm_impl/mlx_vlm_impl.py +259 -0
- nexaai/vlm_impl/pybind_vlm_impl.py +275 -0
- nexaai-1.0.29.dist-info/METADATA +35 -0
- nexaai-1.0.29.dist-info/RECORD +580 -0
- nexaai-1.0.29.dist-info/WHEEL +5 -0
- nexaai-1.0.29.dist-info/top_level.txt +1 -0
nexaai/mlx_backend/vlm/main.py

@@ -0,0 +1,365 @@

```python
from .interface import VLM
from ml import GenerationConfig, SamplerConfig, ChatMessage
import re
import os
import codecs
import argparse

def parse_media_from_input(user_input):
    """Parse quoted media files from user input and return prompt and media paths"""
    # Find all quoted strings (both single and double quotes)
    quoted_pattern = r'["\']([^"\']*)["\']'
    quoted_matches = re.findall(quoted_pattern, user_input)

    # Remove quoted strings from the input to get the actual prompt
    prompt = re.sub(quoted_pattern, '', user_input).strip()

    # Separate image and audio files based on extensions
    image_extensions = {'.png', '.jpg', '.jpeg', '.gif', '.bmp', '.tiff', '.webp'}
    audio_extensions = {'.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a'}

    image_paths = []
    audio_paths = []

    for quoted_file in quoted_matches:
        if quoted_file:  # Skip empty quotes
            # Expand user path if it starts with ~
            if quoted_file.startswith('~'):
                quoted_file = os.path.expanduser(quoted_file)

            # Check if file exists
            if not os.path.exists(quoted_file):
                print(f"Warning: File '{quoted_file}' not found")
                continue

            file_ext = os.path.splitext(quoted_file.lower())[1]
            if file_ext in image_extensions:
                image_paths.append(quoted_file)
            elif file_ext in audio_extensions:
                audio_paths.append(quoted_file)

    return prompt, image_paths if image_paths else None, audio_paths if audio_paths else None

def detect_model_name_and_repo(model_path):
    """Detect model name and corresponding HuggingFace repo based on model path or name"""
    model_path_lower = model_path.lower()

    # Handle HuggingFace repo format (tolerating a trailing slash)
    if "/" in model_path:
        repo_name = model_path.rstrip("/").split("/")[-1]
        repo_name_lower = repo_name.lower()
    else:
        repo_name_lower = model_path_lower

    # Model name mapping based on the provided examples
    model_mappings = {
        # 4B models
        "qwen3vl-4b-4bit-mlx": ("qwen3vl-4b", "NexaAI/qwen3vl-4B-4bit-mlx"),
        "qwen3vl-4b-fp16-mlx": ("qwen3vl-4b", "NexaAI/qwen3vl-4B-fp16-mlx"),
        "qwen3vl-4b-thinking-4bit-mlx": ("qwen3vl-4b-thinking", "NexaAI/qwen3vl-4B-thinking-4bit-mlx"),
        "qwen3vl-4b-thinking-fp16-mlx": ("qwen3vl-4b-thinking", "NexaAI/qwen3vl-4B-thinking-fp16-mlx"),

        # 8B models
        "qwen3vl-8b-4bit-mlx": ("qwen3vl-8b", "NexaAI/qwen3vl-8B-4bit-mlx"),
        "qwen3vl-8b-fp16-mlx": ("qwen3vl-8b", "NexaAI/qwen3vl-8B-fp16-mlx"),
        "qwen3vl-8b-thinking-4bit-mlx": ("qwen3vl-8b-thinking", "NexaAI/qwen3vl-8B-thinking-4bit-mlx"),
        "qwen3vl-8b-thinking-fp16-mlx": ("qwen3vl-8b-thinking", "NexaAI/qwen3vl-8B-thinking-fp16-mlx"),
    }

    # Check exact matches first
    for key, (model_name, repo) in model_mappings.items():
        if key in repo_name_lower:
            return model_name, (repo if "/" not in model_path else model_path)

    # Fallback detection based on patterns
    if "qwen3vl" in repo_name_lower:
        if "8b" in repo_name_lower:
            if "thinking" in repo_name_lower:
                return "qwen3vl-8b-thinking", model_path
            else:
                return "qwen3vl-8b", model_path
        elif "4b" in repo_name_lower:
            if "thinking" in repo_name_lower:
                return "qwen3vl-4b-thinking", model_path
            else:
                return "qwen3vl-4b", model_path
        else:
            # Default to 4B if size not specified
            return "qwen3vl-4b", model_path
    elif "gemma" in repo_name_lower:
        return "gemma3", model_path

    return "", model_path

def parse_arguments():
    """Parse command line arguments for the VLM main function."""
    parser = argparse.ArgumentParser(
        description="Interactive VLM (Vision-Language Model) conversation interface."
    )
    parser.add_argument(
        "--model_path",
        type=str,
        default="NexaAI/qwen3vl-4B-4bit-mlx",
        help="The path to the local model directory or Hugging Face repo."
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="",
        help="Specific model name/type (e.g., 'qwen3vl-4b', 'qwen3vl-4b-thinking', 'qwen3vl-8b', 'qwen3vl-8b-thinking'). If empty, auto-detect from model_path."
    )
    parser.add_argument(
        "--context_length",
        type=int,
        default=2048,
        help="Context length for the model (default: 2048)."
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.7,
        help="Sampling temperature (default: 0.7)."
    )
    parser.add_argument(
        "--top_p",
        type=float,
        default=0.9,
        help="Top-p sampling parameter (default: 0.9)."
    )
    parser.add_argument(
        "--max_tokens",
        type=int,
        default=512,
        help="Maximum tokens to generate (default: 512)."
    )
    return parser.parse_args()

def main():
    """Main function for interactive VLM conversation."""
    args = parse_arguments()

    # Auto-detect model name if not provided
    model_name = args.model_name
    model_path = args.model_path

    if not model_name:
        model_name, model_path = detect_model_name_and_repo(args.model_path)
        print(f"Auto-detected model: {model_name} from path: {model_path}")

    # Load the VLM instance
    vlm = VLM(
        model_name=model_name,
        model_path=model_path,
        mmproj_path=None,  # Not needed for this model
        context_length=args.context_length,
        device=None
    )

    # Configure sampler
    sampler_config = SamplerConfig(
        temperature=args.temperature,
        top_p=args.top_p
    )
    vlm.set_sampler(sampler_config)

    # Chat history using ChatMessage objects
    chat = []

    print("VLM Multi-round conversation started. Type 'quit' or 'exit' to end.")
    print("Include images/audios in quotes, e.g.: 'describe \"image1.jpg\" \"image2.png\"'")
    print("You can also use single quotes: 'describe '/path/to/image.jpg''")
    print("=" * 50)

    def on_token(text_chunk):
        """Token callback for streaming"""
        print(text_chunk, end="", flush=True)
        return True

    while True:
        # Get user input
        user_input = input("\nUser: ").strip()

        # Check for exit commands
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break

        if not user_input:
            continue

        # Parse media files and prompt from user input
        prompt_text, image_paths, audio_paths = parse_media_from_input(user_input)

        # If no text prompt after parsing, use the original input
        if not prompt_text.strip():
            prompt_text = user_input
            image_paths = None
            audio_paths = None

        # Add user message to chat history using ChatMessage
        chat.append(ChatMessage(role="user", content=prompt_text))

        # Calculate number of images and audios for chat template
        num_images = len(image_paths) if image_paths else 0
        num_audios = len(audio_paths) if audio_paths else 0

        # Apply chat template with image/audio token insertion
        try:
            formatted_prompt = vlm.apply_chat_template_with_media(chat, num_images=num_images, num_audios=num_audios)
        except (NotImplementedError, AttributeError):
            # Fallback to manual formatting if chat template is not implemented
            formatted_prompt = ""
            for msg in chat:
                formatted_prompt += f"{msg.role}: {msg.content}\n"
            formatted_prompt += "Assistant: "

        # Generation config with media paths
        generation_config = GenerationConfig(
            max_tokens=args.max_tokens,
            sampler_config=sampler_config,
            image_paths=image_paths,
            audio_paths=audio_paths
        )

        # Generate response
        print("Assistant: ", end="", flush=True)

        try:
            # Use streaming generation with callback
            response_text = ""

            def token_callback(text_chunk):
                nonlocal response_text
                print(text_chunk, end="", flush=True)
                response_text += text_chunk
                return True

            # Use generate_stream method for streaming generation
            response = vlm.generate_stream(
                prompt=formatted_prompt,
                config=generation_config,
                on_token=token_callback
            )

            print()  # New line after streaming

            # Add assistant response to chat history using ChatMessage
            chat.append(ChatMessage(role="assistant", content=response_text))

        except Exception as e:
            print(f"Error generating response: {e}")
            print()

    # Clean up
    vlm.destroy()

def test_vlm_generate_stream(model_path, model_name):
    # Specify the checkpoint
    context_length = 2048

    # Load the corresponding model and VLM instance
    vlm = VLM(
        model_name=model_name,
        model_path=model_path,
        mmproj_path=None,  # Not needed for this model
        context_length=context_length,
        device=None
    )

    # Configure sampler
    sampler_config = SamplerConfig(
        temperature=0.7,
        top_p=0.9
    )
    vlm.set_sampler(sampler_config)

    # Chat history using ChatMessage objects (following ml.py API)
    chat = []

    print("Multi-round VLM conversation started. Type 'quit' or 'exit' to end.")
    print("Include images/audios in quotes, e.g.: 'describe \"image1.jpg\" \"image2.png\"'")
    print("You can also use single quotes: 'describe '/path/to/image.jpg''")
    print("=" * 50)

    def on_token(text_chunk, user_data):
        """Token callback for streaming"""
        print(text_chunk, end="", flush=True)
        if user_data is not None:
            user_data["response"] += text_chunk
        return True

    while True:
        # Get user input
        user_input = input("\nUser: ").strip()

        # Check for exit commands
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break

        if not user_input:
            continue

        # Parse media files and prompt from user input
        prompt_text, image_paths, audio_paths = parse_media_from_input(user_input)

        # If no text prompt after parsing, use the original input
        if not prompt_text.strip():
            prompt_text = user_input
            image_paths = None
            audio_paths = None

        # Add user message to chat history using ChatMessage (following ml.py API)
        chat.append(ChatMessage(role="user", content=prompt_text))

        # Calculate number of images and audios for chat template
        num_images = len(image_paths) if image_paths else 0
        num_audios = len(audio_paths) if audio_paths else 0

        # Apply chat template with image/audio token insertion
        try:
            formatted_prompt = vlm.apply_chat_template_with_media(chat, num_images=num_images, num_audios=num_audios)
        except (NotImplementedError, AttributeError):
            # Fallback to manual formatting if chat template is not implemented
            formatted_prompt = ""
            for msg in chat:
                formatted_prompt += f"{msg.role}: {msg.content}\n"
            formatted_prompt += "Assistant: "

        # Generation config with media paths
        generation_config = GenerationConfig(
            max_tokens=512,
            sampler_config=sampler_config,
            image_paths=image_paths,
            audio_paths=audio_paths
        )

        # Generate response
        print("Assistant: ", end="", flush=True)

        try:
            # Use streaming generation with callback - single method handles all cases
            user_data = {"response": ""}

            # Always use the unified generate_stream method
            response = vlm.generate_stream(
                prompt=formatted_prompt,
                config=generation_config,
                on_token=on_token,
                user_data=user_data
            )

            print()  # New line after streaming

            # Add assistant response to chat history using ChatMessage
            chat.append(ChatMessage(role="assistant", content=user_data["response"]))

        except Exception as e:
            print(f"Error generating response: {e}")
            print()

    # Clean up
    vlm.destroy()

if __name__ == "__main__":
    main()
```
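For reference, this is how `parse_media_from_input` splits a turn under the quoting convention the prompts above describe. It is a toy sketch with hypothetical file names; because the parser drops any quoted path that does not exist on disk, the media lists are only populated when the files are real:

```python
# Hypothetical paths; parse_media_from_input() keeps a quoted file only
# when os.path.exists() is true, printing a warning and skipping it otherwise.
prompt, images, audios = parse_media_from_input(
    'compare "photo_a.jpg" and "photo_b.png" against "clip.wav"'
)
# If all three files exist:
#   prompt -> 'compare  and  against'  (quoted spans removed, ends stripped)
#   images -> ['photo_a.jpg', 'photo_b.png']
#   audios -> ['clip.wav']
# If none exist, images and audios are both None.
```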
nexaai/mlx_backend/vlm/modeling/convert.py

@@ -0,0 +1,68 @@

```python
# Copyright © 2023-2024 Apple Inc.

import argparse

from .utils import MODEL_CONVERSION_DTYPES, convert


def configure_parser() -> argparse.ArgumentParser:
    """
    Configures and returns the argument parser for the script.

    Returns:
        argparse.ArgumentParser: Configured argument parser.
    """
    parser = argparse.ArgumentParser(
        description="Convert Hugging Face model to MLX format"
    )

    parser.add_argument("--hf-path", type=str, help="Path to the Hugging Face model.")
    parser.add_argument(
        "--mlx-path", type=str, default="mlx_model", help="Path to save the MLX model."
    )
    parser.add_argument(
        "-q", "--quantize", help="Generate a quantized model.", action="store_true"
    )
    parser.add_argument(
        "--q-group-size", help="Group size for quantization.", type=int, default=64
    )
    parser.add_argument(
        "--q-bits", help="Bits per weight for quantization.", type=int, default=4
    )
    parser.add_argument(
        "--dtype",
        help="Type to save the parameter. Defaults to config.json's `torch_dtype` or the current model weights dtype.",
        type=str,
        choices=MODEL_CONVERSION_DTYPES,
        default=None,
    )
    parser.add_argument(
        "--upload-repo",
        help="The Hugging Face repo to upload the model to.",
        type=str,
        default=None,
    )
    parser.add_argument(
        "-d",
        "--dequantize",
        help="Dequantize a quantized model.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--skip-vision",
        help="Skip vision module quantization.",
        action="store_true",
        default=False,
    )
    return parser


def main():
    parser = configure_parser()
    args = parser.parse_args()
    convert(**vars(args))


if __name__ == "__main__":
    main()
```
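Assuming the package layout in the file list above, this converter is a standard argparse script and can be driven from the command line or programmatically. Paths and repo names below are placeholders, not values shipped with the package:

```python
# Command-line form (placeholder repo):
#   python -m nexaai.mlx_backend.vlm.modeling.convert \
#       --hf-path <hf-repo-or-local-path> --mlx-path mlx_model -q --skip-vision
#
# Programmatic form, reusing the same parser:
from nexaai.mlx_backend.vlm.modeling.convert import configure_parser

args = configure_parser().parse_args(
    ["--hf-path", "some-org/some-model", "--mlx-path", "mlx_model", "-q"]
)
# convert(**vars(args)) would then write a 4-bit (group size 64) MLX
# checkpoint to ./mlx_model, per the argument defaults above.
```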
nexaai/mlx_backend/vlm/modeling/models/aya_vision/aya_vision.py

@@ -0,0 +1,193 @@

```python
import glob
import inspect
import json
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional

import mlx.core as mx
import mlx.nn as nn
import numpy as np
from mlx_lm.utils import snapshot_download

from .language import LanguageModel, TextConfig
from .vision import VisionConfig, VisionModel


@dataclass
class ModelConfig:
    text_config: TextConfig
    vision_config: VisionConfig
    model_type: str
    image_token_index: int = 255036
    max_splits_per_img: int = 12
    downsample_factor: int = 2
    alignment_intermediate_size: int = 28672
    adapter_layer_norm_eps: float = 1e-06
    vision_feature_layer: int = -1
    vision_feature_select_strategy: str = "full"
    eos_token_id: Optional[List[int]] = None

    @classmethod
    def from_dict(cls, params):
        return cls(
            **{
                k: v
                for k, v in params.items()
                if k in inspect.signature(cls).parameters
            }
        )


class AyaVisionMultiModalProjector(nn.Module):
    def __init__(self, config: ModelConfig):
        super().__init__()
        self.config = config
        self.downsample_factor = config.downsample_factor
        self.alignment_intermediate_size = getattr(
            config, "alignment_intermediate_size", config.text_config.hidden_size
        )
        self.layernorm = nn.LayerNorm(
            config.vision_config.hidden_size * (config.downsample_factor**2),
            eps=config.adapter_layer_norm_eps,
        )

        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size * (config.downsample_factor**2),
            self.alignment_intermediate_size,
            bias=True,
        )

        self.act = nn.SiLU()  # SwiGLU uses SiLU activation

        # For SwiGLU, project down to half size since we split intermediate dim
        self.linear_2 = nn.Linear(
            self.alignment_intermediate_size // 2,
            config.text_config.hidden_size,
            bias=True,
        )

    def __call__(self, image_features):
        image_features = self.pixel_shuffle(image_features)
        image_features = self.layernorm(image_features)
        hidden_states = self.linear_1(image_features)

        # Split along last dimension and apply SwiGLU
        x, gate = mx.split(hidden_states, 2, axis=-1)
        hidden_states = self.act(gate) * x

        hidden_states = self.linear_2(hidden_states)
        return hidden_states

    def pixel_shuffle(self, image_features):  # B, S, D
        batch_size, seq_length, feature_dim = image_features.shape
        height = width = int(seq_length**0.5)
        image_features = image_features.reshape(
            image_features.shape[0], width, height, -1
        )
        channels = image_features.shape[-1]
        image_features = image_features.reshape(
            batch_size,
            width,
            int(height / self.downsample_factor),
            int(channels * self.downsample_factor),
        )
        image_features = image_features.transpose(0, 2, 1, 3)
        image_features = image_features.reshape(
            batch_size,
            int(height / self.downsample_factor),
            int(width / self.downsample_factor),
            -1,
        )
        image_features = image_features.transpose(0, 2, 1, 3)
        return image_features


class Model(nn.Module):
    def __init__(self, config: ModelConfig):
        super().__init__()
        self.config = config
        self.vision_tower = VisionModel(config.vision_config)
        self.language_model = LanguageModel(config.text_config)
        self.multi_modal_projector = AyaVisionMultiModalProjector(config)
        self.vision_feature_layer = config.vision_feature_layer
        self.vision_feature_select_strategy = config.vision_feature_select_strategy

    def get_input_embeddings(
        self,
        input_ids: Optional[mx.array] = None,
        pixel_values: Optional[mx.array] = None,
        **kwargs,
    ):
        if pixel_values is None:
            return self.language_model.model.embed_tokens(input_ids)

        # Get the input embeddings from the language model
        inputs_embeds = self.language_model.model.embed_tokens(input_ids)

        spatial_shapes = kwargs.get("spatial_shapes", None)
        # Get the output hidden states from the vision model
        *_, hidden_states = self.vision_tower(
            pixel_values.transpose(0, 2, 3, 1),
            spatial_shapes=spatial_shapes,
            output_hidden_states=True,
        )

        # Select the hidden states from the desired layer
        selected_image_feature = hidden_states[self.vision_feature_layer]

        if self.vision_feature_select_strategy == "default":
            selected_image_feature = selected_image_feature[:, 1:]
        elif self.vision_feature_select_strategy == "full":
            selected_image_feature = selected_image_feature
        else:
            raise ValueError(
                "Unexpected feature selection strategy: "
                f"{self.vision_feature_select_strategy}"
            )

        # Pass image features through the multi-modal projector
        image_features = self.multi_modal_projector(selected_image_feature)

        # Insert special image tokens in the input_ids
        final_inputs_embeds = self._merge_input_ids_with_image_features(
            image_features, inputs_embeds, input_ids
        )
        return final_inputs_embeds

    def _merge_input_ids_with_image_features(
        self, image_features, inputs_embeds, input_ids
    ):
        image_token_index = self.config.image_token_index

        # Positions of <image> tokens in input_ids, assuming batch size is 1
        image_positions = np.where(input_ids[0] == image_token_index)[0].tolist()
        num_images, _, _, vision_hidden_size = image_features.shape

        reshaped_image_hidden_states = image_features.reshape(-1, vision_hidden_size)

        # Cast to the dtype of inputs_embeds to support quantized models
        reshaped_image_hidden_states = reshaped_image_hidden_states.astype(
            inputs_embeds.dtype
        )
        inputs_embeds[:, image_positions, :] = reshaped_image_hidden_states
        return inputs_embeds

    @property
    def layers(self):
        return self.language_model.model.layers

    def __call__(
        self,
        input_ids: mx.array,
        pixel_values: mx.array,
        mask: mx.array,
        cache=None,
        **kwargs,
    ):
        input_embeddings = self.get_input_embeddings(input_ids, pixel_values, **kwargs)
        logits = self.language_model(
            input_ids, cache=cache, inputs_embeds=input_embeddings
        )
        return logits
```