nexaai 1.0.29__cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nexaai/__init__.py +99 -0
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +4 -0
- nexaai/asr.py +68 -0
- nexaai/asr_impl/__init__.py +0 -0
- nexaai/asr_impl/mlx_asr_impl.py +93 -0
- nexaai/asr_impl/pybind_asr_impl.py +127 -0
- nexaai/base.py +39 -0
- nexaai/binds/__init__.py +7 -0
- nexaai/binds/asr_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/common_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/cpu_gpu/libggml-base.dylib +0 -0
- nexaai/binds/cpu_gpu/libggml-cpu.so +0 -0
- nexaai/binds/cpu_gpu/libggml-metal.so +0 -0
- nexaai/binds/cpu_gpu/libggml.dylib +0 -0
- nexaai/binds/cpu_gpu/libmtmd.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_cpu_gpu.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_plugin.dylib +0 -0
- nexaai/binds/cv_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/diarize_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/embedder_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/llm_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/metal/libnexa_plugin.dylib +0 -0
- nexaai/binds/metal/py-lib/ml.py +888 -0
- nexaai/binds/metal/py-lib/mlx_audio/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/binds/metal/py-lib/mlx_audio/server.py +525 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/generate.py +174 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/utils.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/audio_player.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/convert.py +71 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/generate.py +449 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/base.py +84 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/utils.py +337 -0
- nexaai/binds/metal/py-lib/mlx_audio/utils.py +237 -0
- nexaai/binds/metal/py-lib/mlx_audio/version.py +1 -0
- nexaai/binds/metal/py-lib/profiling.py +239 -0
- nexaai/binds/nexaml/libfftw3.3.dylib +0 -0
- nexaai/binds/nexaml/libfftw3f.3.dylib +0 -0
- nexaai/binds/nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexaml/libggml.dylib +0 -0
- nexaai/binds/nexaml/libmp3lame.0.dylib +0 -0
- nexaai/binds/nexaml/libmpg123.0.dylib +0 -0
- nexaai/binds/nexaml/libnexa-mm-process.dylib +0 -0
- nexaai/binds/nexaml/libnexa-sampling.dylib +0 -0
- nexaai/binds/nexaml/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexaml/libnexaproc.dylib +0 -0
- nexaai/binds/nexaml/libomp.dylib +0 -0
- nexaai/binds/nexaml/libqwen3-vl.dylib +0 -0
- nexaai/binds/nexaml/libqwen3vl-vision.dylib +0 -0
- nexaai/binds/rerank_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/vlm_bind.cpython-310-darwin.so +0 -0
- nexaai/common.py +106 -0
- nexaai/cv.py +95 -0
- nexaai/cv_impl/__init__.py +0 -0
- nexaai/cv_impl/mlx_cv_impl.py +91 -0
- nexaai/cv_impl/pybind_cv_impl.py +124 -0
- nexaai/diarize.py +80 -0
- nexaai/diarize_impl/__init__.py +1 -0
- nexaai/diarize_impl/pybind_diarize_impl.py +125 -0
- nexaai/embedder.py +73 -0
- nexaai/embedder_impl/__init__.py +0 -0
- nexaai/embedder_impl/mlx_embedder_impl.py +118 -0
- nexaai/embedder_impl/pybind_embedder_impl.py +96 -0
- nexaai/image_gen.py +141 -0
- nexaai/image_gen_impl/__init__.py +0 -0
- nexaai/image_gen_impl/mlx_image_gen_impl.py +292 -0
- nexaai/image_gen_impl/pybind_image_gen_impl.py +85 -0
- nexaai/llm.py +98 -0
- nexaai/llm_impl/__init__.py +0 -0
- nexaai/llm_impl/mlx_llm_impl.py +271 -0
- nexaai/llm_impl/pybind_llm_impl.py +238 -0
- nexaai/log.py +92 -0
- nexaai/mlx_backend/asr/__init__.py +12 -0
- nexaai/mlx_backend/asr/interface.py +122 -0
- nexaai/mlx_backend/common/__init__.py +0 -0
- nexaai/mlx_backend/common/utils.py +25 -0
- nexaai/mlx_backend/cv/__init__.py +0 -0
- nexaai/mlx_backend/cv/generate.py +195 -0
- nexaai/mlx_backend/cv/interface.py +162 -0
- nexaai/mlx_backend/cv/main.py +81 -0
- nexaai/mlx_backend/cv/modeling/pp_ocr_v4.py +1736 -0
- nexaai/mlx_backend/embedding/__init__.py +0 -0
- nexaai/mlx_backend/embedding/generate.py +333 -0
- nexaai/mlx_backend/embedding/interface.py +617 -0
- nexaai/mlx_backend/embedding/main.py +173 -0
- nexaai/mlx_backend/embedding/modeling/__init__.py +0 -0
- nexaai/mlx_backend/embedding/modeling/nexa_jina_v2.py +399 -0
- nexaai/mlx_backend/image_gen/__init__.py +1 -0
- nexaai/mlx_backend/image_gen/generate_sd.py +244 -0
- nexaai/mlx_backend/image_gen/interface.py +82 -0
- nexaai/mlx_backend/image_gen/main.py +281 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/__init__.py +306 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/clip.py +116 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/config.py +65 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/model_io.py +386 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/sampler.py +105 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/tokenizer.py +100 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/unet.py +460 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/vae.py +274 -0
- nexaai/mlx_backend/llm/__init__.py +0 -0
- nexaai/mlx_backend/llm/generate.py +149 -0
- nexaai/mlx_backend/llm/interface.py +764 -0
- nexaai/mlx_backend/llm/main.py +68 -0
- nexaai/mlx_backend/ml.py +888 -0
- nexaai/mlx_backend/mlx_audio/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/mlx_backend/mlx_audio/server.py +525 -0
- nexaai/mlx_backend/mlx_audio/sts/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/mlx_backend/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/mlx_backend/mlx_audio/stt/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/generate.py +174 -0
- nexaai/mlx_backend/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/mlx_backend/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/mlx_backend/mlx_audio/stt/utils.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/audio_player.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/convert.py +71 -0
- nexaai/mlx_backend/mlx_audio/tts/generate.py +449 -0
- nexaai/mlx_backend/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/mlx_backend/mlx_audio/tts/models/base.py +84 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/mlx_backend/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/default_speaker.json +461 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/mlx_backend/mlx_audio/tts/utils.py +337 -0
- nexaai/mlx_backend/mlx_audio/utils.py +237 -0
- nexaai/mlx_backend/mlx_audio/version.py +1 -0
- nexaai/mlx_backend/profiling.py +239 -0
- nexaai/mlx_backend/rerank/__init__.py +0 -0
- nexaai/mlx_backend/rerank/generate.py +174 -0
- nexaai/mlx_backend/rerank/interface.py +287 -0
- nexaai/mlx_backend/rerank/main.py +127 -0
- nexaai/mlx_backend/rerank/modeling/__init__.py +0 -0
- nexaai/mlx_backend/rerank/modeling/nexa_jina_rerank.py +330 -0
- nexaai/mlx_backend/sd/__init__.py +1 -0
- nexaai/mlx_backend/sd/interface.py +362 -0
- nexaai/mlx_backend/sd/main.py +286 -0
- nexaai/mlx_backend/sd/modeling/__init__.py +306 -0
- nexaai/mlx_backend/sd/modeling/clip.py +116 -0
- nexaai/mlx_backend/sd/modeling/config.py +65 -0
- nexaai/mlx_backend/sd/modeling/model_io.py +385 -0
- nexaai/mlx_backend/sd/modeling/sampler.py +105 -0
- nexaai/mlx_backend/sd/modeling/tokenizer.py +100 -0
- nexaai/mlx_backend/sd/modeling/unet.py +460 -0
- nexaai/mlx_backend/sd/modeling/vae.py +274 -0
- nexaai/mlx_backend/tts/__init__.py +12 -0
- nexaai/mlx_backend/tts/interface.py +276 -0
- nexaai/mlx_backend/vlm/__init__.py +3 -0
- nexaai/mlx_backend/vlm/generate.py +572 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +374 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +259 -0
- nexaai/mlx_backend/vlm/interface.py +559 -0
- nexaai/mlx_backend/vlm/main.py +365 -0
- nexaai/mlx_backend/vlm/modeling/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/convert.py +68 -0
- nexaai/mlx_backend/vlm/modeling/models/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/aya_vision.py +193 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/interpolate.py +186 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/language.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/vision.py +503 -0
- nexaai/mlx_backend/vlm/modeling/models/base.py +202 -0
- nexaai/mlx_backend/vlm/modeling/models/cache.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/conversation.py +264 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +472 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/language.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/vision.py +356 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/florence2.py +366 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/language.py +488 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/vision.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/gemma3.py +213 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/language.py +315 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/vision.py +238 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/audio.py +1038 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/config.py +139 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/gemma3n.py +322 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/language.py +629 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/vision.py +1022 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/idefics2.py +294 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/language.py +191 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/vision.py +267 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/idefics3.py +175 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/language.py +192 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/vision.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/internvl_chat.py +140 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/language.py +220 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/processor.py +393 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/vision.py +293 -0
- nexaai/mlx_backend/vlm/modeling/models/kernels.py +307 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/kimi_vl.py +143 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/language.py +509 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/vision.py +522 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/language.py +386 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/llama4.py +138 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/vision.py +560 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/language.py +240 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/llava.py +153 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/vision.py +259 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/language.py +236 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/llava_bunny.py +256 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/vision.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/llava_next.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/vision.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/mistral3.py +283 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/language.py +416 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/mllama.py +172 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/vision.py +499 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/language.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/molmo.py +133 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/vision.py +465 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/multi_modality.py +385 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/sam.py +557 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/vision.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/language.py +282 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/paligemma.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/vision.py +242 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/language.py +21 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/phi3_v.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/su_rope.py +71 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/vision.py +324 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/language.py +229 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/pixtral.py +161 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/vision.py +320 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/config.py +108 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +168 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/vision.py +414 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/config.py +104 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/qwen2_vl.py +167 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/vision.py +312 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +1262 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1308 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/smolvlm.py +62 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_5_vl.py +209 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_vl.py +215 -0
- nexaai/mlx_backend/vlm/modeling/prompt_utils.py +474 -0
- nexaai/mlx_backend/vlm/modeling/sample_utils.py +39 -0
- nexaai/mlx_backend/vlm/modeling/tokenizer_utils.py +344 -0
- nexaai/mlx_backend/vlm/modeling/trainer/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/trainer/lora.py +70 -0
- nexaai/mlx_backend/vlm/modeling/trainer/trainer.py +296 -0
- nexaai/mlx_backend/vlm/modeling/trainer/utils.py +160 -0
- nexaai/mlx_backend/vlm/modeling/utils.py +928 -0
- nexaai/rerank.py +57 -0
- nexaai/rerank_impl/__init__.py +0 -0
- nexaai/rerank_impl/mlx_rerank_impl.py +94 -0
- nexaai/rerank_impl/pybind_rerank_impl.py +136 -0
- nexaai/runtime.py +68 -0
- nexaai/runtime_error.py +24 -0
- nexaai/tts.py +75 -0
- nexaai/tts_impl/__init__.py +0 -0
- nexaai/tts_impl/mlx_tts_impl.py +94 -0
- nexaai/tts_impl/pybind_tts_impl.py +43 -0
- nexaai/utils/decode.py +18 -0
- nexaai/utils/manifest_utils.py +531 -0
- nexaai/utils/model_manager.py +1745 -0
- nexaai/utils/model_types.py +49 -0
- nexaai/utils/progress_tracker.py +389 -0
- nexaai/utils/quantization_utils.py +245 -0
- nexaai/vlm.py +130 -0
- nexaai/vlm_impl/__init__.py +0 -0
- nexaai/vlm_impl/mlx_vlm_impl.py +259 -0
- nexaai/vlm_impl/pybind_vlm_impl.py +275 -0
- nexaai-1.0.29.dist-info/METADATA +35 -0
- nexaai-1.0.29.dist-info/RECORD +580 -0
- nexaai-1.0.29.dist-info/WHEEL +5 -0
- nexaai-1.0.29.dist-info/top_level.txt +1 -0
@@ -0,0 +1,306 @@
# Copyright © 2023-2024 Apple Inc.

import time
from typing import Optional, Tuple

import mlx.core as mx

from .model_io import (
    _DEFAULT_MODEL,
    load_autoencoder,
    load_diffusion_config,
    load_text_encoder,
    load_tokenizer,
    load_unet,
)
from .sampler import SimpleEulerAncestralSampler, SimpleEulerSampler


class StableDiffusion:
    def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False):
        self.dtype = mx.float16 if float16 else mx.float32
        self.diffusion_config = load_diffusion_config(model)
        self.unet = load_unet(model, float16)
        self.text_encoder = load_text_encoder(model, float16)
        self.autoencoder = load_autoencoder(model, False)
        self.sampler = SimpleEulerSampler(self.diffusion_config)
        self.tokenizer = load_tokenizer(model)

    def ensure_models_are_loaded(self):
        mx.eval(self.unet.parameters())
        mx.eval(self.text_encoder.parameters())
        mx.eval(self.autoencoder.parameters())

    def _tokenize(self, tokenizer, text: str, negative_text: Optional[str] = None):
        # Tokenize the text
        tokens = [tokenizer.tokenize(text)]
        if negative_text is not None:
            tokens += [tokenizer.tokenize(negative_text)]
        lengths = [len(t) for t in tokens]
        N = max(lengths)
        tokens = [t + [0] * (N - len(t)) for t in tokens]
        tokens = mx.array(tokens)

        return tokens

    def _get_text_conditioning(
        self,
        text: str,
        n_images: int = 1,
        cfg_weight: float = 7.5,
        negative_text: str = "",
    ):
        # Tokenize the text
        tokens = self._tokenize(
            self.tokenizer, text, (negative_text if cfg_weight > 1 else None)
        )

        # Compute the features
        conditioning = self.text_encoder(tokens).last_hidden_state

        # Repeat the conditioning for each of the generated images
        if n_images > 1:
            conditioning = mx.repeat(conditioning, n_images, axis=0)

        return conditioning

    def _denoising_step(
        self, x_t, t, t_prev, conditioning, cfg_weight: float = 7.5, text_time=None
    ):
        x_t_unet = mx.concatenate([x_t] * 2, axis=0) if cfg_weight > 1 else x_t
        t_unet = mx.broadcast_to(t, [len(x_t_unet)])
        eps_pred = self.unet(
            x_t_unet, t_unet, encoder_x=conditioning, text_time=text_time
        )

        if cfg_weight > 1:
            eps_text, eps_neg = eps_pred.split(2)
            eps_pred = eps_neg + cfg_weight * (eps_text - eps_neg)

        x_t_prev = self.sampler.step(eps_pred, x_t, t, t_prev)

        return x_t_prev

    def _denoising_loop(
        self,
        x_T,
        T,
        conditioning,
        num_steps: int = 50,
        cfg_weight: float = 7.5,
        text_time=None,
    ):
        x_t = x_T
        for t, t_prev in self.sampler.timesteps(
            num_steps, start_time=T, dtype=self.dtype
        ):
            x_t = self._denoising_step(
                x_t, t, t_prev, conditioning, cfg_weight, text_time
            )
            yield x_t

    def generate_latents(
        self,
        text: str,
        n_images: int = 1,
        num_steps: int = 50,
        cfg_weight: float = 7.5,
        negative_text: str = "",
        latent_size: Tuple[int] = (64, 64),
        seed=None,
    ):
        # Set the PRNG state
        seed = int(time.time()) if seed is None else seed
        mx.random.seed(seed)

        # Get the text conditioning
        conditioning = self._get_text_conditioning(
            text, n_images, cfg_weight, negative_text
        )

        # Create the latent variables
        x_T = self.sampler.sample_prior(
            (n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype
        )

        # Perform the denoising loop
        yield from self._denoising_loop(
            x_T, self.sampler.max_time, conditioning, num_steps, cfg_weight
        )

    def generate_latents_from_image(
        self,
        image,
        text: str,
        n_images: int = 1,
        strength: float = 0.8,
        num_steps: int = 50,
        cfg_weight: float = 7.5,
        negative_text: str = "",
        seed=None,
    ):
        # Set the PRNG state
        seed = int(time.time()) if seed is None else seed
        mx.random.seed(seed)

        # Define the num steps and start step
        start_step = self.sampler.max_time * strength
        num_steps = int(num_steps * strength)

        # Get the text conditioning
        conditioning = self._get_text_conditioning(
            text, n_images, cfg_weight, negative_text
        )

        # Get the latents from the input image and add noise according to the
        # start time.
        x_0, _ = self.autoencoder.encode(image[None])
        x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:])
        x_T = self.sampler.add_noise(x_0, mx.array(start_step))

        # Perform the denoising loop
        yield from self._denoising_loop(
            x_T, start_step, conditioning, num_steps, cfg_weight
        )

    def decode(self, x_t):
        x = self.autoencoder.decode(x_t)
        x = mx.clip(x / 2 + 0.5, 0, 1)
        return x


class StableDiffusionXL(StableDiffusion):
    def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False):
        super().__init__(model, float16)

        self.sampler = SimpleEulerAncestralSampler(self.diffusion_config)

        self.text_encoder_1 = self.text_encoder
        self.tokenizer_1 = self.tokenizer
        del self.tokenizer, self.text_encoder

        self.text_encoder_2 = load_text_encoder(
            model,
            float16,
            model_key="text_encoder_2",
        )
        self.tokenizer_2 = load_tokenizer(
            model,
            merges_key="tokenizer_2_merges",
            vocab_key="tokenizer_2_vocab",
        )

    def ensure_models_are_loaded(self):
        mx.eval(self.unet.parameters())
        mx.eval(self.text_encoder_1.parameters())
        mx.eval(self.text_encoder_2.parameters())
        mx.eval(self.autoencoder.parameters())

    def _get_text_conditioning(
        self,
        text: str,
        n_images: int = 1,
        cfg_weight: float = 7.5,
        negative_text: str = "",
    ):
        tokens_1 = self._tokenize(
            self.tokenizer_1,
            text,
            (negative_text if cfg_weight > 1 else None),
        )
        tokens_2 = self._tokenize(
            self.tokenizer_2,
            text,
            (negative_text if cfg_weight > 1 else None),
        )

        conditioning_1 = self.text_encoder_1(tokens_1)
        conditioning_2 = self.text_encoder_2(tokens_2)
        conditioning = mx.concatenate(
            [conditioning_1.hidden_states[-2], conditioning_2.hidden_states[-2]],
            axis=-1,
        )
        pooled_conditioning = conditioning_2.pooled_output

        if n_images > 1:
            conditioning = mx.repeat(conditioning, n_images, axis=0)
            pooled_conditioning = mx.repeat(pooled_conditioning, n_images, axis=0)

        return conditioning, pooled_conditioning

    def generate_latents(
        self,
        text: str,
        n_images: int = 1,
        num_steps: int = 2,
        cfg_weight: float = 0.0,
        negative_text: str = "",
        latent_size: Tuple[int] = (64, 64),
        seed=None,
    ):
        # Set the PRNG state
        seed = int(time.time()) if seed is None else seed
        mx.random.seed(seed)

        # Get the text conditioning
        conditioning, pooled_conditioning = self._get_text_conditioning(
            text, n_images, cfg_weight, negative_text
        )
        text_time = (
            pooled_conditioning,
            mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)),
        )

        # Create the latent variables
        x_T = self.sampler.sample_prior(
            (n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype
        )

        # Perform the denoising loop
        yield from self._denoising_loop(
            x_T,
            self.sampler.max_time,
            conditioning,
            num_steps,
            cfg_weight,
            text_time=text_time,
        )

    def generate_latents_from_image(
        self,
        image,
        text: str,
        n_images: int = 1,
        strength: float = 0.8,
        num_steps: int = 2,
        cfg_weight: float = 0.0,
        negative_text: str = "",
        seed=None,
    ):
        # Set the PRNG state
        seed = seed or int(time.time())
        mx.random.seed(seed)

        # Define the num steps and start step
        start_step = self.sampler.max_time * strength
        num_steps = int(num_steps * strength)

        # Get the text conditioning
        conditioning, pooled_conditioning = self._get_text_conditioning(
            text, n_images, cfg_weight, negative_text
        )
        text_time = (
            pooled_conditioning,
            mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)),
        )

        # Get the latents from the input image and add noise according to the
        # start time.
        x_0, _ = self.autoencoder.encode(image[None])
        x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:])
        x_T = self.sampler.add_noise(x_0, mx.array(start_step))

        # Perform the denoising loop
        yield from self._denoising_loop(
            x_T, start_step, conditioning, num_steps, cfg_weight, text_time=text_time
        )
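The file above is the packaged copy of the MLX Stable Diffusion pipeline: generate_latents is a generator that yields the latent image after every denoising step, and decode maps the final latents back to pixel space. A minimal driver sketch follows; the import path (an apparently duplicated copy also ships under nexaai/mlx_backend/sd/modeling in this wheel), the prompt, and the image-conversion details are illustrative assumptions rather than part of the diff.

import numpy as np
import mlx.core as mx

# Assumed import location; see the file listing above for the two packaged copies.
from nexaai.mlx_backend.image_gen.stable_diffusion import StableDiffusion

sd = StableDiffusion(float16=True)   # weights resolved via _DEFAULT_MODEL in model_io
sd.ensure_models_are_loaded()        # force-evaluate the lazy parameters up front

latents = None
for latents in sd.generate_latents(
    "a photo of an astronaut riding a horse",  # illustrative prompt
    n_images=1,
    num_steps=50,
    cfg_weight=7.5,
    seed=0,
):
    mx.eval(latents)                 # materialize each denoising step

images = sd.decode(latents)          # decoded images, values clipped to [0, 1]
mx.eval(images)
pixels = (np.array(images[0]) * 255).astype(np.uint8)  # e.g. hand to PIL for saving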
@@ -0,0 +1,116 @@
# Copyright © 2023-2024 Apple Inc.

from dataclasses import dataclass
from typing import List, Optional

import mlx.core as mx
import mlx.nn as nn

from .config import CLIPTextModelConfig

_ACTIVATIONS = {"quick_gelu": nn.gelu_fast_approx, "gelu": nn.gelu}


@dataclass
class CLIPOutput:
    # The last_hidden_state indexed at the EOS token and possibly projected if
    # the model has a projection layer
    pooled_output: Optional[mx.array] = None

    # The full sequence output of the transformer after the final layernorm
    last_hidden_state: Optional[mx.array] = None

    # A list of hidden states corresponding to the outputs of the transformer layers
    hidden_states: Optional[List[mx.array]] = None


class CLIPEncoderLayer(nn.Module):
    """The transformer encoder layer from CLIP."""

    def __init__(self, model_dims: int, num_heads: int, activation: str):
        super().__init__()

        self.layer_norm1 = nn.LayerNorm(model_dims)
        self.layer_norm2 = nn.LayerNorm(model_dims)

        self.attention = nn.MultiHeadAttention(model_dims, num_heads)
        # Add biases to the attention projections to match CLIP
        self.attention.query_proj.bias = mx.zeros(model_dims)
        self.attention.key_proj.bias = mx.zeros(model_dims)
        self.attention.value_proj.bias = mx.zeros(model_dims)
        self.attention.out_proj.bias = mx.zeros(model_dims)

        self.linear1 = nn.Linear(model_dims, 4 * model_dims)
        self.linear2 = nn.Linear(4 * model_dims, model_dims)

        self.act = _ACTIVATIONS[activation]

    def __call__(self, x, attn_mask=None):
        y = self.layer_norm1(x)
        y = self.attention(y, y, y, attn_mask)
        x = y + x

        y = self.layer_norm2(x)
        y = self.linear1(y)
        y = self.act(y)
        y = self.linear2(y)
        x = y + x

        return x


class CLIPTextModel(nn.Module):
    """Implements the text encoder transformer from CLIP."""

    def __init__(self, config: CLIPTextModelConfig):
        super().__init__()

        self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)
        self.position_embedding = nn.Embedding(config.max_length, config.model_dims)
        self.layers = [
            CLIPEncoderLayer(config.model_dims, config.num_heads, config.hidden_act)
            for i in range(config.num_layers)
        ]
        self.final_layer_norm = nn.LayerNorm(config.model_dims)

        if config.projection_dim is not None:
            self.text_projection = nn.Linear(
                config.model_dims, config.projection_dim, bias=False
            )

    def _get_mask(self, N, dtype):
        indices = mx.arange(N)
        mask = indices[:, None] < indices[None]
        mask = mask.astype(dtype) * (-6e4 if dtype == mx.float16 else -1e9)
        return mask

    def __call__(self, x):
        # Extract some shapes
        B, N = x.shape
        eos_tokens = x.argmax(-1)

        # Compute the embeddings
        x = self.token_embedding(x)
        x = x + self.position_embedding.weight[:N]

        # Compute the features from the transformer
        mask = self._get_mask(N, x.dtype)
        hidden_states = []
        for l in self.layers:
            x = l(x, mask)
            hidden_states.append(x)

        # Apply the final layernorm and return
        x = self.final_layer_norm(x)
        last_hidden_state = x

        # Select the EOS token
        pooled_output = x[mx.arange(len(x)), eos_tokens]
        if "text_projection" in self:
            pooled_output = self.text_projection(pooled_output)

        return CLIPOutput(
            pooled_output=pooled_output,
            last_hidden_state=last_hidden_state,
            hidden_states=hidden_states,
        )
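To make the CLIPOutput fields concrete, here is a small, self-contained sketch that builds a deliberately tiny text encoder and runs a padded token batch through it. The config values and token ids are made up for illustration (real checkpoints use the CLIPTextModelConfig defaults from the next file), and the import paths are assumptions; note that pooled_output is taken at the per-row argmax of the token ids, which lands on the EOS position because CLIP's EOS token has the largest id in its vocabulary.

import mlx.core as mx

# Assumed import paths; the sd/modeling copy in this wheel exposes the same classes.
from nexaai.mlx_backend.image_gen.stable_diffusion.clip import CLIPTextModel
from nexaai.mlx_backend.image_gen.stable_diffusion.config import CLIPTextModelConfig

# Deliberately tiny, hypothetical configuration (real models use the defaults:
# 23 layers, 1024 dims, 16 heads, vocab 49408).
cfg = CLIPTextModelConfig(
    num_layers=2, model_dims=64, num_heads=4, max_length=16, vocab_size=1000
)
encoder = CLIPTextModel(cfg)

# Two zero-padded prompts; id 999 stands in for EOS and is the largest id in
# this toy vocabulary, so x.argmax(-1) finds its position in each row.
tokens = mx.array(
    [
        [49, 12, 7, 999, 0, 0],
        [5, 999, 0, 0, 0, 0],
    ]
)
out = encoder(tokens)
print(out.last_hidden_state.shape)  # (2, 6, 64): full sequence after the final norm
print(out.pooled_output.shape)      # (2, 64): hidden state at each row's EOS token
print(len(out.hidden_states))       # 2: one entry per encoder layer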
@@ -0,0 +1,65 @@
# Copyright © 2023-2024 Apple Inc.

from dataclasses import dataclass
from typing import Optional, Tuple


@dataclass
class AutoencoderConfig:
    in_channels: int = 3
    out_channels: int = 3
    latent_channels_out: int = 8
    latent_channels_in: int = 4
    block_out_channels: Tuple[int] = (128, 256, 512, 512)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    scaling_factor: float = 0.18215


@dataclass
class CLIPTextModelConfig:
    num_layers: int = 23
    model_dims: int = 1024
    num_heads: int = 16
    max_length: int = 77
    vocab_size: int = 49408
    projection_dim: Optional[int] = None
    hidden_act: str = "quick_gelu"


@dataclass
class UNetConfig:
    in_channels: int = 4
    out_channels: int = 4
    conv_in_kernel: int = 3
    conv_out_kernel: int = 3
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: Tuple[int] = (2, 2, 2, 2)
    mid_block_layers: int = 2
    transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)
    num_attention_heads: Tuple[int] = (5, 10, 20, 20)
    cross_attention_dim: Tuple[int] = (1024,) * 4
    norm_num_groups: int = 32
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = (
        "UpBlock2D",
        "CrossAttnUpBlock2D",
        "CrossAttnUpBlock2D",
        "CrossAttnUpBlock2D",
    )
    addition_embed_type: Optional[str] = None
    addition_time_embed_dim: Optional[int] = None
    projection_class_embeddings_input_dim: Optional[int] = None


@dataclass
class DiffusionConfig:
    beta_schedule: str = "scaled_linear"
    beta_start: float = 0.00085
    beta_end: float = 0.012
    num_train_steps: int = 1000
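These dataclasses are plain containers with Stable Diffusion 2.x-style defaults; the loaders in model_io.py (not included in this diff) presumably populate them from a checkpoint's JSON config. A hedged sketch of such a mapping, using Hugging Face CLIP text-encoder config keys as an assumed input format:

import json

# Illustrative only: the actual key mapping lives in model_io.py, which this
# hunk does not show.
with open("text_encoder/config.json") as f:
    raw = json.load(f)

text_config = CLIPTextModelConfig(
    num_layers=raw["num_hidden_layers"],
    model_dims=raw["hidden_size"],
    num_heads=raw["num_attention_heads"],
    max_length=raw["max_position_embeddings"],
    vocab_size=raw["vocab_size"],
    # Relevant for the second SDXL text encoder, whose projected pooled_output
    # feeds the text_time embedding in StableDiffusionXL.
    projection_dim=raw.get("projection_dim"),
    hidden_act=raw.get("hidden_act", "quick_gelu"),
)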