nexaai 1.0.29__cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nexaai/__init__.py +99 -0
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +4 -0
- nexaai/asr.py +68 -0
- nexaai/asr_impl/__init__.py +0 -0
- nexaai/asr_impl/mlx_asr_impl.py +93 -0
- nexaai/asr_impl/pybind_asr_impl.py +127 -0
- nexaai/base.py +39 -0
- nexaai/binds/__init__.py +7 -0
- nexaai/binds/asr_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/common_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/cpu_gpu/libggml-base.dylib +0 -0
- nexaai/binds/cpu_gpu/libggml-cpu.so +0 -0
- nexaai/binds/cpu_gpu/libggml-metal.so +0 -0
- nexaai/binds/cpu_gpu/libggml.dylib +0 -0
- nexaai/binds/cpu_gpu/libmtmd.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_cpu_gpu.dylib +0 -0
- nexaai/binds/cpu_gpu/libnexa_plugin.dylib +0 -0
- nexaai/binds/cv_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/diarize_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/embedder_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/llm_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/metal/libnexa_plugin.dylib +0 -0
- nexaai/binds/metal/py-lib/ml.py +888 -0
- nexaai/binds/metal/py-lib/mlx_audio/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/binds/metal/py-lib/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/binds/metal/py-lib/mlx_audio/server.py +525 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/binds/metal/py-lib/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/generate.py +174 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/binds/metal/py-lib/mlx_audio/stt/utils.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/audio_player.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/convert.py +71 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/generate.py +449 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/base.py +84 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/binds/metal/py-lib/mlx_audio/tts/utils.py +337 -0
- nexaai/binds/metal/py-lib/mlx_audio/utils.py +237 -0
- nexaai/binds/metal/py-lib/mlx_audio/version.py +1 -0
- nexaai/binds/metal/py-lib/profiling.py +239 -0
- nexaai/binds/nexaml/libfftw3.3.dylib +0 -0
- nexaai/binds/nexaml/libfftw3f.3.dylib +0 -0
- nexaai/binds/nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexaml/libggml.dylib +0 -0
- nexaai/binds/nexaml/libmp3lame.0.dylib +0 -0
- nexaai/binds/nexaml/libmpg123.0.dylib +0 -0
- nexaai/binds/nexaml/libnexa-mm-process.dylib +0 -0
- nexaai/binds/nexaml/libnexa-sampling.dylib +0 -0
- nexaai/binds/nexaml/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexaml/libnexaproc.dylib +0 -0
- nexaai/binds/nexaml/libomp.dylib +0 -0
- nexaai/binds/nexaml/libqwen3-vl.dylib +0 -0
- nexaai/binds/nexaml/libqwen3vl-vision.dylib +0 -0
- nexaai/binds/rerank_bind.cpython-310-darwin.so +0 -0
- nexaai/binds/vlm_bind.cpython-310-darwin.so +0 -0
- nexaai/common.py +106 -0
- nexaai/cv.py +95 -0
- nexaai/cv_impl/__init__.py +0 -0
- nexaai/cv_impl/mlx_cv_impl.py +91 -0
- nexaai/cv_impl/pybind_cv_impl.py +124 -0
- nexaai/diarize.py +80 -0
- nexaai/diarize_impl/__init__.py +1 -0
- nexaai/diarize_impl/pybind_diarize_impl.py +125 -0
- nexaai/embedder.py +73 -0
- nexaai/embedder_impl/__init__.py +0 -0
- nexaai/embedder_impl/mlx_embedder_impl.py +118 -0
- nexaai/embedder_impl/pybind_embedder_impl.py +96 -0
- nexaai/image_gen.py +141 -0
- nexaai/image_gen_impl/__init__.py +0 -0
- nexaai/image_gen_impl/mlx_image_gen_impl.py +292 -0
- nexaai/image_gen_impl/pybind_image_gen_impl.py +85 -0
- nexaai/llm.py +98 -0
- nexaai/llm_impl/__init__.py +0 -0
- nexaai/llm_impl/mlx_llm_impl.py +271 -0
- nexaai/llm_impl/pybind_llm_impl.py +238 -0
- nexaai/log.py +92 -0
- nexaai/mlx_backend/asr/__init__.py +12 -0
- nexaai/mlx_backend/asr/interface.py +122 -0
- nexaai/mlx_backend/common/__init__.py +0 -0
- nexaai/mlx_backend/common/utils.py +25 -0
- nexaai/mlx_backend/cv/__init__.py +0 -0
- nexaai/mlx_backend/cv/generate.py +195 -0
- nexaai/mlx_backend/cv/interface.py +162 -0
- nexaai/mlx_backend/cv/main.py +81 -0
- nexaai/mlx_backend/cv/modeling/pp_ocr_v4.py +1736 -0
- nexaai/mlx_backend/embedding/__init__.py +0 -0
- nexaai/mlx_backend/embedding/generate.py +333 -0
- nexaai/mlx_backend/embedding/interface.py +617 -0
- nexaai/mlx_backend/embedding/main.py +173 -0
- nexaai/mlx_backend/embedding/modeling/__init__.py +0 -0
- nexaai/mlx_backend/embedding/modeling/nexa_jina_v2.py +399 -0
- nexaai/mlx_backend/image_gen/__init__.py +1 -0
- nexaai/mlx_backend/image_gen/generate_sd.py +244 -0
- nexaai/mlx_backend/image_gen/interface.py +82 -0
- nexaai/mlx_backend/image_gen/main.py +281 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/__init__.py +306 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/clip.py +116 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/config.py +65 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/model_io.py +386 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/sampler.py +105 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/tokenizer.py +100 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/unet.py +460 -0
- nexaai/mlx_backend/image_gen/stable_diffusion/vae.py +274 -0
- nexaai/mlx_backend/llm/__init__.py +0 -0
- nexaai/mlx_backend/llm/generate.py +149 -0
- nexaai/mlx_backend/llm/interface.py +764 -0
- nexaai/mlx_backend/llm/main.py +68 -0
- nexaai/mlx_backend/ml.py +888 -0
- nexaai/mlx_backend/mlx_audio/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/__init__.py +5 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/activation.py +51 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/amp.py +96 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/bigvgan.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/conv.py +114 -0
- nexaai/mlx_backend/mlx_audio/codec/models/bigvgan/resample.py +177 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/base.py +228 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/dac.py +285 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/layers.py +129 -0
- nexaai/mlx_backend/mlx_audio/codec/models/descript/nn/quantize.py +149 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/encodec/encodec.py +777 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/mimi.py +286 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/__init__.py +20 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/conv.py +398 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/kv_cache.py +199 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/quantization.py +179 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/seanet.py +314 -0
- nexaai/mlx_backend/mlx_audio/codec/models/mimi/modules/transformer.py +256 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model.py +260 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/model_v2.py +383 -0
- nexaai/mlx_backend/mlx_audio/codec/models/s3/utils.py +122 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/attention.py +97 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/layers.py +306 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/snac.py +154 -0
- nexaai/mlx_backend/mlx_audio/codec/models/snac/vq.py +135 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/mel.py +33 -0
- nexaai/mlx_backend/mlx_audio/codec/models/vocos/vocos.py +359 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_bigvgan.py +54 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_descript.py +109 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_encodec.py +58 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_mimi.py +22 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_s3.py +25 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_snac.py +40 -0
- nexaai/mlx_backend/mlx_audio/codec/tests/test_vocos.py +93 -0
- nexaai/mlx_backend/mlx_audio/server.py +525 -0
- nexaai/mlx_backend/mlx_audio/sts/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/sts/tests/test_voice_pipeline.py +156 -0
- nexaai/mlx_backend/mlx_audio/sts/voice_pipeline.py +327 -0
- nexaai/mlx_backend/mlx_audio/stt/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/generate.py +174 -0
- nexaai/mlx_backend/mlx_audio/stt/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/alignment.py +248 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/attention.py +187 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/audio.py +76 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/conformer.py +331 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/ctc.py +34 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/parakeet.py +604 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/rnnt.py +157 -0
- nexaai/mlx_backend/mlx_audio/stt/models/parakeet/tokenizer.py +2 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/feature_extractor.py +757 -0
- nexaai/mlx_backend/mlx_audio/stt/models/wav2vec/wav2vec.py +738 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/audio.py +82 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/decoding.py +742 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/timing.py +329 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/tokenizer.py +398 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/whisper.py +862 -0
- nexaai/mlx_backend/mlx_audio/stt/models/whisper/writers.py +268 -0
- nexaai/mlx_backend/mlx_audio/stt/tests/test_models.py +381 -0
- nexaai/mlx_backend/mlx_audio/stt/utils.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/audio_player.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/convert.py +71 -0
- nexaai/mlx_backend/mlx_audio/tts/generate.py +449 -0
- nexaai/mlx_backend/mlx_audio/tts/models/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/bark.py +528 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/isftnet.py +12 -0
- nexaai/mlx_backend/mlx_audio/tts/models/bark/pipeline.py +442 -0
- nexaai/mlx_backend/mlx_audio/tts/models/base.py +84 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/audio.py +287 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/config.py +256 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/dia.py +592 -0
- nexaai/mlx_backend/mlx_audio/tts/models/dia/layers.py +870 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/attention.py +180 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/bigvgan.py +124 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/conformer.py +247 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/asp.py +59 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/ecapa_tdnn.py +91 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/se_res2net.py +132 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/ecapa_tdnn/tdnn.py +42 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/gpt2.py +38 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/indextts.py +412 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/mel.py +37 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/normalize.py +294 -0
- nexaai/mlx_backend/mlx_audio/tts/models/indextts/perceiver.py +62 -0
- nexaai/mlx_backend/mlx_audio/tts/models/interpolate.py +108 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/__init__.py +4 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/istftnet.py +979 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/kokoro.py +331 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/modules.py +659 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/pipeline.py +453 -0
- nexaai/mlx_backend/mlx_audio/tts/models/kokoro/voice.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/llama/llama.py +324 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/audio_processor.py +351 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/dac_interface.py +162 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/default_speaker.json +461 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/outetts.py +255 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/prompt_processor.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/models/outetts/tokens.py +36 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/__init__.py +3 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/attention.py +195 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/sesame.py +633 -0
- nexaai/mlx_backend/mlx_audio/tts/models/sesame/watermarking.py +105 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/audio_tokenizer.py +138 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/bicodec.py +269 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/blocks/sampler.py +111 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_decoder.py +120 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/feat_encoder.py +136 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/encoder_decoder/wave_generator.py +113 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/finite_scalar_quantization.py +238 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual.py +209 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/residual_fsq.py +309 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/__init__.py +1 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/ecapa_tdnn.py +283 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/perceiver_encoder.py +326 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/pooling_layers.py +297 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/modules/speaker/speaker_encoder.py +155 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/spark.py +382 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/audio.py +220 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/file.py +221 -0
- nexaai/mlx_backend/mlx_audio/tts/models/spark/utils/token_parser.py +181 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/__init__.py +0 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_base.py +66 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_convert.py +173 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_interpolate.py +88 -0
- nexaai/mlx_backend/mlx_audio/tts/tests/test_models.py +974 -0
- nexaai/mlx_backend/mlx_audio/tts/utils.py +337 -0
- nexaai/mlx_backend/mlx_audio/utils.py +237 -0
- nexaai/mlx_backend/mlx_audio/version.py +1 -0
- nexaai/mlx_backend/profiling.py +239 -0
- nexaai/mlx_backend/rerank/__init__.py +0 -0
- nexaai/mlx_backend/rerank/generate.py +174 -0
- nexaai/mlx_backend/rerank/interface.py +287 -0
- nexaai/mlx_backend/rerank/main.py +127 -0
- nexaai/mlx_backend/rerank/modeling/__init__.py +0 -0
- nexaai/mlx_backend/rerank/modeling/nexa_jina_rerank.py +330 -0
- nexaai/mlx_backend/sd/__init__.py +1 -0
- nexaai/mlx_backend/sd/interface.py +362 -0
- nexaai/mlx_backend/sd/main.py +286 -0
- nexaai/mlx_backend/sd/modeling/__init__.py +306 -0
- nexaai/mlx_backend/sd/modeling/clip.py +116 -0
- nexaai/mlx_backend/sd/modeling/config.py +65 -0
- nexaai/mlx_backend/sd/modeling/model_io.py +385 -0
- nexaai/mlx_backend/sd/modeling/sampler.py +105 -0
- nexaai/mlx_backend/sd/modeling/tokenizer.py +100 -0
- nexaai/mlx_backend/sd/modeling/unet.py +460 -0
- nexaai/mlx_backend/sd/modeling/vae.py +274 -0
- nexaai/mlx_backend/tts/__init__.py +12 -0
- nexaai/mlx_backend/tts/interface.py +276 -0
- nexaai/mlx_backend/vlm/__init__.py +3 -0
- nexaai/mlx_backend/vlm/generate.py +572 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +374 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +259 -0
- nexaai/mlx_backend/vlm/interface.py +559 -0
- nexaai/mlx_backend/vlm/main.py +365 -0
- nexaai/mlx_backend/vlm/modeling/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/convert.py +68 -0
- nexaai/mlx_backend/vlm/modeling/models/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/aya_vision.py +193 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/interpolate.py +186 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/language.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/aya_vision/vision.py +503 -0
- nexaai/mlx_backend/vlm/modeling/models/base.py +202 -0
- nexaai/mlx_backend/vlm/modeling/models/cache.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/conversation.py +264 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +472 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/language.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/deepseek_vl_v2/vision.py +356 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/florence2.py +366 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/language.py +488 -0
- nexaai/mlx_backend/vlm/modeling/models/florence2/vision.py +591 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/gemma3.py +213 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/language.py +315 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3/vision.py +238 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/audio.py +1038 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/config.py +139 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/gemma3n.py +322 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/language.py +629 -0
- nexaai/mlx_backend/vlm/modeling/models/gemma3n/vision.py +1022 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/idefics2.py +294 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/language.py +191 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics2/vision.py +267 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/idefics3.py +175 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/language.py +192 -0
- nexaai/mlx_backend/vlm/modeling/models/idefics3/vision.py +233 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/internvl_chat.py +140 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/language.py +220 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/processor.py +393 -0
- nexaai/mlx_backend/vlm/modeling/models/internvl_chat/vision.py +293 -0
- nexaai/mlx_backend/vlm/modeling/models/kernels.py +307 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/kimi_vl.py +143 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/language.py +509 -0
- nexaai/mlx_backend/vlm/modeling/models/kimi_vl/vision.py +522 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/language.py +386 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/llama4.py +138 -0
- nexaai/mlx_backend/vlm/modeling/models/llama4/vision.py +560 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/language.py +240 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/llava.py +153 -0
- nexaai/mlx_backend/vlm/modeling/models/llava/vision.py +259 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/language.py +236 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/llava_bunny.py +256 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_bunny/vision.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/llava_next.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/llava_next/vision.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mistral3/mistral3.py +283 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/language.py +416 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/mllama.py +172 -0
- nexaai/mlx_backend/vlm/modeling/models/mllama/vision.py +499 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/language.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/molmo.py +133 -0
- nexaai/mlx_backend/vlm/modeling/models/molmo/vision.py +465 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/__init__.py +10 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/language.py +230 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/multi_modality.py +385 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/sam.py +557 -0
- nexaai/mlx_backend/vlm/modeling/models/multi_modality/vision.py +526 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/language.py +282 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/paligemma.py +160 -0
- nexaai/mlx_backend/vlm/modeling/models/paligemma/vision.py +242 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/language.py +21 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/phi3_v.py +243 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/su_rope.py +71 -0
- nexaai/mlx_backend/vlm/modeling/models/phi3_v/vision.py +324 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/language.py +229 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/pixtral.py +161 -0
- nexaai/mlx_backend/vlm/modeling/models/pixtral/vision.py +320 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/config.py +108 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +168 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_5_vl/vision.py +414 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/__init__.py +2 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/config.py +104 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/language.py +490 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/qwen2_vl.py +167 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen2_vl/vision.py +312 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +1262 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1308 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/__init__.py +8 -0
- nexaai/mlx_backend/vlm/modeling/models/smolvlm/smolvlm.py +62 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_5_vl.py +209 -0
- nexaai/mlx_backend/vlm/modeling/processing_qwen2_vl.py +215 -0
- nexaai/mlx_backend/vlm/modeling/prompt_utils.py +474 -0
- nexaai/mlx_backend/vlm/modeling/sample_utils.py +39 -0
- nexaai/mlx_backend/vlm/modeling/tokenizer_utils.py +344 -0
- nexaai/mlx_backend/vlm/modeling/trainer/__init__.py +9 -0
- nexaai/mlx_backend/vlm/modeling/trainer/lora.py +70 -0
- nexaai/mlx_backend/vlm/modeling/trainer/trainer.py +296 -0
- nexaai/mlx_backend/vlm/modeling/trainer/utils.py +160 -0
- nexaai/mlx_backend/vlm/modeling/utils.py +928 -0
- nexaai/rerank.py +57 -0
- nexaai/rerank_impl/__init__.py +0 -0
- nexaai/rerank_impl/mlx_rerank_impl.py +94 -0
- nexaai/rerank_impl/pybind_rerank_impl.py +136 -0
- nexaai/runtime.py +68 -0
- nexaai/runtime_error.py +24 -0
- nexaai/tts.py +75 -0
- nexaai/tts_impl/__init__.py +0 -0
- nexaai/tts_impl/mlx_tts_impl.py +94 -0
- nexaai/tts_impl/pybind_tts_impl.py +43 -0
- nexaai/utils/decode.py +18 -0
- nexaai/utils/manifest_utils.py +531 -0
- nexaai/utils/model_manager.py +1745 -0
- nexaai/utils/model_types.py +49 -0
- nexaai/utils/progress_tracker.py +389 -0
- nexaai/utils/quantization_utils.py +245 -0
- nexaai/vlm.py +130 -0
- nexaai/vlm_impl/__init__.py +0 -0
- nexaai/vlm_impl/mlx_vlm_impl.py +259 -0
- nexaai/vlm_impl/pybind_vlm_impl.py +275 -0
- nexaai-1.0.29.dist-info/METADATA +35 -0
- nexaai-1.0.29.dist-info/RECORD +580 -0
- nexaai-1.0.29.dist-info/WHEEL +5 -0
- nexaai-1.0.29.dist-info/top_level.txt +1 -0
@@ -0,0 +1,93 @@
+import unittest
+
+import mlx.core as mx
+
+from ..models.vocos import Vocos
+from ..models.vocos.mel import log_mel_spectrogram
+
+config_mel = {
+    "feature_extractor": {
+        "class_path": "vocos.feature_extractors.MelSpectrogramFeatures",
+        "init_args": {
+            "sample_rate": 24000,
+            "n_fft": 1024,
+            "hop_length": 256,
+            "n_mels": 100,
+        },
+    },
+    "backbone": {
+        "class_path": "vocos.models.VocosBackbone",
+        "init_args": {
+            "input_channels": 100,
+            "dim": 512,
+            "intermediate_dim": 1536,
+            "num_layers": 8,
+        },
+    },
+    "head": {
+        "class_path": "vocos.heads.ISTFTHead",
+        "init_args": {"dim": 512, "n_fft": 1024, "hop_length": 256},
+    },
+}
+
+config_encodec = {
+    "feature_extractor": {
+        "class_path": "vocos.feature_extractors.EncodecFeatures",
+        "init_args": {
+            "encodec_model": "encodec_24khz",
+            "bandwidths": [1.5, 3.0, 6.0, 12.0, 24.0],
+        },
+    },
+    "backbone": {
+        "class_path": "vocos.models.VocosBackbone",
+        "init_args": {
+            "input_channels": 128,
+            "dim": 384,
+            "intermediate_dim": 1152,
+            "num_layers": 8,
+            "adanorm_num_embeddings": 4,
+        },
+    },
+    "head": {
+        "class_path": "vocos.heads.ISTFTHead",
+        "init_args": {"dim": 384, "n_fft": 1280, "hop_length": 320, "padding": "same"},
+    },
+}
+
+
+class TestVocos(unittest.TestCase):
+    """Test Vocos model encoding and decoding."""
+
+    def test_vocos_24khz(self):
+        audio = mx.zeros((120_000))
+
+        model = Vocos.from_hparams(config_mel)
+
+        # reconstruct from mel spec
+        reconstructed_audio = model(audio)
+        self.assertEqual(reconstructed_audio.shape, (119552,))
+
+        # decode from mel spec
+        mel_spec = log_mel_spectrogram(audio)
+        decoded = model.decode(mel_spec)
+        self.assertEqual(decoded.shape, (119552,))
+
+        model = Vocos.from_hparams(config_encodec)
+
+        # reconstruct from encodec codes
+        bandwidth_id = [3, 3, 3, 3]  # 24kbps
+        reconstructed_audio = model(
+            audio, bandwidth_id=mx.array(bandwidth_id)[None, ...]
+        )
+        self.assertEqual(reconstructed_audio.shape, (119680,))
+
+        # decode with encodec codes
+        codes = model.get_encodec_codes(audio, bandwidth_id=bandwidth_id)
+        decoded = model.decode_from_codes(
+            codes, bandwidth_id=mx.array(bandwidth_id)[None, ...]
+        )
+        self.assertEqual(decoded.shape, (119680,))
+
+
+if __name__ == "__main__":
+    unittest.main()
@@ -0,0 +1,525 @@
+import argparse
+import importlib.util
+import logging
+import os
+import sys
+import tempfile
+import uuid
+
+import numpy as np
+import requests
+import soundfile as sf
+import uvicorn
+from fastapi import FastAPI, Form
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastrtc import ReplyOnPause, Stream, get_stt_model
+from numpy.typing import NDArray
+from pydantic import BaseModel
+
+
+# Configure logging
+def setup_logging(verbose: bool = False):
+    level = logging.DEBUG if verbose else logging.INFO
+    format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    if verbose:
+        format_str = "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s"
+
+    logging.basicConfig(level=level, format=format_str)
+    return logging.getLogger("mlx_audio_server")
+
+
+logger = setup_logging()  # Will be updated with verbose setting in main()
+
+from mlx_audio.tts.generate import main as generate_main
+
+# Import from mlx_audio package
+from mlx_audio.tts.utils import load_model
+
+from .tts.audio_player import AudioPlayer
+
+app = FastAPI()
+
+# Add CORS middleware to allow requests from the same origin
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # Allow all origins, will be restricted by host binding
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Load the model once on server startup.
+# You can change the model path or pass arguments as needed.
+# For performance, load once globally:
+tts_model = None  # Will be loaded when the server starts
+audio_player = None  # Will be initialized when the server starts
+stt_model = get_stt_model()
+# Make sure the output folder for generated TTS files exists
+# Use an absolute path that's guaranteed to be writable
+OUTPUT_FOLDER = os.path.join(os.path.expanduser("~"), ".mlx_audio", "outputs")
+os.makedirs(OUTPUT_FOLDER, exist_ok=True)
+logger.debug(f"Using output folder: {OUTPUT_FOLDER}")
+
+
+def speech_to_speech_handler(
+    audio: tuple[int, NDArray[np.int16]], voice: str, speed: float, model: str
+):
+    text = stt_model.stt(audio)
+    for segment in tts_model.generate(
+        text=text,
+        voice=voice,
+        speed=speed,
+        lang_code=voice[0],
+        verbose=False,
+    ):
+        yield (24_000, np.array(segment.audio, copy=False))
+    yield (24_000, np.zeros(2_400, dtype=np.float32))
+
+
+stream = Stream(
+    ReplyOnPause(speech_to_speech_handler, output_sample_rate=24_000),
+    mode="send-receive",
+    modality="audio",
+)
+stream.mount(app)
+
+
+class SpeechToSpeechArgs(BaseModel):
+    voice: str
+    speed: float
+    model: str
+    webrtc_id: str
+
+
+@app.post("/speech_to_speech_input")
+def speech_to_speech_endpoint(args: SpeechToSpeechArgs):
+    stream.set_input(args.webrtc_id, args.voice, args.speed, args.model)
+    return {"status": "success"}
+
+
+@app.post("/tts")
+def tts_endpoint(
+    text: str = Form(...),
+    voice: str = Form("af_heart"),
+    speed: float = Form(1.0),
+    model: str = Form("mlx-community/Kokoro-82M-4bit"),
+):
+    """
+    POST an x-www-form-urlencoded form with 'text' (and optional 'voice', 'speed', and 'model').
+    We run TTS on the text, save the audio in a unique file,
+    and return JSON with the filename so the client can retrieve it.
+    """
+    global tts_model
+
+    if not text.strip():
+        return JSONResponse({"error": "Text is empty"}, status_code=400)
+
+    # Validate speed parameter
+    try:
+        speed_float = float(speed)
+        if speed_float < 0.5 or speed_float > 2.0:
+            return JSONResponse(
+                {"error": "Speed must be between 0.5 and 2.0"}, status_code=400
+            )
+    except ValueError:
+        return JSONResponse({"error": "Invalid speed value"}, status_code=400)
+
+    # Validate model parameter
+    valid_models = [
+        "mlx-community/Kokoro-82M-4bit",
+        "mlx-community/Kokoro-82M-6bit",
+        "mlx-community/Kokoro-82M-8bit",
+        "mlx-community/Kokoro-82M-bf16",
+    ]
+    if model not in valid_models:
+        return JSONResponse(
+            {"error": f"Invalid model. Must be one of: {', '.join(valid_models)}"},
+            status_code=400,
+        )
+
+    # Store current model repo_id for comparison
+    current_model_repo_id = (
+        getattr(tts_model, "repo_id", None) if tts_model is not None else None
+    )
+
+    # Load the model if it's not loaded or if a different model is requested
+    if tts_model is None or current_model_repo_id != model:
+        try:
+            logger.debug(f"Loading TTS model from {model}")
+            tts_model = load_model(model)
+            logger.debug("TTS model loaded successfully")
+        except Exception as e:
+            logger.error(f"Error loading TTS model: {str(e)}")
+            return JSONResponse(
+                {"error": f"Failed to load model: {str(e)}"}, status_code=500
+            )
+
+    # We'll do something like the code in model.generate() from the TTS library:
+    # Generate the unique filename
+    unique_id = str(uuid.uuid4())
+    filename = f"tts_{unique_id}.wav"
+    output_path = os.path.join(OUTPUT_FOLDER, filename)
+
+    logger.debug(
+        f"Generating TTS for text: '{text[:50]}...' with voice: {voice}, speed: {speed_float}, model: {model}"
+    )
+    logger.debug(f"Output file will be: {output_path}")
+
+    # We'll use the high-level "model.generate" method:
+    results = tts_model.generate(
+        text=text,
+        voice=voice,
+        speed=speed_float,
+        lang_code=voice[0],
+        verbose=False,
+    )
+
+    # We'll just gather all segments (if any) into a single wav
+    # It's typical for multi-segment text to produce multiple wave segments:
+    audio_arrays = []
+    for segment in results:
+        audio_arrays.append(segment.audio)
+
+    # If no segments, return error
+    if not audio_arrays:
+        logger.error("No audio segments generated")
+        return JSONResponse({"error": "No audio generated"}, status_code=500)
+
+    # Concatenate all segments
+    cat_audio = np.concatenate(audio_arrays, axis=0)
+
+    # Write the audio as a WAV
+    try:
+        sf.write(output_path, cat_audio, 24000)
+        logger.debug(f"Successfully wrote audio file to {output_path}")
+
+        # Verify the file exists
+        if not os.path.exists(output_path):
+            logger.error(f"File was not created at {output_path}")
+            return JSONResponse(
+                {"error": "Failed to create audio file"}, status_code=500
+            )
+
+        # Check file size
+        file_size = os.path.getsize(output_path)
+        logger.debug(f"File size: {file_size} bytes")
+
+        if file_size == 0:
+            logger.error("File was created but is empty")
+            return JSONResponse(
+                {"error": "Generated audio file is empty"}, status_code=500
+            )
+
+    except Exception as e:
+        logger.error(f"Error writing audio file: {str(e)}")
+        return JSONResponse(
+            {"error": f"Failed to save audio: {str(e)}"}, status_code=500
+        )
+
+    return {"filename": filename}
+
+
+@app.get("/audio/{filename}")
+def get_audio_file(filename: str):
+    """
+    Return an audio file from the outputs folder.
+    The user can GET /audio/<filename> to fetch the WAV file.
+    """
+    file_path = os.path.join(OUTPUT_FOLDER, filename)
+    logger.debug(f"Requested audio file: {file_path}")
+
+    if not os.path.exists(file_path):
+        logger.error(f"File not found: {file_path}")
+        # List files in the directory to help debug
+        try:
+            files = os.listdir(OUTPUT_FOLDER)
+            logger.debug(f"Files in output directory: {files}")
+        except Exception as e:
+            logger.error(f"Error listing output directory: {str(e)}")
+
+        return JSONResponse({"error": "File not found"}, status_code=404)
+
+    logger.debug(f"Serving audio file: {file_path}")
+    return FileResponse(file_path, media_type="audio/wav")
+
+
+@app.get("/")
+def root():
+    """
+    Serve the audio_player.html page or a fallback HTML if not found
+    """
+    try:
+        # Try to find the audio_player.html file in the package
+        static_dir = find_static_dir()
+        audio_player_path = os.path.join(static_dir, "audio_player.html")
+        return FileResponse(audio_player_path)
+    except Exception as e:
+        # If there's an error, return a simple HTML page with error information
+        return HTMLResponse(
+            content=f"""
+            <html>
+                <head><title>MLX-Audio TTS Server</title></head>
+                <body>
+                    <h1>MLX-Audio TTS Server</h1>
+                    <p>The server is running, but the web interface could not be loaded.</p>
+                    <p>Error: {str(e)}</p>
+                    <h2>API Endpoints</h2>
+                    <ul>
+                        <li><code>POST /tts</code> - Generate TTS audio</li>
+                        <li><code>GET /audio/{{filename}}</code> - Retrieve generated audio file</li>
+                    </ul>
+                </body>
+            </html>
+            """,
+            status_code=200,
+        )
+
+
+def find_static_dir():
+    """Find the static directory containing HTML files."""
+    # Try different methods to find the static directory
+
+    # Method 1: Use importlib.resources (Python 3.9+)
+    try:
+        import importlib.resources as pkg_resources
+
+        static_dir = pkg_resources.files("mlx_audio").joinpath("tts")
+        static_dir_str = str(static_dir)
+        if os.path.exists(static_dir_str):
+            return static_dir_str
+    except (ImportError, AttributeError):
+        pass
+
+    # Method 2: Use importlib_resources (Python 3.8)
+    try:
+        import importlib_resources
+
+        static_dir = importlib_resources.files("mlx_audio").joinpath("tts")
+        static_dir_str = str(static_dir)
+        if os.path.exists(static_dir_str):
+            return static_dir_str
+    except ImportError:
+        pass
+
+    # Method 3: Use pkg_resources
+    try:
+        static_dir_str = pkg_resources.resource_filename("mlx_audio", "tts")
+        if os.path.exists(static_dir_str):
+            return static_dir_str
+    except (ImportError, pkg_resources.DistributionNotFound):
+        pass
+
+    # Method 4: Try to find the module path directly
+    try:
+        module_spec = importlib.util.find_spec("mlx_audio")
+        if module_spec and module_spec.origin:
+            package_dir = os.path.dirname(module_spec.origin)
+            static_dir_str = os.path.join(package_dir, "tts")
+            if os.path.exists(static_dir_str):
+                return static_dir_str
+    except (ImportError, AttributeError):
+        pass
+
+    # Method 5: Look in sys.modules
+    try:
+        if "mlx_audio" in sys.modules:
+            module = sys.modules["mlx_audio"]
+            if hasattr(module, "__file__"):
+                package_dir = os.path.dirname(module.__file__)
+                static_dir_str = os.path.join(package_dir, "tts")
+                if os.path.exists(static_dir_str):
+                    return static_dir_str
+    except Exception:
+        pass
+
+    # If all methods fail, raise an error
+    raise RuntimeError("Could not find static directory")
+
+
+@app.post("/play")
+def play_audio(filename: str = Form(...)):
+    """
+    Play audio directly from the server using the AudioPlayer.
+    Expects a filename that exists in the OUTPUT_FOLDER.
+    """
+    global audio_player
+
+    if audio_player is None:
+        return JSONResponse({"error": "Audio player not initialized"}, status_code=500)
+
+    file_path = os.path.join(OUTPUT_FOLDER, filename)
+    if not os.path.exists(file_path):
+        return JSONResponse({"error": "File not found"}, status_code=404)
+
+    try:
+        # Load the audio file
+        audio_data, sample_rate = sf.read(file_path)
+
+        # If audio is stereo, convert to mono
+        if len(audio_data.shape) > 1 and audio_data.shape[1] > 1:
+            audio_data = audio_data.mean(axis=1)
+
+        # Queue the audio for playback
+        audio_player.queue_audio(audio_data)
+
+        return {"status": "playing", "filename": filename}
+    except Exception as e:
+        return JSONResponse(
+            {"error": f"Failed to play audio: {str(e)}"}, status_code=500
+        )
+
+
+@app.post("/stop")
+def stop_audio():
+    """
+    Stop any currently playing audio.
+    """
+    global audio_player
+
+    if audio_player is None:
+        return JSONResponse({"error": "Audio player not initialized"}, status_code=500)
+
+    try:
+        audio_player.stop()
+        return {"status": "stopped"}
+    except Exception as e:
+        return JSONResponse(
+            {"error": f"Failed to stop audio: {str(e)}"}, status_code=500
+        )
+
+
+@app.post("/open_output_folder")
+def open_output_folder():
+    """
+    Open the output folder in the system file explorer (Finder on macOS).
+    This only works when running on localhost for security reasons.
+    """
+    global OUTPUT_FOLDER
+
+    # Check if the request is coming from localhost
+    # Note: In a production environment, you would want to check the request IP
+
+    try:
+        # For macOS (Finder)
+        if sys.platform == "darwin":
+            os.system(f"open {OUTPUT_FOLDER}")
+        # For Windows (Explorer)
+        elif sys.platform == "win32":
+            os.system(f"explorer {OUTPUT_FOLDER}")
+        # For Linux (various file managers)
+        elif sys.platform == "linux":
+            os.system(f"xdg-open {OUTPUT_FOLDER}")
+        else:
+            return JSONResponse(
+                {"error": f"Unsupported platform: {sys.platform}"}, status_code=500
+            )
+
+        logger.debug(f"Opened output folder: {OUTPUT_FOLDER}")
+        return {"status": "opened", "path": OUTPUT_FOLDER}
+    except Exception as e:
+        logger.error(f"Error opening output folder: {str(e)}")
+        return JSONResponse(
+            {"error": f"Failed to open output folder: {str(e)}"}, status_code=500
+        )
+
+
+def setup_server():
+    """Setup the server by loading the model and creating the output directory."""
+    global tts_model, audio_player, OUTPUT_FOLDER
+
+    # Make sure the output folder for generated TTS files exists
+    try:
+        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
+        # Test write permissions by creating a test file
+        test_file = os.path.join(OUTPUT_FOLDER, "test_write.txt")
+        with open(test_file, "w") as f:
+            f.write("Test write permissions")
+        os.remove(test_file)
+        logger.debug(f"Output directory {OUTPUT_FOLDER} is writable")
+    except Exception as e:
+        logger.error(f"Error with output directory {OUTPUT_FOLDER}: {str(e)}")
+        # Try to use a fallback directory in /tmp
+        fallback_dir = os.path.join("/tmp", "mlx_audio_outputs")
+        logger.debug(f"Trying fallback directory: {fallback_dir}")
+        try:
+            os.makedirs(fallback_dir, exist_ok=True)
+            OUTPUT_FOLDER = fallback_dir
+            logger.debug(f"Using fallback output directory: {OUTPUT_FOLDER}")
+        except Exception as fallback_error:
+            logger.error(f"Error with fallback directory: {str(fallback_error)}")
+
+    # Load the model if not already loaded
+    if tts_model is None:
+        try:
+            default_model = (
+                "mlx-community/Kokoro-82M-4bit"  # Same default as in tts_endpoint
+            )
+            logger.debug(f"Loading TTS model from {default_model}")
+            tts_model = load_model(default_model)
+            logger.debug("TTS model loaded successfully")
+        except Exception as e:
+            logger.error(f"Error loading TTS model: {str(e)}")
+            raise
+
+    # Initialize the audio player if not already initialized
+    if audio_player is None:
+        try:
+            logger.debug("Initializing audio player")
+            audio_player = AudioPlayer()
+            logger.debug("Audio player initialized successfully")
+        except Exception as e:
+            logger.error(f"Error initializing audio player: {str(e)}")
+
+    # Try to mount the static files directory
+    try:
+        static_dir = find_static_dir()
+        logger.debug(f"Found static directory: {static_dir}")
+        app.mount("/static", StaticFiles(directory=static_dir), name="static")
+        logger.debug("Static files mounted successfully")
+    except Exception as e:
+        logger.error(f"Could not mount static files directory: {e}")
+        logger.warning(
+            "The server will still function, but the web interface may be limited."
+        )
+
+
+def main(host="127.0.0.1", port=8000, verbose=False):
+    """Parse command line arguments for the server and start it."""
+    parser = argparse.ArgumentParser(description="Start the MLX-Audio TTS server")
+    parser.add_argument(
+        "--host",
+        type=str,
+        default="127.0.0.1",
+        help="Host address to bind the server to (default: 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        type=int,
+        default=8000,
+        help="Port to bind the server to (default: 8000)",
+    )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Enable verbose logging with detailed debug information",
+    )
+    args = parser.parse_args()
+
+    # Update logger with verbose setting
+    global logger
+    logger = setup_logging(args.verbose)
+
+    # Start the server with the parsed arguments
+    setup_server()
+    uvicorn.run(
+        app,
+        host=args.host,
+        port=args.port,
+        log_level="debug" if args.verbose else "info",
+    )
+
+
+if __name__ == "__main__":
+    main()
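For context only (not part of the packaged code), here is a minimal client sketch showing how the /tts and /audio endpoints defined in the server module above could be exercised, assuming the server is running locally on its default host and port; the example text and output handling are illustrative.

import requests

BASE_URL = "http://127.0.0.1:8000"  # default host/port from main()

# Request synthesis; the /tts endpoint expects form-encoded fields.
resp = requests.post(
    f"{BASE_URL}/tts",
    data={
        "text": "Hello from the MLX-Audio server.",  # example text (assumption)
        "voice": "af_heart",                          # default voice in tts_endpoint
        "speed": 1.0,                                  # accepted range is 0.5 to 2.0
        "model": "mlx-community/Kokoro-82M-4bit",
    },
)
resp.raise_for_status()
filename = resp.json()["filename"]  # server returns e.g. "tts_<uuid>.wav"

# Fetch the generated WAV file from the /audio endpoint and save it locally.
audio = requests.get(f"{BASE_URL}/audio/{filename}")
with open(filename, "wb") as f:
    f.write(audio.content)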