xinference 1.10.0__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xinference might be problematic.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +473 -31
- xinference/client/restful/async_restful_client.py +178 -8
- xinference/client/restful/restful_client.py +151 -3
- xinference/core/supervisor.py +99 -53
- xinference/core/worker.py +10 -0
- xinference/deploy/cmdline.py +15 -0
- xinference/model/audio/core.py +21 -6
- xinference/model/audio/indextts2.py +166 -0
- xinference/model/audio/model_spec.json +58 -21
- xinference/model/image/model_spec.json +159 -90
- xinference/model/image/stable_diffusion/core.py +13 -4
- xinference/model/llm/__init__.py +6 -2
- xinference/model/llm/llm_family.json +1299 -174
- xinference/model/llm/mlx/distributed_models/core.py +41 -0
- xinference/model/llm/mlx/distributed_models/qwen2.py +1 -2
- xinference/model/llm/sglang/core.py +44 -11
- xinference/model/llm/tool_parsers/deepseek_r1_tool_parser.py +94 -32
- xinference/model/llm/tool_parsers/qwen_tool_parser.py +29 -4
- xinference/model/llm/transformers/chatglm.py +3 -0
- xinference/model/llm/transformers/core.py +129 -36
- xinference/model/llm/transformers/multimodal/minicpmv45.py +340 -0
- xinference/model/llm/transformers/multimodal/qwen2_vl.py +34 -8
- xinference/model/llm/transformers/utils.py +23 -0
- xinference/model/llm/utils.py +48 -32
- xinference/model/llm/vllm/core.py +207 -72
- xinference/model/utils.py +74 -31
- xinference/thirdparty/audiotools/__init__.py +10 -0
- xinference/thirdparty/audiotools/core/__init__.py +4 -0
- xinference/thirdparty/audiotools/core/audio_signal.py +1682 -0
- xinference/thirdparty/audiotools/core/display.py +194 -0
- xinference/thirdparty/audiotools/core/dsp.py +390 -0
- xinference/thirdparty/audiotools/core/effects.py +647 -0
- xinference/thirdparty/audiotools/core/ffmpeg.py +211 -0
- xinference/thirdparty/audiotools/core/loudness.py +320 -0
- xinference/thirdparty/audiotools/core/playback.py +252 -0
- xinference/thirdparty/audiotools/core/templates/__init__.py +0 -0
- xinference/thirdparty/audiotools/core/templates/headers.html +322 -0
- xinference/thirdparty/audiotools/core/templates/pandoc.css +407 -0
- xinference/thirdparty/audiotools/core/templates/widget.html +52 -0
- xinference/thirdparty/audiotools/core/util.py +671 -0
- xinference/thirdparty/audiotools/core/whisper.py +97 -0
- xinference/thirdparty/audiotools/data/__init__.py +3 -0
- xinference/thirdparty/audiotools/data/datasets.py +517 -0
- xinference/thirdparty/audiotools/data/preprocess.py +81 -0
- xinference/thirdparty/audiotools/data/transforms.py +1592 -0
- xinference/thirdparty/audiotools/metrics/__init__.py +6 -0
- xinference/thirdparty/audiotools/metrics/distance.py +131 -0
- xinference/thirdparty/audiotools/metrics/quality.py +159 -0
- xinference/thirdparty/audiotools/metrics/spectral.py +247 -0
- xinference/thirdparty/audiotools/ml/__init__.py +5 -0
- xinference/thirdparty/audiotools/ml/accelerator.py +184 -0
- xinference/thirdparty/audiotools/ml/decorators.py +440 -0
- xinference/thirdparty/audiotools/ml/experiment.py +90 -0
- xinference/thirdparty/audiotools/ml/layers/__init__.py +2 -0
- xinference/thirdparty/audiotools/ml/layers/base.py +328 -0
- xinference/thirdparty/audiotools/ml/layers/spectral_gate.py +127 -0
- xinference/thirdparty/audiotools/post.py +140 -0
- xinference/thirdparty/audiotools/preference.py +600 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/text.py +1 -1
- xinference/thirdparty/indextts/BigVGAN/ECAPA_TDNN.py +656 -0
- xinference/thirdparty/indextts/BigVGAN/__init__.py +0 -0
- xinference/thirdparty/indextts/BigVGAN/activations.py +122 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/__init__.py +0 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/.gitignore +1 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/__init__.py +0 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/activation1d.py +76 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu +256 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/compat.h +29 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/load.py +121 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/type_shim.h +92 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/__init__.py +6 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/act.py +31 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/filter.py +102 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/resample.py +58 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_torch/__init__.py +6 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_torch/act.py +29 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_torch/filter.py +96 -0
- xinference/thirdparty/indextts/BigVGAN/alias_free_torch/resample.py +49 -0
- xinference/thirdparty/indextts/BigVGAN/bigvgan.py +534 -0
- xinference/thirdparty/indextts/BigVGAN/models.py +451 -0
- xinference/thirdparty/indextts/BigVGAN/nnet/CNN.py +546 -0
- xinference/thirdparty/indextts/BigVGAN/nnet/__init__.py +0 -0
- xinference/thirdparty/indextts/BigVGAN/nnet/linear.py +89 -0
- xinference/thirdparty/indextts/BigVGAN/nnet/normalization.py +670 -0
- xinference/thirdparty/indextts/BigVGAN/utils.py +101 -0
- xinference/thirdparty/indextts/__init__.py +0 -0
- xinference/thirdparty/indextts/cli.py +65 -0
- xinference/thirdparty/indextts/gpt/__init__.py +0 -0
- xinference/thirdparty/indextts/gpt/conformer/__init__.py +0 -0
- xinference/thirdparty/indextts/gpt/conformer/attention.py +312 -0
- xinference/thirdparty/indextts/gpt/conformer/embedding.py +163 -0
- xinference/thirdparty/indextts/gpt/conformer/subsampling.py +348 -0
- xinference/thirdparty/indextts/gpt/conformer_encoder.py +520 -0
- xinference/thirdparty/indextts/gpt/model.py +713 -0
- xinference/thirdparty/indextts/gpt/model_v2.py +747 -0
- xinference/thirdparty/indextts/gpt/perceiver.py +317 -0
- xinference/thirdparty/indextts/gpt/transformers_beam_search.py +1013 -0
- xinference/thirdparty/indextts/gpt/transformers_generation_utils.py +4747 -0
- xinference/thirdparty/indextts/gpt/transformers_gpt2.py +1878 -0
- xinference/thirdparty/indextts/gpt/transformers_modeling_utils.py +5525 -0
- xinference/thirdparty/indextts/infer.py +690 -0
- xinference/thirdparty/indextts/infer_v2.py +739 -0
- xinference/thirdparty/indextts/s2mel/dac/__init__.py +16 -0
- xinference/thirdparty/indextts/s2mel/dac/__main__.py +36 -0
- xinference/thirdparty/indextts/s2mel/dac/model/__init__.py +4 -0
- xinference/thirdparty/indextts/s2mel/dac/model/base.py +294 -0
- xinference/thirdparty/indextts/s2mel/dac/model/dac.py +400 -0
- xinference/thirdparty/indextts/s2mel/dac/model/discriminator.py +228 -0
- xinference/thirdparty/indextts/s2mel/dac/model/encodec.py +320 -0
- xinference/thirdparty/indextts/s2mel/dac/nn/__init__.py +3 -0
- xinference/thirdparty/indextts/s2mel/dac/nn/layers.py +33 -0
- xinference/thirdparty/indextts/s2mel/dac/nn/loss.py +368 -0
- xinference/thirdparty/indextts/s2mel/dac/nn/quantize.py +339 -0
- xinference/thirdparty/indextts/s2mel/dac/utils/__init__.py +123 -0
- xinference/thirdparty/indextts/s2mel/dac/utils/decode.py +95 -0
- xinference/thirdparty/indextts/s2mel/dac/utils/encode.py +94 -0
- xinference/thirdparty/indextts/s2mel/hf_utils.py +12 -0
- xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/__init__.py +5 -0
- xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/act.py +29 -0
- xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/filter.py +96 -0
- xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/resample.py +57 -0
- xinference/thirdparty/indextts/s2mel/modules/audio.py +82 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/activations.py +120 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/__init__.py +0 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/activation1d.py +77 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/anti_alias_activation_cuda.cu +246 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/compat.h +29 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/load.py +86 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/type_shim.h +92 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/__init__.py +6 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/act.py +30 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/filter.py +101 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/resample.py +58 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/bigvgan.py +492 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/config.json +63 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/env.py +18 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/meldataset.py +354 -0
- xinference/thirdparty/indextts/s2mel/modules/bigvgan/utils.py +99 -0
- xinference/thirdparty/indextts/s2mel/modules/campplus/DTDNN.py +115 -0
- xinference/thirdparty/indextts/s2mel/modules/campplus/classifier.py +70 -0
- xinference/thirdparty/indextts/s2mel/modules/campplus/layers.py +253 -0
- xinference/thirdparty/indextts/s2mel/modules/commons.py +632 -0
- xinference/thirdparty/indextts/s2mel/modules/diffusion_transformer.py +257 -0
- xinference/thirdparty/indextts/s2mel/modules/encodec.py +292 -0
- xinference/thirdparty/indextts/s2mel/modules/flow_matching.py +171 -0
- xinference/thirdparty/indextts/s2mel/modules/gpt_fast/generate.py +436 -0
- xinference/thirdparty/indextts/s2mel/modules/gpt_fast/model.py +360 -0
- xinference/thirdparty/indextts/s2mel/modules/gpt_fast/quantize.py +622 -0
- xinference/thirdparty/indextts/s2mel/modules/hifigan/f0_predictor.py +55 -0
- xinference/thirdparty/indextts/s2mel/modules/hifigan/generator.py +454 -0
- xinference/thirdparty/indextts/s2mel/modules/layers.py +354 -0
- xinference/thirdparty/indextts/s2mel/modules/length_regulator.py +141 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/__init__.py +0 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/api.py +186 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/attentions.py +465 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/checkpoints_v2/converter/config.json +57 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/commons.py +160 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/mel_processing.py +183 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/models.py +499 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/modules.py +598 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/openvoice_app.py +275 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/se_extractor.py +153 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/transforms.py +209 -0
- xinference/thirdparty/indextts/s2mel/modules/openvoice/utils.py +194 -0
- xinference/thirdparty/indextts/s2mel/modules/quantize.py +229 -0
- xinference/thirdparty/indextts/s2mel/modules/rmvpe.py +631 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/__init__.py +4 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/heads.py +164 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/helpers.py +71 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/loss.py +114 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/models.py +118 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/modules.py +213 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/pretrained.py +51 -0
- xinference/thirdparty/indextts/s2mel/modules/vocos/spectral_ops.py +192 -0
- xinference/thirdparty/indextts/s2mel/modules/wavenet.py +174 -0
- xinference/thirdparty/indextts/s2mel/optimizers.py +96 -0
- xinference/thirdparty/indextts/s2mel/wav2vecbert_extract.py +148 -0
- xinference/thirdparty/indextts/utils/__init__.py +0 -0
- xinference/thirdparty/indextts/utils/arch_util.py +120 -0
- xinference/thirdparty/indextts/utils/checkpoint.py +34 -0
- xinference/thirdparty/indextts/utils/common.py +121 -0
- xinference/thirdparty/indextts/utils/feature_extractors.py +50 -0
- xinference/thirdparty/indextts/utils/front.py +536 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/__init__.py +0 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/codec.py +427 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/__init__.py +11 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/factorized_vector_quantize.py +150 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/lookup_free_quantize.py +77 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/residual_vq.py +177 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/vector_quantize.py +401 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/vocos.py +881 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_dataset.py +264 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_inference.py +515 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_sampler.py +126 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_trainer.py +166 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/__init__.py +0 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/__init__.py +5 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/act.py +29 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/filter.py +96 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/resample.py +57 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_dataset.py +98 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_inference.py +137 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_trainer.py +776 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/__init__.py +1 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/bst.t7 +0 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/model.py +219 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/attentions.py +437 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/commons.py +331 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/gradient_reversal.py +35 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/layers.py +460 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/quantize.py +741 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/style_encoder.py +110 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/wavenet.py +224 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/optimizer.py +104 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/kmeans/repcodec_model.py +210 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/kmeans/vocos.py +850 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/melvqgan/melspec.py +108 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/README.md +216 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/__init__.py +6 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/__init__.py +5 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/act.py +29 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/filter.py +96 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/resample.py +57 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/facodec.py +1222 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/gradient_reversal.py +35 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/melspec.py +102 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/__init__.py +7 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/fvq.py +116 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/rvq.py +87 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/transformer.py +234 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/model.py +184 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/__init__.py +27 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/conv.py +346 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/lstm.py +46 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/norm.py +37 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/__init__.py +14 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/ac.py +317 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/core_vq.py +388 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/distrib.py +135 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/vq.py +125 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/seanet.py +414 -0
- xinference/thirdparty/indextts/utils/maskgct/models/codec/vevo/vevo_repcodec.py +592 -0
- xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/ckpt/wav2vec2bert_stats.pt +0 -0
- xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/llama_nar.py +650 -0
- xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/maskgct_s2a.py +503 -0
- xinference/thirdparty/indextts/utils/maskgct_utils.py +259 -0
- xinference/thirdparty/indextts/utils/text_utils.py +41 -0
- xinference/thirdparty/indextts/utils/typical_sampling.py +30 -0
- xinference/thirdparty/indextts/utils/utils.py +93 -0
- xinference/thirdparty/indextts/utils/webui_utils.py +42 -0
- xinference/thirdparty/indextts/utils/xtransformers.py +1247 -0
- xinference/thirdparty/indextts/vqvae/__init__.py +0 -0
- xinference/thirdparty/indextts/vqvae/xtts_dvae.py +395 -0
- xinference/thirdparty/melo/text/chinese_mix.py +2 -2
- xinference/types.py +9 -0
- xinference/ui/gradio/media_interface.py +66 -8
- xinference/ui/web/ui/build/asset-manifest.json +6 -6
- xinference/ui/web/ui/build/index.html +1 -1
- xinference/ui/web/ui/build/static/css/main.5ea97072.css +2 -0
- xinference/ui/web/ui/build/static/css/main.5ea97072.css.map +1 -0
- xinference/ui/web/ui/build/static/js/main.45e78536.js +3 -0
- xinference/ui/web/ui/build/static/js/{main.1086c759.js.LICENSE.txt → main.45e78536.js.LICENSE.txt} +0 -7
- xinference/ui/web/ui/build/static/js/main.45e78536.js.map +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/089c38df5f52348d212ed868dda5c518a42e0c2762caed4175487c0405830c35.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/2b6e3a5b6eb2c5c5f2d007e68cd46c372721cd52bf63508adcdb21ecf79241d8.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/2d887825fd07a56f872eda4420da25fba0b5b62a23bdcc6c6da1a5281887f618.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/4001f9c3e64e73a4f2158826650c174a59d5e3f89ddecddf17cbb6bb688cc4ca.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/4a7018a69e6b7f90fc313248c2aa86f2a8f1eb1db120df586047a8023549b44b.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/64b12aaa1c1d1bf53820ada8a63769067c0ccc5aab46b32348eb1917ae7f2a11.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/7275b67c78ec76ce38a686bb8a576d8c9cecf54e1573614c84859d538efb9be5.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/a68b6ee3b31eadc051fb95ce8f8ccb9c2e8b52c60f290dbab545a1917e065282.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/ae8771cc37693feb160fa8727231312a0c54ef2d1d1ca893be568cd70016ca7e.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/bb4e8722d2d41d87f1fce3661bc8937bffe9448e231fc5f0462630849e851592.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/be6aada1ee4adc2bbf65dbe56d17db32bb3b5478be05d6b527805a8ba6cfb2b9.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/de91c352653c233cf0cb6674e6e04049a44fd0e1156560de65d5c4620521391e.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/e85f7002fc325c83b9c9cd8a1619e5b3ebc701d30e811afc284b88e6ae710cb5.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/e8b603c78944bf3d213639078bfe155ff5c0dfa4048a93cbb967cad6a4eb4ff3.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/ea2a26361204e70cf1018d6990fb6354bed82b3ac69690391e0f100385e7abb7.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/f05535160a508b2a312de546a6de234776c613db276479ea4253c0b1bdeeb7d6.json +1 -0
- xinference/ui/web/ui/node_modules/.cache/babel-loader/f09ba9e11106bd59a0de10cc85c55084097729dcab575f43dfcf07375961ed87.json +1 -0
- xinference/ui/web/ui/node_modules/.package-lock.json +0 -33
- xinference/ui/web/ui/package-lock.json +0 -34
- xinference/ui/web/ui/package.json +0 -1
- xinference/ui/web/ui/src/locales/en.json +9 -3
- xinference/ui/web/ui/src/locales/ja.json +9 -3
- xinference/ui/web/ui/src/locales/ko.json +9 -3
- xinference/ui/web/ui/src/locales/zh.json +9 -3
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/METADATA +24 -6
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/RECORD +296 -77
- xinference/ui/web/ui/build/static/css/main.013f296b.css +0 -2
- xinference/ui/web/ui/build/static/css/main.013f296b.css.map +0 -1
- xinference/ui/web/ui/build/static/js/main.1086c759.js +0 -3
- xinference/ui/web/ui/build/static/js/main.1086c759.js.map +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/0b0f77000cc1b482ca091cfbcae511dfe02f08916971645fad21d0b1234d04a2.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/1c5f8ff423a7c9202bea60b15680f04b1e9964b445b0da3f86c6ff70cf24e797.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/44ce7993e344980e3ed4f13e8f69237d4a5dfc60e37ca6b54f51f8ee1357bd67.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/4aec1cc414ac3ebb3481d3d915e4db597d9127de813291346eacb8554ab170d4.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/644cfec52f3c57a6e222ce60f112237a1efefe9835efd9aad857a685f53d8eed.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/663436f72af53fe0d72394f56d003fa4e0bba489e5bb4e483fd34b00f84637f7.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/69db82ca9bfe27fe417cc6cf2b1716b09be9c6f0cd198530f12bfc60e801bbcf.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/85087e27618d740c236bf159f30e0219db443ab55f0997388eed5fde6f9e90cc.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/88b07838348864aa86c672be3bbca1e9f58f6f3a2881b32070ec27f4e7b449d1.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/8b8cd408ccfbe115acef27ccfa5b233da8597131a2a5712add13e1e4d5d4504b.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/a23824fe746b9c6ca5eee9159b5764d1ff1653c1d856288c0f75c742bbb0023b.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/a3eb18af328280b139693c9092dff2a0ef8c9a967e6c8956ceee0996611f1984.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/bc1aacc65a102db325ca61bcd2f681e1ae22c36a1f1d98a6ff5e4ad49dc7544f.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/c682fd521747c19dae437d83ce3235a306ce6b68e24a117bc57c27ebb8d1f1ca.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/d5c224be7081f18cba1678b7874a9782eba895df004874ff8f243f94ba79942a.json +0 -1
- xinference/ui/web/ui/node_modules/.cache/babel-loader/f7f18bfb539b036a6a342176dd98a85df5057a884a8da978d679f2a0264883d0.json +0 -1
- xinference/ui/web/ui/node_modules/clipboard/.babelrc.json +0 -11
- xinference/ui/web/ui/node_modules/clipboard/.eslintrc.json +0 -24
- xinference/ui/web/ui/node_modules/clipboard/.prettierrc.json +0 -9
- xinference/ui/web/ui/node_modules/clipboard/bower.json +0 -18
- xinference/ui/web/ui/node_modules/clipboard/composer.json +0 -25
- xinference/ui/web/ui/node_modules/clipboard/package.json +0 -63
- xinference/ui/web/ui/node_modules/delegate/package.json +0 -31
- xinference/ui/web/ui/node_modules/good-listener/bower.json +0 -11
- xinference/ui/web/ui/node_modules/good-listener/package.json +0 -35
- xinference/ui/web/ui/node_modules/select/bower.json +0 -13
- xinference/ui/web/ui/node_modules/select/package.json +0 -29
- xinference/ui/web/ui/node_modules/tiny-emitter/package.json +0 -53
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/WHEEL +0 -0
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/entry_points.txt +0 -0
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/licenses/LICENSE +0 -0
- {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/top_level.txt +0 -0
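The 690-line hunk below appears to be the new file xinference/thirdparty/indextts/infer.py (+690 -0) from the list above.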
@@ -0,0 +1,690 @@
import os

os.environ['HF_HUB_CACHE'] = './checkpoints/hf_cache'
import time
from subprocess import CalledProcessError
from typing import Dict, List

import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence
from omegaconf import OmegaConf
from tqdm import tqdm

import warnings

warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)

from indextts.BigVGAN.models import BigVGAN as Generator
from indextts.gpt.model import UnifiedVoice
from indextts.utils.checkpoint import load_checkpoint
from indextts.utils.feature_extractors import MelSpectrogramFeatures

from indextts.utils.front import TextNormalizer, TextTokenizer


class IndexTTS:
    def __init__(
        self, cfg_path="checkpoints/config.yaml", model_dir="checkpoints", use_fp16=True, device=None,
        use_cuda_kernel=None,
    ):
        """
        Args:
            cfg_path (str): path to the config file.
            model_dir (str): path to the model directory.
            use_fp16 (bool): whether to use fp16.
            device (str): device to use (e.g., 'cuda:0', 'cpu'). If None, it will be set automatically based on the availability of CUDA or MPS.
            use_cuda_kernel (None | bool): whether to use BigVGan custom fused activation CUDA kernel, only for CUDA device.
        """
        if device is not None:
            self.device = device
            self.use_fp16 = False if device == "cpu" else use_fp16
            self.use_cuda_kernel = use_cuda_kernel is not None and use_cuda_kernel and device.startswith("cuda")
        elif torch.cuda.is_available():
            self.device = "cuda:0"
            self.use_fp16 = use_fp16
            self.use_cuda_kernel = use_cuda_kernel is None or use_cuda_kernel
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            self.device = "xpu"
            self.use_fp16 = use_fp16
            self.use_cuda_kernel = False
        elif hasattr(torch, "mps") and torch.backends.mps.is_available():
            self.device = "mps"
            self.use_fp16 = False  # float16 has more overhead than float32 on MPS
            self.use_cuda_kernel = False
        else:
            self.device = "cpu"
            self.use_fp16 = False
            self.use_cuda_kernel = False
            print(">> Be patient, it may take a while to run in CPU mode.")

        self.cfg = OmegaConf.load(cfg_path)
        self.model_dir = model_dir
        self.dtype = torch.float16 if self.use_fp16 else None
        self.stop_mel_token = self.cfg.gpt.stop_mel_token

        # Uncomment to load the VQ-VAE model for debugging the tokenizer
        # https://github.com/index-tts/index-tts/issues/34
        #
        # from indextts.vqvae.xtts_dvae import DiscreteVAE
        # self.dvae = DiscreteVAE(**self.cfg.vqvae)
        # self.dvae_path = os.path.join(self.model_dir, self.cfg.dvae_checkpoint)
        # load_checkpoint(self.dvae, self.dvae_path)
        # self.dvae = self.dvae.to(self.device)
        # if self.use_fp16:
        #     self.dvae.eval().half()
        # else:
        #     self.dvae.eval()
        # print(">> vqvae weights restored from:", self.dvae_path)
        self.gpt = UnifiedVoice(**self.cfg.gpt)
        self.gpt_path = os.path.join(self.model_dir, self.cfg.gpt_checkpoint)
        load_checkpoint(self.gpt, self.gpt_path)
        self.gpt = self.gpt.to(self.device)
        if self.use_fp16:
            self.gpt.eval().half()
        else:
            self.gpt.eval()
        print(">> GPT weights restored from:", self.gpt_path)
        if self.use_fp16:
            try:
                import deepspeed

                use_deepspeed = True
            except (ImportError, OSError, CalledProcessError) as e:
                use_deepspeed = False
                print(f">> Failed to load DeepSpeed, falling back to standard inference: {e}")

            self.gpt.post_init_gpt2_config(use_deepspeed=use_deepspeed, kv_cache=True, half=True)
        else:
            self.gpt.post_init_gpt2_config(use_deepspeed=False, kv_cache=False, half=False)

        if self.use_cuda_kernel:
            # preload the CUDA kernel for BigVGAN
            try:
                from indextts.BigVGAN.alias_free_activation.cuda import load

                anti_alias_activation_cuda = load.load()
                print(">> Preloaded custom CUDA kernel for BigVGAN", anti_alias_activation_cuda)
            except:
                print(">> Failed to load custom CUDA kernel for BigVGAN. Falling back to torch.")
                self.use_cuda_kernel = False
        self.bigvgan = Generator(self.cfg.bigvgan, use_cuda_kernel=self.use_cuda_kernel)
        self.bigvgan_path = os.path.join(self.model_dir, self.cfg.bigvgan_checkpoint)
        vocoder_dict = torch.load(self.bigvgan_path, map_location="cpu")
        self.bigvgan.load_state_dict(vocoder_dict["generator"])
        self.bigvgan = self.bigvgan.to(self.device)
        # remove weight norm in eval mode
        self.bigvgan.remove_weight_norm()
        self.bigvgan.eval()
        print(">> bigvgan weights restored from:", self.bigvgan_path)
        self.bpe_path = os.path.join(self.model_dir, self.cfg.dataset["bpe_model"])
        self.normalizer = TextNormalizer()
        self.normalizer.load()
        print(">> TextNormalizer loaded")
        self.tokenizer = TextTokenizer(self.bpe_path, self.normalizer)
        print(">> bpe model loaded from:", self.bpe_path)
        # Cache for the reference-audio mel spectrogram:
        self.cache_audio_prompt = None
        self.cache_cond_mel = None
        # Progress callback for display (optional)
        self.gr_progress = None
        self.model_version = self.cfg.version if hasattr(self.cfg, "version") else None

    def remove_long_silence(self, codes: torch.Tensor, silent_token=52, max_consecutive=30):
        """
        Shrink special tokens (silent_token and stop_mel_token) in codes
        codes: [B, T]
        """
        code_lens = []
        codes_list = []
        device = codes.device
        dtype = codes.dtype
        isfix = False
        for i in range(0, codes.shape[0]):
            code = codes[i]
            if not torch.any(code == self.stop_mel_token).item():
                len_ = code.size(0)
            else:
                stop_mel_idx = (code == self.stop_mel_token).nonzero(as_tuple=False)
                len_ = stop_mel_idx[0].item() if len(stop_mel_idx) > 0 else code.size(0)

            count = torch.sum(code == silent_token).item()
            if count > max_consecutive:
                # code = code.cpu().tolist()
                ncode_idx = []
                n = 0
                for k in range(len_):
                    assert code[k] != self.stop_mel_token, f"stop_mel_token {self.stop_mel_token} should be shrunk here"
                    if code[k] != silent_token:
                        ncode_idx.append(k)
                        n = 0
                    elif code[k] == silent_token and n < 10:
                        ncode_idx.append(k)
                        n += 1
                    # if (k == 0 and code[k] == 52) or (code[k] == 52 and code[k-1] == 52):
                    #     n += 1
                # new code
                len_ = len(ncode_idx)
                codes_list.append(code[ncode_idx])
                isfix = True
            else:
                # shrink to len_
                codes_list.append(code[:len_])
            code_lens.append(len_)
        if isfix:
            if len(codes_list) > 1:
                codes = pad_sequence(codes_list, batch_first=True, padding_value=self.stop_mel_token)
            else:
                codes = codes_list[0].unsqueeze(0)
        else:
            # unchanged
            pass
        # clip codes to max length
        max_len = max(code_lens)
        if max_len < codes.shape[1]:
            codes = codes[:, :max_len]
        code_lens = torch.tensor(code_lens, dtype=torch.long, device=device)
        return codes, code_lens

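    # A worked example of remove_long_silence above (hypothetical codes row):
    # with silent_token=52 and max_consecutive=30, a row containing more than 30
    # silence tokens in total gets every consecutive run of 52s truncated to at
    # most 10 tokens, and everything from the first stop_mel_token onward is
    # dropped before the batch is re-padded.
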
    def bucket_segments(self, segments, bucket_max_size=4) -> List[List[Dict]]:
        """
        Segment data bucketing.
        if ``bucket_max_size=1``, return all segments in one bucket.
        """
        outputs: List[Dict] = []
        for idx, sent in enumerate(segments):
            outputs.append({"idx": idx, "sent": sent, "len": len(sent)})

        if len(outputs) > bucket_max_size:
            # split segments into buckets by segment length
            buckets: List[List[Dict]] = []
            factor = 1.5
            last_bucket = None
            last_bucket_sent_len_median = 0

            for sent in sorted(outputs, key=lambda x: x["len"]):
                current_sent_len = sent["len"]
                if current_sent_len == 0:
                    print(">> skip empty segment")
                    continue
                if last_bucket is None \
                        or current_sent_len >= int(last_bucket_sent_len_median * factor) \
                        or len(last_bucket) >= bucket_max_size:
                    # new bucket
                    buckets.append([sent])
                    last_bucket = buckets[-1]
                    last_bucket_sent_len_median = current_sent_len
                else:
                    # current bucket can hold more segments
                    last_bucket.append(sent)  # sorted
                    mid = len(last_bucket) // 2
                    last_bucket_sent_len_median = last_bucket[mid]["len"]
            last_bucket = None
            # merge all buckets with size 1
            out_buckets: List[List[Dict]] = []
            only_ones: List[Dict] = []
            for b in buckets:
                if len(b) == 1:
                    only_ones.append(b[0])
                else:
                    out_buckets.append(b)
            if len(only_ones) > 0:
                # merge into previous buckets if possible
                # print("only_ones:", [(o["idx"], o["len"]) for o in only_ones])
                for i in range(len(out_buckets)):
                    b = out_buckets[i]
                    if len(b) < bucket_max_size:
                        b.append(only_ones.pop(0))
                        if len(only_ones) == 0:
                            break
                # combine all remaining size-1 buckets
                if len(only_ones) > 0:
                    out_buckets.extend(
                        [only_ones[i:i + bucket_max_size] for i in range(0, len(only_ones), bucket_max_size)])
            return out_buckets
        return [outputs]

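    # A worked example of the bucketing above (hypothetical input): with
    # bucket_max_size=4, segments of token lengths [3, 4, 4, 5, 9, 10, 11, 30]
    # are first split by sorted length into [3], [4, 4, 5], [9, 10, 11], [30]
    # (a new bucket opens when a segment is >= 1.5x the current bucket's median
    # length, or the bucket is full); the size-1 buckets are then merged into
    # the others, yielding [4, 4, 5, 3] and [9, 10, 11, 30].
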
    def pad_tokens_cat(self, tokens: List[torch.Tensor]) -> torch.Tensor:
        if self.model_version and self.model_version >= 1.5:
            # For version 1.5 and above, right-pad with stop_text_token up to the max length
            # [1, N] -> [N,]
            tokens = [t.squeeze(0) for t in tokens]
            return pad_sequence(tokens, batch_first=True, padding_value=self.cfg.gpt.stop_text_token,
                                padding_side="right")
        # Legacy (pre-1.5) behavior: pad with up to 8 stop_text_token, then start_text_token
        max_len = max(t.size(1) for t in tokens)
        outputs = []
        for tensor in tokens:
            pad_len = max_len - tensor.size(1)
            if pad_len > 0:
                n = min(8, pad_len)
                tensor = torch.nn.functional.pad(tensor, (0, n), value=self.cfg.gpt.stop_text_token)
                tensor = torch.nn.functional.pad(tensor, (0, pad_len - n), value=self.cfg.gpt.start_text_token)
            tensor = tensor[:, :max_len]
            outputs.append(tensor)
        tokens = torch.cat(outputs, dim=0)
        return tokens

    def torch_empty_cache(self):
        try:
            if "cuda" in str(self.device):
                torch.cuda.empty_cache()
            elif "mps" in str(self.device):
                torch.mps.empty_cache()
        except Exception as e:
            pass

    def _set_gr_progress(self, value, desc):
        if self.gr_progress is not None:
            self.gr_progress(value, desc=desc)

    # Fast inference: for long, multi-sentence text this achieves at least a 2-10x speedup (First modified by sunnyboxs 2025-04-16)
    def infer_fast(self, audio_prompt, text, output_path, verbose=False, max_text_tokens_per_segment=100,
                   segments_bucket_max_size=4, **generation_kwargs):
        """
        Args:
            ``max_text_tokens_per_segment``: maximum number of tokens per segment, default ``100``; adjust according to your GPU hardware
                - smaller: more batching, *faster* inference, higher memory use, may affect quality
                - larger: less batching, *slower* inference; memory use and quality closer to non-fast inference
            ``segments_bucket_max_size``: maximum capacity of each segment bucket, default ``4``; adjust according to GPU memory
                - larger: fewer buckets, more batching, *faster* inference, higher memory use, may affect quality
                - smaller: more buckets, less batching, *slower* inference; memory use and quality closer to non-fast inference
        """
        print(">> starting fast inference...")

        self._set_gr_progress(0, "starting fast inference...")
        if verbose:
            print(f"origin text:{text}")
        start_time = time.perf_counter()

        # Only regenerate cond_mel when the reference audio changes, to save time
        if self.cache_cond_mel is None or self.cache_audio_prompt != audio_prompt:
            audio, sr = torchaudio.load(audio_prompt)
            audio = torch.mean(audio, dim=0, keepdim=True)
            if audio.shape[0] > 1:
                audio = audio[0].unsqueeze(0)
            audio = torchaudio.transforms.Resample(sr, 24000)(audio)

            max_audio_length_seconds = 50
            max_audio_samples = int(max_audio_length_seconds * 24000)

            if audio.shape[1] > max_audio_samples:
                if verbose:
                    print(f"Audio too long ({audio.shape[1]} samples), truncating to {max_audio_samples} samples")
                audio = audio[:, :max_audio_samples]

            cond_mel = MelSpectrogramFeatures()(audio).to(self.device)
            cond_mel_frame = cond_mel.shape[-1]
            if verbose:
                print(f"cond_mel shape: {cond_mel.shape}", "dtype:", cond_mel.dtype)

            self.cache_audio_prompt = audio_prompt
            self.cache_cond_mel = cond_mel
        else:
            cond_mel = self.cache_cond_mel
            cond_mel_frame = cond_mel.shape[-1]
            pass

        auto_conditioning = cond_mel
        cond_mel_lengths = torch.tensor([cond_mel_frame], device=self.device)

        # text_tokens
        text_tokens_list = self.tokenizer.tokenize(text)

        segments = self.tokenizer.split_segments(text_tokens_list,
                                                 max_text_tokens_per_segment=max_text_tokens_per_segment)
        if verbose:
            print(">> text token count:", len(text_tokens_list))
            print(" segments count:", len(segments))
            print(" max_text_tokens_per_segment:", max_text_tokens_per_segment)
            print(*segments, sep="\n")
        do_sample = generation_kwargs.pop("do_sample", True)
        top_p = generation_kwargs.pop("top_p", 0.8)
        top_k = generation_kwargs.pop("top_k", 30)
        temperature = generation_kwargs.pop("temperature", 1.0)
        autoregressive_batch_size = 1
        length_penalty = generation_kwargs.pop("length_penalty", 0.0)
        num_beams = generation_kwargs.pop("num_beams", 3)
        repetition_penalty = generation_kwargs.pop("repetition_penalty", 10.0)
        max_mel_tokens = generation_kwargs.pop("max_mel_tokens", 600)
        sampling_rate = 24000
        # lang = "EN"
        # lang = "ZH"
        wavs = []
        gpt_gen_time = 0
        gpt_forward_time = 0
        bigvgan_time = 0

        # text processing
        all_text_tokens: List[List[torch.Tensor]] = []
        self._set_gr_progress(0.1, "text processing...")
        bucket_max_size = segments_bucket_max_size if self.device != "cpu" else 1
        all_segments = self.bucket_segments(segments, bucket_max_size=bucket_max_size)
        bucket_count = len(all_segments)
        if verbose:
            print(">> segments bucket_count:", bucket_count,
                  "bucket sizes:", [(len(s), [t["idx"] for t in s]) for s in all_segments],
                  "bucket_max_size:", bucket_max_size)
        for segments in all_segments:
            temp_tokens: List[torch.Tensor] = []
            all_text_tokens.append(temp_tokens)
            for item in segments:
                sent = item["sent"]
                text_tokens = self.tokenizer.convert_tokens_to_ids(sent)
                text_tokens = torch.tensor(text_tokens, dtype=torch.int32, device=self.device).unsqueeze(0)
                if verbose:
                    print(text_tokens)
                    print(f"text_tokens shape: {text_tokens.shape}, text_tokens type: {text_tokens.dtype}")
                    # debug tokenizer
                    text_token_syms = self.tokenizer.convert_ids_to_tokens(text_tokens[0].tolist())
                    print("text_token_syms is same as segment tokens", text_token_syms == sent)
                temp_tokens.append(text_tokens)

        # Sequential processing of bucketed data
        all_batch_num = sum(len(s) for s in all_segments)
        all_batch_codes = []
        processed_num = 0
        for item_tokens in all_text_tokens:
            batch_num = len(item_tokens)
            if batch_num > 1:
                batch_text_tokens = self.pad_tokens_cat(item_tokens)
            else:
                batch_text_tokens = item_tokens[0]
            processed_num += batch_num
            # gpt speech
            self._set_gr_progress(0.2 + 0.3 * processed_num / all_batch_num,
                                  f"gpt speech inference {processed_num}/{all_batch_num}...")
            m_start_time = time.perf_counter()
            with torch.no_grad():
                with torch.amp.autocast(batch_text_tokens.device.type, enabled=self.dtype is not None,
                                        dtype=self.dtype):
                    temp_codes = self.gpt.inference_speech(auto_conditioning, batch_text_tokens,
                                                           cond_mel_lengths=cond_mel_lengths,
                                                           # text_lengths=text_len,
                                                           do_sample=do_sample,
                                                           top_p=top_p,
                                                           top_k=top_k,
                                                           temperature=temperature,
                                                           num_return_sequences=autoregressive_batch_size,
                                                           length_penalty=length_penalty,
                                                           num_beams=num_beams,
                                                           repetition_penalty=repetition_penalty,
                                                           max_generate_length=max_mel_tokens,
                                                           **generation_kwargs)
            all_batch_codes.append(temp_codes)
            gpt_gen_time += time.perf_counter() - m_start_time

        # gpt latent
        self._set_gr_progress(0.5, "gpt latents inference...")
        all_idxs = []
        all_latents = []
        has_warned = False
        for batch_codes, batch_tokens, batch_segments in zip(all_batch_codes, all_text_tokens, all_segments):
            for i in range(batch_codes.shape[0]):
                codes = batch_codes[i]  # [x]
                if not has_warned and codes[-1] != self.stop_mel_token:
                    warnings.warn(
                        f"WARN: generation stopped due to exceeding `max_mel_tokens` ({max_mel_tokens}). "
                        f"Consider reducing `max_text_tokens_per_segment`({max_text_tokens_per_segment}) or increasing `max_mel_tokens`.",
                        category=RuntimeWarning
                    )
                    has_warned = True
                codes = codes.unsqueeze(0)  # [x] -> [1, x]
                if verbose:
                    print("codes:", codes.shape)
                    print(codes)
                codes, code_lens = self.remove_long_silence(codes, silent_token=52, max_consecutive=30)
                if verbose:
                    print("fix codes:", codes.shape)
                    print(codes)
                    print("code_lens:", code_lens)
                text_tokens = batch_tokens[i]
                all_idxs.append(batch_segments[i]["idx"])
                m_start_time = time.perf_counter()
                with torch.no_grad():
                    with torch.amp.autocast(text_tokens.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                        latent = \
                            self.gpt(auto_conditioning, text_tokens,
                                     torch.tensor([text_tokens.shape[-1]], device=text_tokens.device), codes,
                                     code_lens * self.gpt.mel_length_compression,
                                     cond_mel_lengths=torch.tensor([auto_conditioning.shape[-1]],
                                                                   device=text_tokens.device),
                                     return_latent=True, clip_inputs=False)
                gpt_forward_time += time.perf_counter() - m_start_time
                all_latents.append(latent)
        del all_batch_codes, all_text_tokens, all_segments
        # bigvgan chunk
        chunk_size = 2
        all_latents = [all_latents[all_idxs.index(i)] for i in range(len(all_latents))]
        if verbose:
            print(">> all_latents:", len(all_latents))
            print(" latents length:", [l.shape[1] for l in all_latents])
        chunk_latents = [all_latents[i: i + chunk_size] for i in range(0, len(all_latents), chunk_size)]
        chunk_length = len(chunk_latents)
        latent_length = len(all_latents)

        # bigvgan chunk decode
        self._set_gr_progress(0.7, "bigvgan decoding...")
        tqdm_progress = tqdm(total=latent_length, desc="bigvgan")
        for items in chunk_latents:
            tqdm_progress.update(len(items))
            latent = torch.cat(items, dim=1)
            with torch.no_grad():
                with torch.amp.autocast(latent.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                    m_start_time = time.perf_counter()
                    wav, _ = self.bigvgan(latent, auto_conditioning.transpose(1, 2))
                    bigvgan_time += time.perf_counter() - m_start_time
                    wav = wav.squeeze(1)
                pass
            wav = torch.clamp(32767 * wav, -32767.0, 32767.0)
            wavs.append(wav.cpu())  # to cpu before saving

        # clear cache
        tqdm_progress.close()  # make sure the progress bar is closed
        del all_latents, chunk_latents
        end_time = time.perf_counter()
        self.torch_empty_cache()

        # wav audio output
        self._set_gr_progress(0.9, "saving audio...")
        wav = torch.cat(wavs, dim=1)
        wav_length = wav.shape[-1] / sampling_rate
        print(f">> Reference audio length: {cond_mel_frame * 256 / sampling_rate:.2f} seconds")
        print(f">> gpt_gen_time: {gpt_gen_time:.2f} seconds")
        print(f">> gpt_forward_time: {gpt_forward_time:.2f} seconds")
        print(f">> bigvgan_time: {bigvgan_time:.2f} seconds")
        print(f">> Total fast inference time: {end_time - start_time:.2f} seconds")
        print(f">> Generated audio length: {wav_length:.2f} seconds")
        print(f">> [fast] bigvgan chunk_length: {chunk_length}")
        print(f">> [fast] batch_num: {all_batch_num} bucket_max_size: {bucket_max_size}",
              f"bucket_count: {bucket_count}" if bucket_max_size > 1 else "")
        print(f">> [fast] RTF: {(end_time - start_time) / wav_length:.4f}")

        # save audio
        wav = wav.cpu()  # to cpu
        if output_path:
            # Save the audio directly to the given path
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            torchaudio.save(output_path, wav.type(torch.int16), sampling_rate)
            print(">> wav file saved to:", output_path)
            return output_path
        else:
            # Return in the format Gradio expects
            wav_data = wav.type(torch.int16)
            wav_data = wav_data.numpy().T
            return (sampling_rate, wav_data)

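    # A minimal usage sketch for the fast path (hypothetical, not part of the
    # release; the file paths mirror the __main__ block at the bottom of this file):
    #
    #   tts = IndexTTS(cfg_path="checkpoints/config.yaml", model_dir="checkpoints")
    #   tts.infer_fast("examples/voice_01.wav", "text to speak", "gen_fast.wav",
    #                  max_text_tokens_per_segment=100, segments_bucket_max_size=4)
    #
    # Per the docstring above, a smaller max_text_tokens_per_segment and a larger
    # segments_bucket_max_size trade memory (and possibly quality) for speed.
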
    # Original inference mode
    def infer(self, audio_prompt, text, output_path, verbose=False, max_text_tokens_per_segment=120,
              **generation_kwargs):
        print(">> starting inference...")
        self._set_gr_progress(0, "starting inference...")
        if verbose:
            print(f"origin text:{text}")
        start_time = time.perf_counter()

        # Only regenerate cond_mel when the reference audio changes, to save time
        if self.cache_cond_mel is None or self.cache_audio_prompt != audio_prompt:
            audio, sr = torchaudio.load(audio_prompt)
            audio = torch.mean(audio, dim=0, keepdim=True)
            if audio.shape[0] > 1:
                audio = audio[0].unsqueeze(0)
            audio = torchaudio.transforms.Resample(sr, 24000)(audio)
            cond_mel = MelSpectrogramFeatures()(audio).to(self.device)
            cond_mel_frame = cond_mel.shape[-1]
            if verbose:
                print(f"cond_mel shape: {cond_mel.shape}", "dtype:", cond_mel.dtype)

            self.cache_audio_prompt = audio_prompt
            self.cache_cond_mel = cond_mel
        else:
            cond_mel = self.cache_cond_mel
            cond_mel_frame = cond_mel.shape[-1]
            pass

        self._set_gr_progress(0.1, "text processing...")
        auto_conditioning = cond_mel
        text_tokens_list = self.tokenizer.tokenize(text)
        segments = self.tokenizer.split_segments(text_tokens_list, max_text_tokens_per_segment)
        if verbose:
            print("text token count:", len(text_tokens_list))
            print("segments count:", len(segments))
            print("max_text_tokens_per_segment:", max_text_tokens_per_segment)
            print(*segments, sep="\n")
        do_sample = generation_kwargs.pop("do_sample", True)
        top_p = generation_kwargs.pop("top_p", 0.8)
        top_k = generation_kwargs.pop("top_k", 30)
        temperature = generation_kwargs.pop("temperature", 1.0)
        autoregressive_batch_size = 1
        length_penalty = generation_kwargs.pop("length_penalty", 0.0)
        num_beams = generation_kwargs.pop("num_beams", 3)
        repetition_penalty = generation_kwargs.pop("repetition_penalty", 10.0)
        max_mel_tokens = generation_kwargs.pop("max_mel_tokens", 600)
        sampling_rate = 24000
        # lang = "EN"
        # lang = "ZH"
        wavs = []
        gpt_gen_time = 0
        gpt_forward_time = 0
        bigvgan_time = 0
        progress = 0
        has_warned = False
        for sent in segments:
            text_tokens = self.tokenizer.convert_tokens_to_ids(sent)
            text_tokens = torch.tensor(text_tokens, dtype=torch.int32, device=self.device).unsqueeze(0)
            # text_tokens = F.pad(text_tokens, (0, 1))  # This may not be necessary.
            # text_tokens = F.pad(text_tokens, (1, 0), value=0)
            # text_tokens = F.pad(text_tokens, (0, 1), value=1)
            if verbose:
                print(text_tokens)
                print(f"text_tokens shape: {text_tokens.shape}, text_tokens type: {text_tokens.dtype}")
                # debug tokenizer
                text_token_syms = self.tokenizer.convert_ids_to_tokens(text_tokens[0].tolist())
                print("text_token_syms is same as segment tokens", text_token_syms == sent)

            # text_len = torch.IntTensor([text_tokens.size(1)], device=text_tokens.device)
            # print(text_len)
            progress += 1
            self._set_gr_progress(0.2 + 0.4 * (progress - 1) / len(segments),
                                  f"gpt latents inference {progress}/{len(segments)}...")
            m_start_time = time.perf_counter()
            with torch.no_grad():
                with torch.amp.autocast(text_tokens.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                    codes = self.gpt.inference_speech(auto_conditioning, text_tokens,
                                                      cond_mel_lengths=torch.tensor([auto_conditioning.shape[-1]],
                                                                                    device=text_tokens.device),
                                                      # text_lengths=text_len,
                                                      do_sample=do_sample,
                                                      top_p=top_p,
                                                      top_k=top_k,
                                                      temperature=temperature,
                                                      num_return_sequences=autoregressive_batch_size,
                                                      length_penalty=length_penalty,
                                                      num_beams=num_beams,
                                                      repetition_penalty=repetition_penalty,
                                                      max_generate_length=max_mel_tokens,
                                                      **generation_kwargs)
            gpt_gen_time += time.perf_counter() - m_start_time
            if not has_warned and (codes[:, -1] != self.stop_mel_token).any():
                warnings.warn(
                    f"WARN: generation stopped due to exceeding `max_mel_tokens` ({max_mel_tokens}). "
                    f"Input text tokens: {text_tokens.shape[1]}. "
                    f"Consider reducing `max_text_tokens_per_segment`({max_text_tokens_per_segment}) or increasing `max_mel_tokens`.",
                    category=RuntimeWarning
                )
                has_warned = True

            code_lens = torch.tensor([codes.shape[-1]], device=codes.device, dtype=codes.dtype)
            if verbose:
                print(codes, type(codes))
                print(f"codes shape: {codes.shape}, codes type: {codes.dtype}")
                print(f"code len: {code_lens}")

            # remove ultra-long silence if it exists
            # temporarily fix the long silence bug.
            codes, code_lens = self.remove_long_silence(codes, silent_token=52, max_consecutive=30)
            if verbose:
                print(codes, type(codes))
                print(f"fix codes shape: {codes.shape}, codes type: {codes.dtype}")
                print(f"code len: {code_lens}")
            self._set_gr_progress(0.2 + 0.4 * progress / len(segments),
                                  f"gpt speech inference {progress}/{len(segments)}...")
            m_start_time = time.perf_counter()
            # latent, text_lens_out, code_lens_out = \
            with torch.amp.autocast(text_tokens.device.type, enabled=self.dtype is not None, dtype=self.dtype):
                latent = \
                    self.gpt(auto_conditioning, text_tokens,
                             torch.tensor([text_tokens.shape[-1]], device=text_tokens.device), codes,
                             code_lens * self.gpt.mel_length_compression,
                             cond_mel_lengths=torch.tensor([auto_conditioning.shape[-1]],
                                                           device=text_tokens.device),
                             return_latent=True, clip_inputs=False)
                gpt_forward_time += time.perf_counter() - m_start_time

                m_start_time = time.perf_counter()
                wav, _ = self.bigvgan(latent, auto_conditioning.transpose(1, 2))
                bigvgan_time += time.perf_counter() - m_start_time
                wav = wav.squeeze(1)

            wav = torch.clamp(32767 * wav, -32767.0, 32767.0)
            if verbose:
                print(f"wav shape: {wav.shape}", "min:", wav.min(), "max:", wav.max())
            # wavs.append(wav[:, :-512])
            wavs.append(wav.cpu())  # to cpu before saving
        end_time = time.perf_counter()
        self._set_gr_progress(0.9, "saving audio...")
        wav = torch.cat(wavs, dim=1)
        wav_length = wav.shape[-1] / sampling_rate
        print(f">> Reference audio length: {cond_mel_frame * 256 / sampling_rate:.2f} seconds")
        print(f">> gpt_gen_time: {gpt_gen_time:.2f} seconds")
        print(f">> gpt_forward_time: {gpt_forward_time:.2f} seconds")
        print(f">> bigvgan_time: {bigvgan_time:.2f} seconds")
        print(f">> Total inference time: {end_time - start_time:.2f} seconds")
        print(f">> Generated audio length: {wav_length:.2f} seconds")
        print(f">> RTF: {(end_time - start_time) / wav_length:.4f}")

        # save audio
        wav = wav.cpu()  # to cpu
        if output_path:
            # Save the audio directly to the given path
            if os.path.isfile(output_path):
                os.remove(output_path)
                print(">> removed old wav file:", output_path)
            if os.path.dirname(output_path) != "":
                os.makedirs(os.path.dirname(output_path), exist_ok=True)
            torchaudio.save(output_path, wav.type(torch.int16), sampling_rate)
            print(">> wav file saved to:", output_path)
            return output_path
        else:
            # Return in the format Gradio expects
            wav_data = wav.type(torch.int16)
            wav_data = wav_data.numpy().T
            return (sampling_rate, wav_data)

if __name__ == "__main__":
    prompt_wav = "examples/voice_01.wav"
    # "Welcome, everyone, to try indextts2, and please give us your comments and feedback. Thank you all."
    text = '欢迎大家来体验indextts2,并给予我们意见与反馈,谢谢大家。'

    tts = IndexTTS(cfg_path="checkpoints/config.yaml", model_dir="checkpoints", use_cuda_kernel=False)
    tts.infer(audio_prompt=prompt_wav, text=text, output_path="gen.wav", verbose=True)