xinference 0.14.1.post1__py3-none-any.whl → 0.14.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xinference might be problematic. Click here for more details.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +15 -34
- xinference/client/restful/restful_client.py +2 -2
- xinference/core/chat_interface.py +45 -10
- xinference/core/image_interface.py +9 -0
- xinference/core/model.py +8 -5
- xinference/core/scheduler.py +1 -2
- xinference/core/worker.py +49 -42
- xinference/deploy/cmdline.py +2 -2
- xinference/deploy/test/test_cmdline.py +7 -7
- xinference/model/audio/chattts.py +24 -9
- xinference/model/audio/core.py +8 -2
- xinference/model/audio/fish_speech.py +228 -0
- xinference/model/audio/model_spec.json +8 -0
- xinference/model/embedding/core.py +23 -1
- xinference/model/image/model_spec.json +2 -1
- xinference/model/image/model_spec_modelscope.json +2 -1
- xinference/model/image/stable_diffusion/core.py +49 -1
- xinference/model/llm/__init__.py +26 -27
- xinference/model/llm/{ggml/llamacpp.py → llama_cpp/core.py} +2 -35
- xinference/model/llm/llm_family.json +606 -1266
- xinference/model/llm/llm_family.py +16 -139
- xinference/model/llm/llm_family_modelscope.json +276 -313
- xinference/model/llm/lmdeploy/__init__.py +0 -0
- xinference/model/llm/lmdeploy/core.py +557 -0
- xinference/model/llm/memory.py +9 -9
- xinference/model/llm/sglang/core.py +2 -2
- xinference/model/llm/{pytorch → transformers}/chatglm.py +6 -13
- xinference/model/llm/{pytorch → transformers}/cogvlm2.py +4 -45
- xinference/model/llm/transformers/cogvlm2_video.py +524 -0
- xinference/model/llm/{pytorch → transformers}/core.py +3 -10
- xinference/model/llm/{pytorch → transformers}/glm4v.py +2 -23
- xinference/model/llm/transformers/intern_vl.py +540 -0
- xinference/model/llm/{pytorch → transformers}/internlm2.py +4 -8
- xinference/model/llm/{pytorch → transformers}/minicpmv25.py +2 -23
- xinference/model/llm/{pytorch → transformers}/minicpmv26.py +66 -41
- xinference/model/llm/{pytorch → transformers}/utils.py +1 -2
- xinference/model/llm/{pytorch → transformers}/yi_vl.py +2 -24
- xinference/model/llm/utils.py +85 -70
- xinference/model/llm/vllm/core.py +110 -11
- xinference/model/utils.py +1 -95
- xinference/thirdparty/fish_speech/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/callbacks/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/callbacks/grad_norm.py +113 -0
- xinference/thirdparty/fish_speech/fish_speech/configs/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/configs/lora/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/conversation.py +2 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/concat_repeat.py +53 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/text_data_pb2.py +33 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/text_data_stream.py +36 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/semantic.py +496 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/vqgan.py +147 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/core.py +40 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/en_US.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/es_ES.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/ja_JP.json +123 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/pt_BR.json +133 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/zh_CN.json +122 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/scan.py +122 -0
- xinference/thirdparty/fish_speech/fish_speech/models/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/lit_module.py +202 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/llama.py +779 -0
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/lora.py +92 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/__init__.py +3 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/lit_module.py +442 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/discriminator.py +44 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/firefly.py +625 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/fsq.py +139 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/reference.py +115 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/wavenet.py +225 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/utils.py +94 -0
- xinference/thirdparty/fish_speech/fish_speech/scheduler.py +40 -0
- xinference/thirdparty/fish_speech/fish_speech/text/__init__.py +4 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_class.py +172 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_constant.py +30 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/basic_util.py +342 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/cardinal.py +32 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/date.py +75 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/digit.py +32 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/fraction.py +35 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/money.py +43 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/percentage.py +33 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/telephone.py +51 -0
- xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/text.py +177 -0
- xinference/thirdparty/fish_speech/fish_speech/text/clean.py +69 -0
- xinference/thirdparty/fish_speech/fish_speech/text/spliter.py +130 -0
- xinference/thirdparty/fish_speech/fish_speech/train.py +139 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/__init__.py +23 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/braceexpand.py +217 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/context.py +13 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/file.py +16 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/instantiators.py +50 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/logger.py +55 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/logging_utils.py +48 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/rich_utils.py +100 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/spectrogram.py +122 -0
- xinference/thirdparty/fish_speech/fish_speech/utils/utils.py +114 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/launch_utils.py +120 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/manage.py +1237 -0
- xinference/thirdparty/fish_speech/tools/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/api.py +495 -0
- xinference/thirdparty/fish_speech/tools/auto_rerank.py +159 -0
- xinference/thirdparty/fish_speech/tools/download_models.py +55 -0
- xinference/thirdparty/fish_speech/tools/extract_model.py +21 -0
- xinference/thirdparty/fish_speech/tools/file.py +108 -0
- xinference/thirdparty/fish_speech/tools/gen_ref.py +36 -0
- xinference/thirdparty/fish_speech/tools/llama/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/llama/build_dataset.py +169 -0
- xinference/thirdparty/fish_speech/tools/llama/eval_in_context.py +171 -0
- xinference/thirdparty/fish_speech/tools/llama/generate.py +698 -0
- xinference/thirdparty/fish_speech/tools/llama/merge_lora.py +95 -0
- xinference/thirdparty/fish_speech/tools/llama/quantize.py +497 -0
- xinference/thirdparty/fish_speech/tools/llama/rebuild_tokenizer.py +57 -0
- xinference/thirdparty/fish_speech/tools/merge_asr_files.py +55 -0
- xinference/thirdparty/fish_speech/tools/post_api.py +164 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/auto_model.py +573 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/fun_asr.py +332 -0
- xinference/thirdparty/fish_speech/tools/sensevoice/vad_utils.py +61 -0
- xinference/thirdparty/fish_speech/tools/smart_pad.py +47 -0
- xinference/thirdparty/fish_speech/tools/vqgan/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/vqgan/create_train_split.py +83 -0
- xinference/thirdparty/fish_speech/tools/vqgan/extract_vq.py +227 -0
- xinference/thirdparty/fish_speech/tools/vqgan/inference.py +120 -0
- xinference/thirdparty/fish_speech/tools/webui.py +619 -0
- xinference/thirdparty/fish_speech/tools/whisper_asr.py +176 -0
- xinference/thirdparty/internvl/__init__.py +0 -0
- xinference/thirdparty/internvl/conversation.py +393 -0
- xinference/thirdparty/omnilmm/model/utils.py +16 -1
- xinference/web/ui/build/asset-manifest.json +3 -3
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/js/main.661c7b0a.js +3 -0
- xinference/web/ui/build/static/js/{main.17ca0398.js.map → main.661c7b0a.js.map} +1 -1
- xinference/web/ui/node_modules/.cache/babel-loader/070d8c6b3b0f3485c6d3885f0b6bbfdf9643e088a468acbd5d596f2396071c16.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/213b5913e164773c2b0567455377765715f5f07225fbac77ad8e1e9dc9648a47.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/4de9a6942c5f1749d6cbfdd54279699975f16016b182848bc253886f52ec2ec3.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/5391543180fead1eeef5364300301498d58a7d91d62de3841a32768b67f4552f.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/5c26a23b5eacf5b752a08531577ae3840bb247745ef9a39583dc2d05ba93a82a.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/714c37ce0ec5b5c591033f02be2f3f491fdd70da3ef568ee4a4f94689a3d5ca2.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/822586ed1077201b64b954f12f25e3f9b45678c1acbabe53d8af3ca82ca71f33.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/978b57d1a04a701bc3fcfebc511f5f274eed6ed7eade67f6fb76c27d5fd9ecc8.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/a797831de0dc74897f4b50b3426555d748f328b4c2cc391de709eadaf6a5f3e3.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/bd6ad8159341315a1764c397621a560809f7eb7219ab5174c801fca7e969d943.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/e64b7e8cedcf43d4c95deba60ec1341855c887705805bb62431693118b870c69.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/e91938976f229ce986b2907e51e1f00540b584ced0a315d498c172d13220739d.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/f72f011744c4649fabddca6f7a9327861ac0a315a89b1a2e62a39774e7863845.json +1 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/METADATA +22 -13
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/RECORD +170 -79
- xinference/locale/utils.py +0 -39
- xinference/locale/zh_CN.json +0 -26
- xinference/model/llm/ggml/tools/__init__.py +0 -15
- xinference/model/llm/ggml/tools/convert_ggml_to_gguf.py +0 -498
- xinference/model/llm/ggml/tools/gguf.py +0 -884
- xinference/model/llm/pytorch/__init__.py +0 -13
- xinference/model/llm/pytorch/baichuan.py +0 -81
- xinference/model/llm/pytorch/falcon.py +0 -138
- xinference/model/llm/pytorch/intern_vl.py +0 -352
- xinference/model/llm/pytorch/vicuna.py +0 -69
- xinference/web/ui/build/static/js/main.17ca0398.js +0 -3
- xinference/web/ui/node_modules/.cache/babel-loader/1444c41a4d04494f1cbc2d8c1537df107b451cb569cb2c1fbf5159f3a4841a5f.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/2f40209b32e7e46a2eab6b8c8a355eb42c3caa8bc3228dd929f32fd2b3940294.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/44774c783428f952d8e2e4ad0998a9c5bc16a57cd9c68b7c5ff18aaa5a41d65c.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/5262556baf9207738bf6a8ba141ec6599d0a636345c245d61fdf88d3171998cb.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/6450605fac003812485f6251b9f0caafbf2e5bfc3bbe2f000050d9e2fdb8dcd3.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/71684495d995c7e266eecc6a0ad8ea0284cc785f80abddf863789c57a6134969.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/80acd1edf31542ab1dcccfad02cb4b38f3325cff847a781fcce97500cfd6f878.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/8a9742ddd8ba8546ef42dc14caca443f2b4524fabed7bf269e0eff3b7b64ee7d.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/d06a96a3c9c32e42689094aa3aaad41c8125894e956b8f84a70fadce6e3f65b3.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/d93730e2b5d7e8c957b4d0965d2ed1dac9045a649adbd47c220d11f255d4b1e0.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e656dc00b4d8b387f0a81ba8fc558767df1601c66369e2eb86a5ef27cf080572.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f28b83886159d83b84f099b05d607a822dca4dd7f2d8aa6d56fe08bab0b5b086.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f3e02274cb1964e99b1fe69cbb6db233d3d8d7dd05d50ebcdb8e66d50b224b7b.json +0 -1
- /xinference/{locale → model/llm/llama_cpp}/__init__.py +0 -0
- /xinference/model/llm/{ggml → transformers}/__init__.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/compression.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/deepseek_vl.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/llama_2.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/omnilmm.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/qwen_vl.py +0 -0
- /xinference/model/llm/{pytorch → transformers}/tensorizer_utils.py +0 -0
- /xinference/web/ui/build/static/js/{main.17ca0398.js.LICENSE.txt → main.661c7b0a.js.LICENSE.txt} +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/LICENSE +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/WHEEL +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/entry_points.txt +0 -0
- {xinference-0.14.1.post1.dist-info → xinference-0.14.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
{
|
|
2
|
+
"16-mixed is recommended for 10+ series GPU": "16-mixed is recommended for 10+ series GPU",
|
|
3
|
+
"5 to 10 seconds of reference audio, useful for specifying speaker.": "5 to 10 seconds of reference audio, useful for specifying speaker.",
|
|
4
|
+
"A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).": "A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).",
|
|
5
|
+
"Accumulate Gradient Batches": "Accumulate Gradient Batches",
|
|
6
|
+
"Add to Processing Area": "Add to Processing Area",
|
|
7
|
+
"Added path successfully!": "Added path successfully!",
|
|
8
|
+
"Advanced Config": "Advanced Config",
|
|
9
|
+
"Base LLAMA Model": "Base LLAMA Model",
|
|
10
|
+
"Batch Inference": "Batch Inference",
|
|
11
|
+
"Batch Size": "Batch Size",
|
|
12
|
+
"Changing with the Model Path": "Changing with the Model Path",
|
|
13
|
+
"Chinese": "Chinese",
|
|
14
|
+
"Compile Model": "Compile Model",
|
|
15
|
+
"Compile the model can significantly reduce the inference time, but will increase cold start time": "Compile the model can significantly reduce the inference time, but will increase cold start time",
|
|
16
|
+
"Copy": "Copy",
|
|
17
|
+
"Data Preprocessing": "Data Preprocessing",
|
|
18
|
+
"Data Preprocessing Path": "Data Preprocessing Path",
|
|
19
|
+
"Data Source": "Data Source",
|
|
20
|
+
"Decoder Model Config": "Decoder Model Config",
|
|
21
|
+
"Decoder Model Path": "Decoder Model Path",
|
|
22
|
+
"Disabled": "Disabled",
|
|
23
|
+
"Enable Reference Audio": "Enable Reference Audio",
|
|
24
|
+
"English": "English",
|
|
25
|
+
"Error Message": "Error Message",
|
|
26
|
+
"File Preprocessing": "File Preprocessing",
|
|
27
|
+
"Generate": "Generate",
|
|
28
|
+
"Generated Audio": "Generated Audio",
|
|
29
|
+
"If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format": "If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format",
|
|
30
|
+
"Infer interface is closed": "Infer interface is closed",
|
|
31
|
+
"Inference Configuration": "Inference Configuration",
|
|
32
|
+
"Inference Server Configuration": "Inference Server Configuration",
|
|
33
|
+
"Inference Server Error": "Inference Server Error",
|
|
34
|
+
"Inferring interface is launched at {}": "Inferring interface is launched at {}",
|
|
35
|
+
"Initial Learning Rate": "Initial Learning Rate",
|
|
36
|
+
"Input Audio & Source Path for Transcription": "Input Audio & Source Path for Transcription",
|
|
37
|
+
"Input Text": "Input Text",
|
|
38
|
+
"Invalid path: {}": "Invalid path: {}",
|
|
39
|
+
"It is recommended to use CUDA, if you have low configuration, use CPU": "It is recommended to use CUDA, if you have low configuration, use CPU",
|
|
40
|
+
"Iterative Prompt Length, 0 means off": "Iterative Prompt Length, 0 means off",
|
|
41
|
+
"Japanese": "Japanese",
|
|
42
|
+
"LLAMA Configuration": "LLAMA Configuration",
|
|
43
|
+
"LLAMA Model Config": "LLAMA Model Config",
|
|
44
|
+
"LLAMA Model Path": "LLAMA Model Path",
|
|
45
|
+
"Labeling Device": "Labeling Device",
|
|
46
|
+
"LoRA Model to be merged": "LoRA Model to be merged",
|
|
47
|
+
"Maximum Audio Duration": "Maximum Audio Duration",
|
|
48
|
+
"Maximum Length per Sample": "Maximum Length per Sample",
|
|
49
|
+
"Maximum Training Steps": "Maximum Training Steps",
|
|
50
|
+
"Maximum tokens per batch, 0 means no limit": "Maximum tokens per batch, 0 means no limit",
|
|
51
|
+
"Merge": "Merge",
|
|
52
|
+
"Merge LoRA": "Merge LoRA",
|
|
53
|
+
"Merge successfully": "Merge successfully",
|
|
54
|
+
"Minimum Audio Duration": "Minimum Audio Duration",
|
|
55
|
+
"Model Output Path": "Model Output Path",
|
|
56
|
+
"Model Size": "Model Size",
|
|
57
|
+
"Move": "Move",
|
|
58
|
+
"Move files successfully": "Move files successfully",
|
|
59
|
+
"No audio generated, please check the input text.": "No audio generated, please check the input text.",
|
|
60
|
+
"No selected options": "No selected options",
|
|
61
|
+
"Number of Workers": "Number of Workers",
|
|
62
|
+
"Open Inference Server": "Open Inference Server",
|
|
63
|
+
"Open Labeler WebUI": "Open Labeler WebUI",
|
|
64
|
+
"Open Tensorboard": "Open Tensorboard",
|
|
65
|
+
"Opened labeler in browser": "Opened labeler in browser",
|
|
66
|
+
"Optional Label Language": "Optional Label Language",
|
|
67
|
+
"Optional online ver": "Optional online ver",
|
|
68
|
+
"Output Path": "Output Path",
|
|
69
|
+
"Path error, please check the model file exists in the corresponding path": "Path error, please check the model file exists in the corresponding path",
|
|
70
|
+
"Precision": "Precision",
|
|
71
|
+
"Probability of applying Speaker Condition": "Probability of applying Speaker Condition",
|
|
72
|
+
"Put your text here.": "Put your text here.",
|
|
73
|
+
"Reference Audio": "Reference Audio",
|
|
74
|
+
"Reference Text": "Reference Text",
|
|
75
|
+
"Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.": "Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.",
|
|
76
|
+
"Remove Selected Data": "Remove Selected Data",
|
|
77
|
+
"Removed path successfully!": "Removed path successfully!",
|
|
78
|
+
"Repetition Penalty": "Repetition Penalty",
|
|
79
|
+
"Save model every n steps": "Save model every n steps",
|
|
80
|
+
"Select LLAMA ckpt": "Select LLAMA ckpt",
|
|
81
|
+
"Select VITS ckpt": "Select VITS ckpt",
|
|
82
|
+
"Select VQGAN ckpt": "Select VQGAN ckpt",
|
|
83
|
+
"Select source file processing method": "Select source file processing method",
|
|
84
|
+
"Select the model to be trained (Depending on the Tab page you are on)": "Select the model to be trained (Depending on the Tab page you are on)",
|
|
85
|
+
"Selected: {}": "Selected: {}",
|
|
86
|
+
"Speaker": "Speaker",
|
|
87
|
+
"Speaker is identified by the folder name": "Speaker is identified by the folder name",
|
|
88
|
+
"Start Training": "Start Training",
|
|
89
|
+
"Streaming Audio": "Streaming Audio",
|
|
90
|
+
"Streaming Generate": "Streaming Generate",
|
|
91
|
+
"Tensorboard Host": "Tensorboard Host",
|
|
92
|
+
"Tensorboard Log Path": "Tensorboard Log Path",
|
|
93
|
+
"Tensorboard Port": "Tensorboard Port",
|
|
94
|
+
"Tensorboard interface is closed": "Tensorboard interface is closed",
|
|
95
|
+
"Tensorboard interface is launched at {}": "Tensorboard interface is launched at {}",
|
|
96
|
+
"Text is too long, please keep it under {} characters.": "Text is too long, please keep it under {} characters.",
|
|
97
|
+
"The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.": "The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.",
|
|
98
|
+
"Training Configuration": "Training Configuration",
|
|
99
|
+
"Training Error": "Training Error",
|
|
100
|
+
"Training stopped": "Training stopped",
|
|
101
|
+
"Type name of the speaker": "Type name of the speaker",
|
|
102
|
+
"Type the path or select from the dropdown": "Type the path or select from the dropdown",
|
|
103
|
+
"Use LoRA": "Use LoRA",
|
|
104
|
+
"Use LoRA can save GPU memory, but may reduce the quality of the model": "Use LoRA can save GPU memory, but may reduce the quality of the model",
|
|
105
|
+
"Use filelist": "Use filelist",
|
|
106
|
+
"Use large for 10G+ GPU, medium for 5G, small for 2G": "Use large for 10G+ GPU, medium for 5G, small for 2G",
|
|
107
|
+
"VITS Configuration": "VITS Configuration",
|
|
108
|
+
"VQGAN Configuration": "VQGAN Configuration",
|
|
109
|
+
"Validation Batch Size": "Validation Batch Size",
|
|
110
|
+
"View the status of the preprocessing folder (use the slider to control the depth of the tree)": "View the status of the preprocessing folder (use the slider to control the depth of the tree)",
|
|
111
|
+
"We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.": "We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.",
|
|
112
|
+
"WebUI Host": "WebUI Host",
|
|
113
|
+
"WebUI Port": "WebUI Port",
|
|
114
|
+
"Whisper Model": "Whisper Model",
|
|
115
|
+
"You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).": "You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).",
|
|
116
|
+
"bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU": "bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU",
|
|
117
|
+
"latest": "latest",
|
|
118
|
+
"new": "new",
|
|
119
|
+
"Realtime Transform Text": "Realtime Transform Text",
|
|
120
|
+
"Normalization Result Preview (Currently Only Chinese)": "Normalization Result Preview (Currently Only Chinese)",
|
|
121
|
+
"Text Normalization": "Text Normalization"
|
|
122
|
+
}
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
{
|
|
2
|
+
"16-mixed is recommended for 10+ series GPU": "se recomienda 16-mixed para GPU de la serie 10+",
|
|
3
|
+
"5 to 10 seconds of reference audio, useful for specifying speaker.": "5 a 10 segundos de audio de referencia, útil para especificar el hablante.",
|
|
4
|
+
"A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).": "Un modelo de texto a voz basado en VQ-GAN y Llama desarrollado por [Fish Audio](https://fish.audio).",
|
|
5
|
+
"Accumulate Gradient Batches": "Acumular lotes de gradientes",
|
|
6
|
+
"Add to Processing Area": "Agregar al Área de Procesamiento",
|
|
7
|
+
"Added path successfully!": "¡Ruta agregada exitosamente!",
|
|
8
|
+
"Advanced Config": "Configuración Avanzada",
|
|
9
|
+
"Base LLAMA Model": "Modelo Base LLAMA",
|
|
10
|
+
"Batch Inference": "Inferencia por Lote",
|
|
11
|
+
"Batch Size": "Tamaño del Lote",
|
|
12
|
+
"Changing with the Model Path": "Cambiando con la Ruta del Modelo",
|
|
13
|
+
"Chinese": "Chino",
|
|
14
|
+
"Compile Model": "Compilar Modelo",
|
|
15
|
+
"Compile the model can significantly reduce the inference time, but will increase cold start time": "Compilar el modelo puede reducir significativamente el tiempo de inferencia, pero aumentará el tiempo de inicio en frío",
|
|
16
|
+
"Copy": "Copiar",
|
|
17
|
+
"Data Preprocessing": "Preprocesamiento de Datos",
|
|
18
|
+
"Data Preprocessing Path": "Ruta de Preprocesamiento de Datos",
|
|
19
|
+
"Data Source": "Fuente de Datos",
|
|
20
|
+
"Decoder Model Config": "Configuración del modelo decodificador",
|
|
21
|
+
"Decoder Model Path": "Ruta del modelo decodificador",
|
|
22
|
+
"Disabled": "Desactivado",
|
|
23
|
+
"Enable Reference Audio": "Habilitar Audio de Referencia",
|
|
24
|
+
"English": "Inglés",
|
|
25
|
+
"Error Message": "Mensaje de Error",
|
|
26
|
+
"File Preprocessing": "Preprocesamiento de Archivos",
|
|
27
|
+
"Generate": "Generar",
|
|
28
|
+
"Generated Audio": "Audio Generado",
|
|
29
|
+
"If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format": "Si no hay texto correspondiente para el audio, aplique ASR para asistencia, soporte para formato .txt o .lab",
|
|
30
|
+
"Infer interface is closed": "La interfaz de inferencia está cerrada",
|
|
31
|
+
"Inference Configuration": "Configuración de Inferencia",
|
|
32
|
+
"Inference Server Configuration": "Configuración del Servidor de Inferencia",
|
|
33
|
+
"Inference Server Error": "Error del Servidor de Inferencia",
|
|
34
|
+
"Inferring interface is launched at {}": "La interfaz de inferencia se ha lanzado en {}",
|
|
35
|
+
"Initial Learning Rate": "Tasa de Aprendizaje Inicial",
|
|
36
|
+
"Input Audio & Source Path for Transcription": "Audio de Entrada y Ruta de Origen para Transcripción",
|
|
37
|
+
"Input Text": "Texto de Entrada",
|
|
38
|
+
"Invalid path: {}": "Ruta inválida: {}",
|
|
39
|
+
"It is recommended to use CUDA, if you have low configuration, use CPU": "Se recomienda usar CUDA, si tiene una configuración baja, use CPU",
|
|
40
|
+
"Iterative Prompt Length, 0 means off": "Longitud de la Indicación Iterativa, 0 significa apagado",
|
|
41
|
+
"Japanese": "Japonés",
|
|
42
|
+
"LLAMA Configuration": "Configuración de LLAMA",
|
|
43
|
+
"LLAMA Model Config": "Configuración del Modelo LLAMA",
|
|
44
|
+
"LLAMA Model Path": "Ruta del Modelo LLAMA",
|
|
45
|
+
"Labeling Device": "Dispositivo de Etiquetado",
|
|
46
|
+
"LoRA Model to be merged": "Modelo LoRA a fusionar",
|
|
47
|
+
"Maximum Audio Duration": "Duración máxima de audio",
|
|
48
|
+
"Maximum Length per Sample": "Longitud Máxima por Muestra",
|
|
49
|
+
"Maximum Training Steps": "Pasos Máximos de Entrenamiento",
|
|
50
|
+
"Maximum tokens per batch, 0 means no limit": "Máximo de tokens por lote, 0 significa sin límite",
|
|
51
|
+
"Merge": "Fusionar",
|
|
52
|
+
"Merge LoRA": "Fusionar LoRA",
|
|
53
|
+
"Merge successfully": "Fusionado exitosamente",
|
|
54
|
+
"Minimum Audio Duration": "Duración mínima de audio",
|
|
55
|
+
"Model Output Path": "Ruta de Salida del Modelo",
|
|
56
|
+
"Model Size": "Tamaño del Modelo",
|
|
57
|
+
"Move": "Mover",
|
|
58
|
+
"Move files successfully": "Archivos movidos exitosamente",
|
|
59
|
+
"No audio generated, please check the input text.": "No se generó audio, por favor verifique el texto de entrada.",
|
|
60
|
+
"No selected options": "No hay opciones seleccionadas",
|
|
61
|
+
"Number of Workers": "Número de Trabajadores",
|
|
62
|
+
"Open Inference Server": "Abrir Servidor de Inferencia",
|
|
63
|
+
"Open Labeler WebUI": "Abrir Interfaz Web del Etiquetador",
|
|
64
|
+
"Open Tensorboard": "Abrir Tensorboard",
|
|
65
|
+
"Opened labeler in browser": "Se abrió el etiquetador en el navegador",
|
|
66
|
+
"Optional Label Language": "Idioma de Etiquetado Opcional",
|
|
67
|
+
"Optional online ver": "Versión en línea opcional",
|
|
68
|
+
"Output Path": "Ruta de Salida",
|
|
69
|
+
"Path error, please check the model file exists in the corresponding path": "Error de ruta, por favor verifique que el archivo del modelo exista en la ruta correspondiente",
|
|
70
|
+
"Precision": "Precisión",
|
|
71
|
+
"Probability of applying Speaker Condition": "Probabilidad de aplicar Condición de Hablante",
|
|
72
|
+
"Put your text here.": "Ponga su texto aquí.",
|
|
73
|
+
"Reference Audio": "Audio de Referencia",
|
|
74
|
+
"Reference Text": "Texto de Referencia",
|
|
75
|
+
"Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.": "El código relacionado se publica bajo la Licencia BSD-3-Clause, y los pesos se publican bajo la Licencia CC BY-NC-SA 4.0.",
|
|
76
|
+
"Remove Selected Data": "Eliminar Datos Seleccionados",
|
|
77
|
+
"Removed path successfully!": "¡Ruta eliminada exitosamente!",
|
|
78
|
+
"Repetition Penalty": "Penalización por Repetición",
|
|
79
|
+
"Save model every n steps": "Guardar modelo cada n pasos",
|
|
80
|
+
"Select LLAMA ckpt": "Seleccionar punto de control LLAMA",
|
|
81
|
+
"Select VITS ckpt": "Seleccionar punto de control VITS",
|
|
82
|
+
"Select VQGAN ckpt": "Seleccionar punto de control VQGAN",
|
|
83
|
+
"Select source file processing method": "Seleccione el método de procesamiento de archivos fuente",
|
|
84
|
+
"Select the model to be trained (Depending on the Tab page you are on)": "Seleccione el modelo a entrenar (Dependiendo de la pestaña en la que se encuentre)",
|
|
85
|
+
"Selected: {}": "Seleccionado: {}",
|
|
86
|
+
"Speaker": "Hablante",
|
|
87
|
+
"Speaker is identified by the folder name": "El hablante se identifica por el nombre de la carpeta",
|
|
88
|
+
"Start Training": "Iniciar Entrenamiento",
|
|
89
|
+
"Streaming Audio": "transmisión de audio",
|
|
90
|
+
"Streaming Generate": "síntesis en flujo",
|
|
91
|
+
"Tensorboard Host": "Host de Tensorboard",
|
|
92
|
+
"Tensorboard Log Path": "Ruta de Registro de Tensorboard",
|
|
93
|
+
"Tensorboard Port": "Puerto de Tensorboard",
|
|
94
|
+
"Tensorboard interface is closed": "La interfaz de Tensorboard está cerrada",
|
|
95
|
+
"Tensorboard interface is launched at {}": "La interfaz de Tensorboard se ha lanzado en {}",
|
|
96
|
+
"Text is too long, please keep it under {} characters.": "El texto es demasiado largo, por favor manténgalo por debajo de {} caracteres.",
|
|
97
|
+
"The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.": "La ruta de la carpeta de entrada a la izquierda o la lista de archivos. Ya sea que esté marcado o no, se utilizará para el entrenamiento posterior en esta lista.",
|
|
98
|
+
"Training Configuration": "Configuración de Entrenamiento",
|
|
99
|
+
"Training Error": "Error de Entrenamiento",
|
|
100
|
+
"Training stopped": "Entrenamiento detenido",
|
|
101
|
+
"Type name of the speaker": "Escriba el nombre del hablante",
|
|
102
|
+
"Type the path or select from the dropdown": "Escriba la ruta o seleccione de la lista desplegable",
|
|
103
|
+
"Use LoRA": "Usar LoRA",
|
|
104
|
+
"Use LoRA can save GPU memory, but may reduce the quality of the model": "Usar LoRA puede ahorrar memoria GPU, pero puede reducir la calidad del modelo",
|
|
105
|
+
"Use filelist": "Usar lista de archivos",
|
|
106
|
+
"Use large for 10G+ GPU, medium for 5G, small for 2G": "Use grande para GPU de 10G+, mediano para 5G, pequeño para 2G",
|
|
107
|
+
"VITS Configuration": "Configuración de VITS",
|
|
108
|
+
"VQGAN Configuration": "Configuración de VQGAN",
|
|
109
|
+
"Validation Batch Size": "Tamaño del Lote de Validación",
|
|
110
|
+
"View the status of the preprocessing folder (use the slider to control the depth of the tree)": "Vea el estado de la carpeta de preprocesamiento (use el control deslizante para controlar la profundidad del árbol)",
|
|
111
|
+
"We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.": "No somos responsables de ningún mal uso del modelo, por favor considere sus leyes y regulaciones locales antes de usarlo.",
|
|
112
|
+
"WebUI Host": "Host de WebUI",
|
|
113
|
+
"WebUI Port": "Puerto de WebUI",
|
|
114
|
+
"Whisper Model": "Modelo Whisper",
|
|
115
|
+
"You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).": "Puede encontrar el código fuente [aquí](https://github.com/fishaudio/fish-speech) y los modelos [aquí](https://huggingface.co/fishaudio/fish-speech-1).",
|
|
116
|
+
"bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU": "Se recomienda bf16-true para GPU de la serie 30+, se recomienda 16-mixed para GPU de la serie 10+",
|
|
117
|
+
"latest": "más reciente",
|
|
118
|
+
"new": "nuevo",
|
|
119
|
+
"Realtime Transform Text": "Transformación de Texto en Tiempo Real",
|
|
120
|
+
"Normalization Result Preview (Currently Only Chinese)": "Vista Previa del Resultado de Normalización (Actualmente Solo Chino)",
|
|
121
|
+
"Text Normalization": "Normalización de Texto"
|
|
122
|
+
}
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
{
|
|
2
|
+
"16-mixed is recommended for 10+ series GPU": "10シリーズ以降のGPUには16-mixedをお勧めします",
|
|
3
|
+
"5 to 10 seconds of reference audio, useful for specifying speaker.": "話者を指定するのに役立つ、5~10秒のリファレンスオーディオ。",
|
|
4
|
+
"A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).": "[Fish Audio](https://fish.audio)が開発したVQ-GANとLlamaに基づくテキスト音声合成モデル。",
|
|
5
|
+
"Accumulate Gradient Batches": "勾配バッチの累積",
|
|
6
|
+
"Add to Processing Area": "処理エリアに追加",
|
|
7
|
+
"Added path successfully!": "パスの追加に成功しました!",
|
|
8
|
+
"Advanced Config": "詳細設定",
|
|
9
|
+
"Base LLAMA Model": "基本LLAMAモデル",
|
|
10
|
+
"Batch Inference": "バッチ推論",
|
|
11
|
+
"Batch Size": "バッチサイズ",
|
|
12
|
+
"Changing with the Model Path": "モデルのパスに伴って変化する",
|
|
13
|
+
"Chinese": "中国語",
|
|
14
|
+
"Compile Model": "モデルのコンパイル",
|
|
15
|
+
"Compile the model can significantly reduce the inference time, but will increase cold start time": "モデルをコンパイルすると推論時間を大幅に短縮できますが、コールドスタート時間が長くなります",
|
|
16
|
+
"Copy": "コピー",
|
|
17
|
+
"Data Preprocessing": "データ前処理",
|
|
18
|
+
"Data Preprocessing Path": "データ前処理パス",
|
|
19
|
+
"Data Source": "データソース",
|
|
20
|
+
"Decoder Model Config": "デコーダーモデルの構成",
|
|
21
|
+
"Decoder Model Path": "デコーダーモデルのパス",
|
|
22
|
+
"Disabled": "無効",
|
|
23
|
+
"Enable Reference Audio": "リファレンスオーディオを有効にする",
|
|
24
|
+
"English": "英語",
|
|
25
|
+
"Error Message": "エラーメッセージ",
|
|
26
|
+
"File Preprocessing": "ファイルの前処理",
|
|
27
|
+
"Generate": "生成",
|
|
28
|
+
"Generated Audio": "生成されたオーディオ",
|
|
29
|
+
"If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format": "音声に対応するテキストがない場合は、ASRを適用してサポートします。.txtまたは.lab形式をサポートしています",
|
|
30
|
+
"Infer interface is closed": "推論インターフェースが閉じられています",
|
|
31
|
+
"Inference Configuration": "推論設定",
|
|
32
|
+
"Inference Server Configuration": "推論サーバー設定",
|
|
33
|
+
"Inference Server Error": "推論サーバーエラー",
|
|
34
|
+
"Inferring interface is launched at {}": "推論インターフェースが{}で起動しました",
|
|
35
|
+
"Initial Learning Rate": "初期学習率",
|
|
36
|
+
"Input Audio & Source Path for Transcription": "入力オーディオと文字起こしのソースパス",
|
|
37
|
+
"Input Text": "入力テキスト",
|
|
38
|
+
"Invalid path: {}": "無効なパス: {}",
|
|
39
|
+
"It is recommended to use CUDA, if you have low configuration, use CPU": "CUDAの使用をお勧めします。低い構成の場合はCPUを使用してください",
|
|
40
|
+
"Iterative Prompt Length, 0 means off": "反復プロンプト長。0はオフを意味します",
|
|
41
|
+
"Japanese": "日本語",
|
|
42
|
+
"LLAMA Configuration": "LLAMA設定",
|
|
43
|
+
"LLAMA Model Config": "LLAMAモデル設定",
|
|
44
|
+
"LLAMA Model Path": "LLAMAモデルパス",
|
|
45
|
+
"Labeling Device": "ラベリングデバイス",
|
|
46
|
+
"LoRA Model to be merged": "マージするLoRAモデル",
|
|
47
|
+
"Maximum Audio Duration": "最大オーディオの長さ",
|
|
48
|
+
"Maximum Length per Sample": "サンプルあたりの最大長",
|
|
49
|
+
"Maximum Training Steps": "最大トレーニングステップ数",
|
|
50
|
+
"Maximum tokens per batch, 0 means no limit": "バッチあたりの最大トークン数。0は制限なしを意味します",
|
|
51
|
+
"Merge": "マージ",
|
|
52
|
+
"Merge LoRA": "LoRAのマージ",
|
|
53
|
+
"Merge successfully": "マージに成功しました",
|
|
54
|
+
"Minimum Audio Duration": "最小オーディオの長さ",
|
|
55
|
+
"Model Output Path": "モデル出力パス",
|
|
56
|
+
"Model Size": "モデルサイズ",
|
|
57
|
+
"Move": "移動",
|
|
58
|
+
"Move files successfully": "ファイルの移動に成功しました",
|
|
59
|
+
"No audio generated, please check the input text.": "オーディオが生成されていません。入力テキストを確認してください。",
|
|
60
|
+
"No selected options": "選択されたオプションはありません",
|
|
61
|
+
"Number of Workers": "ワーカー数",
|
|
62
|
+
"Open Inference Server": "推論サーバーを開く",
|
|
63
|
+
"Open Labeler WebUI": "ラベラーWebUIを開く",
|
|
64
|
+
"Open Tensorboard": "Tensorboardを開く",
|
|
65
|
+
"Opened labeler in browser": "ブラウザでラベラーを開きました",
|
|
66
|
+
"Optional Label Language": "オプションのラベル言語",
|
|
67
|
+
"Optional online ver": "オプションのオンラインバージョン",
|
|
68
|
+
"Output Path": "出力パス",
|
|
69
|
+
"Path error, please check the model file exists in the corresponding path": "パスエラー。対応するパスにモデルファイルが存在するか確認してください",
|
|
70
|
+
"Precision": "精度",
|
|
71
|
+
"Probability of applying Speaker Condition": "話者条件を適用する確率",
|
|
72
|
+
"Put your text here.": "ここにテキストを入力してください。",
|
|
73
|
+
"Reference Audio": "リファレンスオーディオ",
|
|
74
|
+
"Reference Text": "リファレンステキスト",
|
|
75
|
+
"Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.": "関連コードはBSD-3-Clauseライセンスの下でリリースされ、重みはCC BY-NC-SA 4.0ライセンスの下でリリースされます。",
|
|
76
|
+
"Remove Selected Data": "選択したデータを削除",
|
|
77
|
+
"Removed path successfully!": "パスの削除に成功しました!",
|
|
78
|
+
"Repetition Penalty": "反復ペナルティ",
|
|
79
|
+
"Save model every n steps": "nステップごとにモデルを保存",
|
|
80
|
+
"Select LLAMA ckpt": "LLAMA チェックポイントを選択",
|
|
81
|
+
"Select VITS ckpt": "VITS チェックポイントを選択",
|
|
82
|
+
"Select VQGAN ckpt": "VQGAN チェックポイントを選択",
|
|
83
|
+
"Select source file processing method": "ソースファイルの処理方法を選択",
|
|
84
|
+
"Select the model to be trained (Depending on the Tab page you are on)": "タブページに応じてトレーニングするモデルを選択してください",
|
|
85
|
+
"Selected: {}": "選択済み: {}",
|
|
86
|
+
"Speaker": "話者",
|
|
87
|
+
"Speaker is identified by the folder name": "話者はフォルダ名で識別されます",
|
|
88
|
+
"Start Training": "トレーニング開始",
|
|
89
|
+
"Streaming Audio": "ストリーミングオーディオ",
|
|
90
|
+
"Streaming Generate": "ストリーミング合成",
|
|
91
|
+
"Tensorboard Host": "Tensorboardホスト",
|
|
92
|
+
"Tensorboard Log Path": "Tensorboardログパス",
|
|
93
|
+
"Tensorboard Port": "Tensorboardポート",
|
|
94
|
+
"Tensorboard interface is closed": "Tensorboardインターフェースが閉じられています",
|
|
95
|
+
"Tensorboard interface is launched at {}": "Tensorboardインターフェースが{}で起動されました",
|
|
96
|
+
"Text is too long, please keep it under {} characters.": "テキストが長すぎます。{}文字以内に抑えてください。",
|
|
97
|
+
"The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.": "左側の入力フォルダまたはファイルリストのパス。チェックの有無にかかわらず、このリストの後続のトレーニングに使用されます。",
|
|
98
|
+
"Training Configuration": "トレーニング設定",
|
|
99
|
+
"Training Error": "トレーニングエラー",
|
|
100
|
+
"Training stopped": "トレーニングが停止しました",
|
|
101
|
+
"Type name of the speaker": "話者の名前を入力",
|
|
102
|
+
"Type the path or select from the dropdown": "パスを入力するか、ドロップダウンから選択してください",
|
|
103
|
+
"Use LoRA": "LoRAを使用",
|
|
104
|
+
"Use LoRA can save GPU memory, but may reduce the quality of the model": "LoRAを使用するとGPUメモリを節約できますが、モデルの品質が低下する可能性があります",
|
|
105
|
+
"Use filelist": "ファイルリストを使用",
|
|
106
|
+
"Use large for 10G+ GPU, medium for 5G, small for 2G": "10G以上のGPUには大、5Gには中、2Gには小を使用してください",
|
|
107
|
+
"VITS Configuration": "VITS の構成",
|
|
108
|
+
"VQGAN Configuration": "VQGAN の構成",
|
|
109
|
+
"Validation Batch Size": "検証バッチサイズ",
|
|
110
|
+
"View the status of the preprocessing folder (use the slider to control the depth of the tree)": "前処理フォルダの状態を表示(スライダーを使用してツリーの深さを制御)",
|
|
111
|
+
"We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.": "モデルの誤用については一切責任を負いません。使用する前に、現地の法律と規制を考慮してください。",
|
|
112
|
+
"WebUI Host": "WebUIホスト",
|
|
113
|
+
"WebUI Port": "WebUIポート",
|
|
114
|
+
"Whisper Model": "Whisperモデル",
|
|
115
|
+
"You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).": "ソースコードは[こちら](https://github.com/fishaudio/fish-speech)、モデルは[こちら](https://huggingface.co/fishaudio/fish-speech-1)にあります。",
|
|
116
|
+
"bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU": "30シリーズ以降のGPUにはbf16-trueを、10シリーズ以降のGPUには16-mixedをお勧めします",
|
|
117
|
+
"latest": "最新",
|
|
118
|
+
"new": "新規",
|
|
119
|
+
"Realtime Transform Text": "リアルタイム変換テキスト",
|
|
120
|
+
"Normalization Result Preview (Currently Only Chinese)": "正規化結果プレビュー(現在は中国語のみ)",
|
|
121
|
+
"Text Normalization": "テキスト正規化"
|
|
122
|
+
|
|
123
|
+
}
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
{
|
|
2
|
+
"5 to 10 seconds of reference audio, useful for specifying speaker.": "5 a 10 segundos de áudio de referência, útil para especificar o orador.",
|
|
3
|
+
"A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).": "Um modelo de texto para fala baseado em VQ-GAN e Llama desenvolvido por [Fish Audio](https://fish.audio).",
|
|
4
|
+
"Accumulate Gradient Batches": "Acumular Lotes de Gradiente",
|
|
5
|
+
"Add to Processing Area": "Adicionar à Área de Processamento",
|
|
6
|
+
"Added path successfully!": "Caminho adicionado com sucesso!",
|
|
7
|
+
"Advanced Config": "Configuração Avançada",
|
|
8
|
+
"Base LLAMA Model": "Modelo LLAMA Base",
|
|
9
|
+
"Batch Inference": "Inferência em Lote",
|
|
10
|
+
"Batch Size": "Tamanho do Lote",
|
|
11
|
+
"Changing with the Model Path": "Alterando com o Caminho do Modelo",
|
|
12
|
+
|
|
13
|
+
"Compile Model": "Compilar Modelo",
|
|
14
|
+
"Compile the model can significantly reduce the inference time, but will increase cold start time": "Compilar o modelo pode reduzir significativamente o tempo de inferência, mas aumentará a latência inicial",
|
|
15
|
+
"Copy": "Copiar",
|
|
16
|
+
"Data Preprocessing": "Pré-processamento de Dados",
|
|
17
|
+
"Data Preprocessing Path": "Caminho de Pré-processamento de Dados",
|
|
18
|
+
"Data Source": "Fonte de Dados",
|
|
19
|
+
"Decoder Model Config": "Configuração do Modelo Decodificador",
|
|
20
|
+
"Decoder Model Path": "Caminho do Modelo Decodificador",
|
|
21
|
+
"Disabled": "Desativado",
|
|
22
|
+
"Enable Initial Prompt": "Habilitar Prompt Inicial",
|
|
23
|
+
"Enable Reference Audio": "Habilitar Áudio de Referência",
|
|
24
|
+
"English": "Inglês",
|
|
25
|
+
"Japanese": "Japonês",
|
|
26
|
+
"Chinese": "Chinês",
|
|
27
|
+
"Portuguese": "Português",
|
|
28
|
+
"Spanish": "Espanhol",
|
|
29
|
+
"Error Message": "Mensagem de Erro",
|
|
30
|
+
"Faster Whisper, Up to 5g GPU memory usage": "Faster Whisper (Usa até 5 GB de vRAM)",
|
|
31
|
+
"File Preprocessing": "Pré-processamento de Arquivos",
|
|
32
|
+
"Generate": "Gerar",
|
|
33
|
+
"Generated Audio": "Áudio Gerado",
|
|
34
|
+
"If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format": "Se não houver texto correspondente ao áudio, utilize o ASR para assistência (formatos .txt ou .lab)",
|
|
35
|
+
"Infer interface is closed": "A interface de inferência foi fechada",
|
|
36
|
+
"Inference Configuration": "Configuração de Inferência",
|
|
37
|
+
"Inference Server Configuration": "Configuração do Servidor de Inferência",
|
|
38
|
+
"Inference Server Error": "Erro do Servidor de Inferência",
|
|
39
|
+
"Inferring interface is launched at {}": "A interface de inferência foi iniciada em {}",
|
|
40
|
+
"Initial Learning Rate": "Taxa de Aprendizagem Inicial",
|
|
41
|
+
"Initial Prompt": "Prompt Inicial",
|
|
42
|
+
"Initial prompt can provide contextual or vocabulary-specific guidance to the model.": "O prompt inicial pode fornecer orientação contextual ou específica de vocabulário para o modelo.",
|
|
43
|
+
"Input Audio & Source Path for Transcription": "Entrada de Áudio/Caminho de Origem para Transcrição",
|
|
44
|
+
"Input Text": "Texto de Entrada",
|
|
45
|
+
"Invalid path: {}": "Caminho inválido: {}",
|
|
46
|
+
"It is recommended to use CUDA, if you have low configuration, use CPU": "Para GPUs Nvidia é recomendado usar CUDA. Se não tiver uma GPU Nvidia, use CPU",
|
|
47
|
+
"Iterative Prompt Length, 0 means off": "Comprimento do Prompt Iterativo (0 = desativado)",
|
|
48
|
+
"LLAMA Configuration": "Configuração do LLAMA",
|
|
49
|
+
"LLAMA Model Config": "Configuração do Modelo LLAMA",
|
|
50
|
+
"LLAMA Model Path": "Caminho do Modelo LLAMA",
|
|
51
|
+
"Labeling Device": "Dispositivo de Rotulagem",
|
|
52
|
+
"LoRA Model to be merged": "Modelo LoRA para mesclagem",
|
|
53
|
+
"Maximum Length per Sample": "Comprimento Máximo por Amostra",
|
|
54
|
+
"Maximum Training Steps": "Etapas Máximas de Treinamento",
|
|
55
|
+
"Maximum tokens per batch, 0 means no limit": "Número máximo de tokens por lote, 0 significa sem limite",
|
|
56
|
+
"Merge": "Mesclar",
|
|
57
|
+
"Merge LoRA": "Mesclar LoRA",
|
|
58
|
+
"Merge successfully": "Mesclado com sucesso",
|
|
59
|
+
"Model Output Path": "Caminho de Saída do Modelo",
|
|
60
|
+
"Model Quantization": "Quantização do Modelo",
|
|
61
|
+
"Model Size": "Tamanho do Modelo",
|
|
62
|
+
"Move": "Mover",
|
|
63
|
+
"Move files successfully": "Arquivos movidos com sucesso",
|
|
64
|
+
"No audio generated, please check the input text.": "Nenhum áudio gerado, verifique o texto de entrada.",
|
|
65
|
+
"No selected options": "Nenhuma opção selecionada",
|
|
66
|
+
"Normalization Result Preview (Currently Only Chinese)": "Pré-visualização do Resultado da Normalização (Atualmente Apenas Chinês)",
|
|
67
|
+
"Number of Workers": "Número de Processos",
|
|
68
|
+
"Open Inference Server": "Abrir Servidor de Inferência",
|
|
69
|
+
"Open Labeler WebUI": "Abrir WebUI de Rotulagem",
|
|
70
|
+
"Open Tensorboard": "Abrir Tensorboard",
|
|
71
|
+
"Opened labeler in browser": "WebUI de rotulagem aberta no navegador",
|
|
72
|
+
"Optional Label Language": "Idioma do Rótulo (Opcional)",
|
|
73
|
+
"Optional online ver": "Versão online (opcional)",
|
|
74
|
+
"Output Path": "Caminho de Saída",
|
|
75
|
+
"Path error, please check the model file exists in the corresponding path": "Erro de caminho, verifique se o arquivo do modelo existe no caminho correspondente",
|
|
76
|
+
"Post-quantification Precision": "Precisão Pós-quantização",
|
|
77
|
+
"Precision": "Precisão",
|
|
78
|
+
"Probability of applying Speaker Condition": "Probabilidade de Aplicar Condição de Orador",
|
|
79
|
+
"Put your text here.": "Insira seu texto aqui.",
|
|
80
|
+
"Quantify": "Quantizar",
|
|
81
|
+
"Quantify successfully": "Quantizado com sucesso",
|
|
82
|
+
"Realtime Transform Text": "Transformar Texto em Tempo Real",
|
|
83
|
+
"Reference Audio": "Áudio de Referência",
|
|
84
|
+
"Reference Text": "Texto de Referência",
|
|
85
|
+
"warning": "Aviso",
|
|
86
|
+
"Pre-processing begins...": "O pré-processamento começou!",
|
|
87
|
+
"Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.": "O código relacionado é licenciado sob a Licença BSD-3-Clause, e os pesos sob a Licença CC BY-NC-SA 4.0.",
|
|
88
|
+
"Remove Selected Data": "Remover Dados Selecionados",
|
|
89
|
+
"Removed path successfully!": "Caminho removido com sucesso!",
|
|
90
|
+
"Repetition Penalty": "Penalidade de Repetição",
|
|
91
|
+
"Save model every n steps": "Salvar modelo a cada n etapas",
|
|
92
|
+
"Select LLAMA ckpt": "Selecionar .ckpt do LLAMA",
|
|
93
|
+
"Select source file processing method": "Escolha como processar o arquivo de origem",
|
|
94
|
+
"Select the model to be trained (Depending on the Tab page you are on)": "Selecione o modelo para o treinamento (dependendo da aba em que você está)",
|
|
95
|
+
"Selected: {}": "Selecionado: {}",
|
|
96
|
+
"Speaker is identified by the folder name": "O orador é identificado pelo nome da pasta",
|
|
97
|
+
"Start Training": "Iniciar Treinamento",
|
|
98
|
+
"Streaming Audio": "Áudio em Streaming",
|
|
99
|
+
"Streaming Generate": "Geração em Streaming",
|
|
100
|
+
"Tensorboard Host": "Host do Tensorboard",
|
|
101
|
+
"Tensorboard Log Path": "Caminho de Log do Tensorboard",
|
|
102
|
+
"Tensorboard Port": "Porta do Tensorboard",
|
|
103
|
+
"Tensorboard interface is closed": "A interface do Tensorboard está fechada",
|
|
104
|
+
"Tensorboard interface is launched at {}": "A interface do Tensorboard foi iniciada em {}",
|
|
105
|
+
"Text Normalization": "Normalização de Texto",
|
|
106
|
+
"Text is too long, please keep it under {} characters.": "O texto é muito longo. Mantenha-o com menos de {} caracteres.",
|
|
107
|
+
"The lower the quantitative precision, the more the effectiveness may decrease, but the greater the efficiency will increase": "Quanto menor a precisão quantitativa, mais a eficácia pode diminuir, mas maior será o aumento da eficiência",
|
|
108
|
+
"The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.": "O caminho da pasta de entrada à esquerda ou a lista de arquivos. Independentemente de estar marcada ou não, ela será utilizada para o treinamento subsequente nesta lista.",
|
|
109
|
+
"Training Configuration": "Configuração de Treinamento",
|
|
110
|
+
"Training Error": "Erro de Treinamento",
|
|
111
|
+
"Training stopped": "Treinamento interrompido!",
|
|
112
|
+
"Type the path or select from the dropdown": "Digite o caminho ou selecione no menu suspenso",
|
|
113
|
+
"Use LoRA": "Usar LoRA",
|
|
114
|
+
"Use LoRA can save GPU memory, but may reduce the quality of the model": "O uso de LoRAs pode economizar memória da GPU, mas também pode reduzir a qualidade",
|
|
115
|
+
"Use filelist": "Usar lista de arquivos",
|
|
116
|
+
"VQGAN Configuration": "Configuração do VQGAN",
|
|
117
|
+
"View the status of the preprocessing folder (use the slider to control the depth of the tree)": "Visualizar o status da pasta de pré-processamento (use o controle deslizante para controlar a profundidade da árvore)",
|
|
118
|
+
"We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.": "Não nos responsabilizamos por qualquer uso indevido do modelo. Por favor, considere as leis e regulamentações locais antes de usá-lo.",
|
|
119
|
+
"WebUI Host": "Host da WebUI",
|
|
120
|
+
"WebUI Port": "Porta da WebUI",
|
|
121
|
+
"Whisper Model": "Modelo Whisper",
|
|
122
|
+
"You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).": "Você pode encontrar o código fonte [aqui](https://github.com/fishaudio/fish-speech) e os modelos [aqui](https://huggingface.co/fishaudio/fish-speech-1).",
|
|
123
|
+
"auto": "automático",
|
|
124
|
+
"bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU": "bf16-true é recomendado para GPUs da série 30+, 16-mixed é recomendado para GPUs da série 10+",
|
|
125
|
+
"latest": "mais recente",
|
|
126
|
+
"new": "novo",
|
|
127
|
+
"This audio introduces the basic concepts and applications of artificial intelligence and machine learning.": "Este áudio introduz os conceitos básicos e aplicações de inteligência artificial e aprendizado de máquina.",
|
|
128
|
+
"You don't need to train this model!": "Não é necessário treinar este modelo!",
|
|
129
|
+
"Yes": "Sim",
|
|
130
|
+
"No": "Não",
|
|
131
|
+
"version:": "versão:",
|
|
132
|
+
"author:": "autor:"
|
|
133
|
+
}
|