xinference-1.2.0-py3-none-any.whl → xinference-1.2.2-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of xinference might be problematic; see the registry page for details.
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +4 -7
- xinference/client/handlers.py +3 -0
- xinference/core/chat_interface.py +6 -1
- xinference/core/model.py +2 -0
- xinference/core/scheduler.py +4 -7
- xinference/core/supervisor.py +114 -23
- xinference/core/worker.py +70 -4
- xinference/deploy/local.py +2 -1
- xinference/model/audio/core.py +11 -0
- xinference/model/audio/cosyvoice.py +16 -5
- xinference/model/audio/kokoro.py +139 -0
- xinference/model/audio/melotts.py +110 -0
- xinference/model/audio/model_spec.json +80 -0
- xinference/model/audio/model_spec_modelscope.json +18 -0
- xinference/model/audio/whisper.py +35 -10
- xinference/model/llm/llama_cpp/core.py +21 -14
- xinference/model/llm/llm_family.json +527 -1
- xinference/model/llm/llm_family.py +4 -1
- xinference/model/llm/llm_family_modelscope.json +495 -3
- xinference/model/llm/memory.py +1 -1
- xinference/model/llm/mlx/core.py +24 -6
- xinference/model/llm/transformers/core.py +9 -1
- xinference/model/llm/transformers/qwen2_audio.py +3 -1
- xinference/model/llm/transformers/qwen2_vl.py +20 -3
- xinference/model/llm/transformers/utils.py +22 -11
- xinference/model/llm/utils.py +115 -1
- xinference/model/llm/vllm/core.py +14 -4
- xinference/model/llm/vllm/xavier/block.py +3 -4
- xinference/model/llm/vllm/xavier/block_tracker.py +71 -58
- xinference/model/llm/vllm/xavier/collective.py +74 -0
- xinference/model/llm/vllm/xavier/collective_manager.py +147 -0
- xinference/model/llm/vllm/xavier/executor.py +18 -16
- xinference/model/llm/vllm/xavier/scheduler.py +79 -63
- xinference/model/llm/vllm/xavier/test/test_xavier.py +60 -35
- xinference/model/llm/vllm/xavier/transfer.py +53 -32
- xinference/thirdparty/cosyvoice/bin/spk2info.pt +0 -0
- xinference/thirdparty/melo/__init__.py +0 -0
- xinference/thirdparty/melo/api.py +135 -0
- xinference/thirdparty/melo/app.py +61 -0
- xinference/thirdparty/melo/attentions.py +459 -0
- xinference/thirdparty/melo/commons.py +160 -0
- xinference/thirdparty/melo/configs/config.json +94 -0
- xinference/thirdparty/melo/data/example/metadata.list +20 -0
- xinference/thirdparty/melo/data_utils.py +413 -0
- xinference/thirdparty/melo/download_utils.py +67 -0
- xinference/thirdparty/melo/infer.py +25 -0
- xinference/thirdparty/melo/init_downloads.py +14 -0
- xinference/thirdparty/melo/losses.py +58 -0
- xinference/thirdparty/melo/main.py +36 -0
- xinference/thirdparty/melo/mel_processing.py +174 -0
- xinference/thirdparty/melo/models.py +1030 -0
- xinference/thirdparty/melo/modules.py +598 -0
- xinference/thirdparty/melo/monotonic_align/__init__.py +16 -0
- xinference/thirdparty/melo/monotonic_align/core.py +46 -0
- xinference/thirdparty/melo/preprocess_text.py +135 -0
- xinference/thirdparty/melo/split_utils.py +174 -0
- xinference/thirdparty/melo/text/__init__.py +35 -0
- xinference/thirdparty/melo/text/chinese.py +199 -0
- xinference/thirdparty/melo/text/chinese_bert.py +107 -0
- xinference/thirdparty/melo/text/chinese_mix.py +253 -0
- xinference/thirdparty/melo/text/cleaner.py +36 -0
- xinference/thirdparty/melo/text/cleaner_multiling.py +110 -0
- xinference/thirdparty/melo/text/cmudict.rep +129530 -0
- xinference/thirdparty/melo/text/cmudict_cache.pickle +0 -0
- xinference/thirdparty/melo/text/english.py +284 -0
- xinference/thirdparty/melo/text/english_bert.py +39 -0
- xinference/thirdparty/melo/text/english_utils/__init__.py +0 -0
- xinference/thirdparty/melo/text/english_utils/abbreviations.py +35 -0
- xinference/thirdparty/melo/text/english_utils/number_norm.py +97 -0
- xinference/thirdparty/melo/text/english_utils/time_norm.py +47 -0
- xinference/thirdparty/melo/text/es_phonemizer/__init__.py +0 -0
- xinference/thirdparty/melo/text/es_phonemizer/base.py +140 -0
- xinference/thirdparty/melo/text/es_phonemizer/cleaner.py +109 -0
- xinference/thirdparty/melo/text/es_phonemizer/es_symbols.json +79 -0
- xinference/thirdparty/melo/text/es_phonemizer/es_symbols.txt +1 -0
- xinference/thirdparty/melo/text/es_phonemizer/es_symbols_v2.json +83 -0
- xinference/thirdparty/melo/text/es_phonemizer/es_to_ipa.py +12 -0
- xinference/thirdparty/melo/text/es_phonemizer/example_ipa.txt +400 -0
- xinference/thirdparty/melo/text/es_phonemizer/gruut_wrapper.py +253 -0
- xinference/thirdparty/melo/text/es_phonemizer/punctuation.py +174 -0
- xinference/thirdparty/melo/text/es_phonemizer/spanish_symbols.txt +1 -0
- xinference/thirdparty/melo/text/es_phonemizer/test.ipynb +124 -0
- xinference/thirdparty/melo/text/fr_phonemizer/__init__.py +0 -0
- xinference/thirdparty/melo/text/fr_phonemizer/base.py +140 -0
- xinference/thirdparty/melo/text/fr_phonemizer/cleaner.py +122 -0
- xinference/thirdparty/melo/text/fr_phonemizer/en_symbols.json +78 -0
- xinference/thirdparty/melo/text/fr_phonemizer/example_ipa.txt +1 -0
- xinference/thirdparty/melo/text/fr_phonemizer/fr_symbols.json +89 -0
- xinference/thirdparty/melo/text/fr_phonemizer/fr_to_ipa.py +30 -0
- xinference/thirdparty/melo/text/fr_phonemizer/french_abbreviations.py +48 -0
- xinference/thirdparty/melo/text/fr_phonemizer/french_symbols.txt +1 -0
- xinference/thirdparty/melo/text/fr_phonemizer/gruut_wrapper.py +258 -0
- xinference/thirdparty/melo/text/fr_phonemizer/punctuation.py +172 -0
- xinference/thirdparty/melo/text/french.py +94 -0
- xinference/thirdparty/melo/text/french_bert.py +39 -0
- xinference/thirdparty/melo/text/japanese.py +647 -0
- xinference/thirdparty/melo/text/japanese_bert.py +49 -0
- xinference/thirdparty/melo/text/ko_dictionary.py +44 -0
- xinference/thirdparty/melo/text/korean.py +192 -0
- xinference/thirdparty/melo/text/opencpop-strict.txt +429 -0
- xinference/thirdparty/melo/text/spanish.py +122 -0
- xinference/thirdparty/melo/text/spanish_bert.py +39 -0
- xinference/thirdparty/melo/text/symbols.py +290 -0
- xinference/thirdparty/melo/text/tone_sandhi.py +769 -0
- xinference/thirdparty/melo/train.py +635 -0
- xinference/thirdparty/melo/train.sh +19 -0
- xinference/thirdparty/melo/transforms.py +209 -0
- xinference/thirdparty/melo/utils.py +424 -0
- xinference/types.py +2 -0
- xinference/web/ui/build/asset-manifest.json +3 -3
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/js/{main.1eb206d1.js → main.b0936c54.js} +3 -3
- xinference/web/ui/build/static/js/main.b0936c54.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/a3ff866acddf34917a7ee399e0e571a4dfd8ba66d5057db885f243e16a6eb17d.json +1 -0
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/METADATA +37 -27
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/RECORD +122 -45
- xinference/web/ui/build/static/js/main.1eb206d1.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/2213d49de260e1f67c888081b18f120f5225462b829ae57c9e05a05cec83689d.json +0 -1
- /xinference/web/ui/build/static/js/{main.1eb206d1.js.LICENSE.txt → main.b0936c54.js.LICENSE.txt} +0 -0
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/LICENSE +0 -0
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/WHEEL +0 -0
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/entry_points.txt +0 -0
- {xinference-1.2.0.dist-info → xinference-1.2.2.dist-info}/top_level.txt +0 -0
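The headline additions are the new TTS backends (kokoro.py, melotts.py, and the vendored xinference/thirdparty/melo package) together with new entries in the audio model specs; the hunks below show three of the newly vendored MeloTTS text-processing modules. As a minimal sketch of exercising one of the new audio models through the Python client: it assumes a server already running on the default port, and the model name "MeloTTS-English" is an assumption rather than something confirmed by this diff.

```python
# Hypothetical usage sketch, not taken from this diff. The model name is an
# assumption; check which audio models are registered on your server first.
from xinference.client import Client

client = Client("http://127.0.0.1:9997")
model_uid = client.launch_model(model_name="MeloTTS-English", model_type="audio")
model = client.get_model(model_uid)

# Audio models expose an OpenAI-style speech call that returns encoded audio bytes.
audio = model.speech("Hola desde xinference")
with open("speech.mp3", "wb") as f:
    f.write(audio)
```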
xinference/thirdparty/melo/text/spanish.py (new file)

@@ -0,0 +1,122 @@

```python
import pickle
import os
import re

from . import symbols
from .es_phonemizer import cleaner as es_cleaner
from .es_phonemizer import es_to_ipa
from transformers import AutoTokenizer


def distribute_phone(n_phone, n_word):
    phones_per_word = [0] * n_word
    for task in range(n_phone):
        min_tasks = min(phones_per_word)
        min_index = phones_per_word.index(min_tasks)
        phones_per_word[min_index] += 1
    return phones_per_word

def text_normalize(text):
    text = es_cleaner.spanish_cleaners(text)
    return text

def post_replace_ph(ph):
    rep_map = {
        ":": ",",
        ";": ",",
        ",": ",",
        "。": ".",
        "!": "!",
        "?": "?",
        "\n": ".",
        "·": ",",
        "、": ",",
        "...": "…"
    }
    if ph in rep_map.keys():
        ph = rep_map[ph]
    if ph in symbols:
        return ph
    if ph not in symbols:
        ph = "UNK"
    return ph

def refine_ph(phn):
    tone = 0
    if re.search(r"\d$", phn):
        tone = int(phn[-1]) + 1
        phn = phn[:-1]
    return phn.lower(), tone


def refine_syllables(syllables):
    tones = []
    phonemes = []
    for phn_list in syllables:
        for i in range(len(phn_list)):
            phn = phn_list[i]
            phn, tone = refine_ph(phn)
            phonemes.append(phn)
            tones.append(tone)
    return phonemes, tones


# model_id = 'bert-base-uncased'
model_id = 'dccuchile/bert-base-spanish-wwm-uncased'
tokenizer = AutoTokenizer.from_pretrained(model_id)

def g2p(text, pad_start_end=True, tokenized=None):
    if tokenized is None:
        tokenized = tokenizer.tokenize(text)
    # import pdb; pdb.set_trace()
    phs = []
    ph_groups = []
    for t in tokenized:
        if not t.startswith("#"):
            ph_groups.append([t])
        else:
            ph_groups[-1].append(t.replace("#", ""))

    phones = []
    tones = []
    word2ph = []
    # print(ph_groups)
    for group in ph_groups:
        w = "".join(group)
        phone_len = 0
        word_len = len(group)
        if w == '[UNK]':
            phone_list = ['UNK']
        else:
            phone_list = list(filter(lambda p: p != " ", es_to_ipa.es2ipa(w)))

        for ph in phone_list:
            phones.append(ph)
            tones.append(0)
            phone_len += 1
        aaa = distribute_phone(phone_len, word_len)
        word2ph += aaa
        # print(phone_list, aaa)
        # print('=' * 10)

    if pad_start_end:
        phones = ["_"] + phones + ["_"]
        tones = [0] + tones + [0]
        word2ph = [1] + word2ph + [1]
    return phones, tones, word2ph

def get_bert_feature(text, word2ph, device=None):
    from text import spanish_bert
    return spanish_bert.get_bert_feature(text, word2ph, device=device)

if __name__ == "__main__":
    text = "en nuestros tiempos estos dos pueblos ilustres empiezan a curarse, gracias sólo a la sana y vigorosa higiene de 1789."
    # print(text)
    text = text_normalize(text)
    print(text)
    phones, tones, word2ph = g2p(text)
    bert = get_bert_feature(text, word2ph)
    print(phones)
    print(len(phones), tones, sum(word2ph), bert.shape)
```
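In g2p above, word2ph records how many phones each WordPiece group accounts for, and distribute_phone keeps that mapping as even as possible. A quick standalone check, lightly condensed from the helper above (no model download required):

```python
def distribute_phone(n_phone, n_word):
    # Greedy balancing: each phone goes to the sub-token with the fewest so far.
    phones_per_word = [0] * n_word
    for _ in range(n_phone):
        min_index = phones_per_word.index(min(phones_per_word))
        phones_per_word[min_index] += 1
    return phones_per_word

print(distribute_phone(7, 3))  # [3, 2, 2]: earlier sub-tokens absorb the remainder
```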
xinference/thirdparty/melo/text/spanish_bert.py (new file)

@@ -0,0 +1,39 @@

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
import sys

model_id = 'dccuchile/bert-base-spanish-wwm-uncased'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = None

def get_bert_feature(text, word2ph, device=None):
    global model
    if (
        sys.platform == "darwin"
        and torch.backends.mps.is_available()
        and device == "cpu"
    ):
        device = "mps"
    if not device:
        device = "cuda"
    if model is None:
        model = AutoModelForMaskedLM.from_pretrained(model_id).to(
            device
        )
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = model(**inputs, output_hidden_states=True)
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()

    assert inputs["input_ids"].shape[-1] == len(word2ph)
    word2phone = word2ph
    phone_level_feature = []
    for i in range(len(word2phone)):
        repeat_feature = res[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)

    return phone_level_feature.T
```
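The tail of get_bert_feature expands one vector per BERT token into one vector per phone by repeating the i-th token vector word2ph[i] times; the assert guarantees the token axis and word2ph agree. A sketch of the shape arithmetic, with a dummy tensor standing in for the selected hidden layer (note the function itself falls back to "cuda" whenever device is None):

```python
import torch

res = torch.randn(4, 768)  # stand-in hidden states: 4 tokens, width 768
word2ph = [1, 3, 2, 1]     # phones per token; sums to 7

phone_level_feature = torch.cat(
    [res[i].repeat(word2ph[i], 1) for i in range(len(word2ph))], dim=0
)
print(phone_level_feature.shape)    # torch.Size([7, 768])
print(phone_level_feature.T.shape)  # torch.Size([768, 7]), the layout returned above
```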
xinference/thirdparty/melo/text/symbols.py (new file)

@@ -0,0 +1,290 @@

```python
# punctuation = ["!", "?", "…", ",", ".", "'", "-"]
punctuation = ["!", "?", "…", ",", ".", "'", "-", "¿", "¡"]
pu_symbols = punctuation + ["SP", "UNK"]
pad = "_"

# chinese
zh_symbols = [
    "E", "En", "a", "ai", "an", "ang", "ao", "b", "c", "ch", "d", "e", "ei",
    "en", "eng", "er", "f", "g", "h", "i", "i0", "ia", "ian", "iang", "iao",
    "ie", "in", "ing", "iong", "ir", "iu", "j", "k", "l", "m", "n", "o",
    "ong", "ou", "p", "q", "r", "s", "sh", "t", "u", "ua", "uai", "uan",
    "uang", "ui", "un", "uo", "v", "van", "ve", "vn", "w", "x", "y", "z",
    "zh", "AA", "EE", "OO",
]
num_zh_tones = 6

# japanese
ja_symbols = [
    "N", "a", "a:", "b", "by", "ch", "d", "dy", "e", "e:", "f", "g", "gy",
    "h", "hy", "i", "i:", "j", "k", "ky", "m", "my", "n", "ny", "o", "o:",
    "p", "py", "q", "r", "ry", "s", "sh", "t", "ts", "ty", "u", "u:", "w",
    "y", "z", "zy",
]
num_ja_tones = 1

# English
en_symbols = [
    "aa", "ae", "ah", "ao", "aw", "ay", "b", "ch", "d", "dh", "eh", "er",
    "ey", "f", "g", "hh", "ih", "iy", "jh", "k", "l", "m", "n", "ng", "ow",
    "oy", "p", "r", "s", "sh", "t", "th", "uh", "uw", "V", "w", "y", "z",
    "zh",
]
num_en_tones = 4

# Korean
kr_symbols = ['ᄌ', 'ᅥ', 'ᆫ', 'ᅦ', 'ᄋ', 'ᅵ', 'ᄅ', 'ᅴ', 'ᄀ', 'ᅡ', 'ᄎ', 'ᅪ', 'ᄑ', 'ᅩ', 'ᄐ', 'ᄃ', 'ᅢ', 'ᅮ', 'ᆼ', 'ᅳ', 'ᄒ', 'ᄆ', 'ᆯ', 'ᆷ', 'ᄂ', 'ᄇ', 'ᄉ', 'ᆮ', 'ᄁ', 'ᅬ', 'ᅣ', 'ᄄ', 'ᆨ', 'ᄍ', 'ᅧ', 'ᄏ', 'ᆸ', 'ᅭ', '(', 'ᄊ', ')', 'ᅲ', 'ᅨ', 'ᄈ', 'ᅱ', 'ᅯ', 'ᅫ', 'ᅰ', 'ᅤ', '~', '\\', '[', ']', '/', '^', ':', 'ㄸ', '*']
num_kr_tones = 1

# Spanish
es_symbols = [
    "N", "Q", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
    "n", "o", "p", "s", "t", "u", "v", "w", "x", "y", "z", "ɑ", "æ", "ʃ",
    "ʑ", "ç", "ɯ", "ɪ", "ɔ", "ɛ", "ɹ", "ð", "ə", "ɫ", "ɥ", "ɸ", "ʊ", "ɾ",
    "ʒ", "θ", "β", "ŋ", "ɦ", "ɡ", "r", "ɲ", "ʝ", "ɣ", "ʎ", "ˈ", "ˌ", "ː",
]
num_es_tones = 1

# French
fr_symbols = ["\u0303", "œ", "ø", "ʁ", "ɒ", "ʌ", "ɜ", "ɐ"]
num_fr_tones = 1

# German
de_symbols = ["ʏ", "̩"]
num_de_tones = 1

# Russian
ru_symbols = ["ɭ", "ʲ", "ɕ", "\"", "ɵ", "^", "ɬ"]
num_ru_tones = 1

# combine all symbols
normal_symbols = sorted(set(
    zh_symbols + ja_symbols + en_symbols + kr_symbols
    + es_symbols + fr_symbols + de_symbols + ru_symbols
))
symbols = [pad] + normal_symbols + pu_symbols
sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]

# combine all tones
num_tones = (
    num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones
    + num_es_tones + num_fr_tones + num_de_tones + num_ru_tones
)

# language maps
language_id_map = {"ZH": 0, "JP": 1, "EN": 2, "ZH_MIX_EN": 3, "KR": 4, "ES": 5, "SP": 5, "FR": 6}
num_languages = len(language_id_map.keys())

language_tone_start_map = {
    "ZH": 0,
    "ZH_MIX_EN": 0,
    "JP": num_zh_tones,
    "EN": num_zh_tones + num_ja_tones,
    "KR": num_zh_tones + num_ja_tones + num_en_tones,
    "ES": num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones,
    "SP": num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones,
    "FR": num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones + num_es_tones,
}

if __name__ == "__main__":
    a = set(zh_symbols)
    b = set(en_symbols)
    print(sorted(a & b))
```
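All languages share a single phone table and a single tone embedding, so each language's tones are shifted by the offsets in language_tone_start_map. A worked check of that arithmetic, with the constants copied from the file rather than imported (loading the vendored package can pull in heavy text-processing dependencies):

```python
# Constants copied from symbols.py above.
num_zh_tones, num_ja_tones, num_en_tones, num_kr_tones, num_es_tones = 6, 1, 4, 1, 1

offsets = {
    "ZH": 0,
    "JP": num_zh_tones,                                # 6
    "EN": num_zh_tones + num_ja_tones,                 # 7
    "KR": num_zh_tones + num_ja_tones + num_en_tones,  # 11
    "ES": num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones,                 # 12
    "FR": num_zh_tones + num_ja_tones + num_en_tones + num_kr_tones + num_es_tones,  # 13
}
assert offsets["ES"] == 12 and offsets["FR"] == 13

# A Spanish utterance therefore stores every tone as 0 + 12, keeping it clear
# of the six slots reserved for Chinese tones at the front of the table.
print(offsets)
```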