sonatoki 0.1.5__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sonatoki-0.1.5 → sonatoki-0.2.0}/PKG-INFO +1 -3
- {sonatoki-0.1.5 → sonatoki-0.2.0}/pyproject.toml +1 -4
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Cleaners.py +25 -2
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Configs.py +6 -6
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Filters.py +63 -34
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Scorers.py +0 -1
- sonatoki-0.2.0/src/sonatoki/Tokenizers.py +139 -0
- sonatoki-0.2.0/src/sonatoki/constants.py +67 -0
- sonatoki-0.2.0/src/sonatoki/utils.py +90 -0
- sonatoki-0.2.0/tests/test_cleaners.py +36 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/test_filters.py +32 -9
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/test_scorers.py +2 -2
- sonatoki-0.2.0/tests/test_tokenize.py +123 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/tokenize_cases/tokenize_sentences_tok.yml +7 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/tokenize_cases/tokenize_words_tok.yml +76 -0
- sonatoki-0.1.5/src/sonatoki/Tokenizers.py +0 -91
- sonatoki-0.1.5/src/sonatoki/constants.py +0 -83
- sonatoki-0.1.5/tests/test_cleaners.py +0 -26
- sonatoki-0.1.5/tests/test_tokenize.py +0 -111
- sonatoki-0.1.5/tests/tokenize_cases/tokenize_sentences.yml +0 -7
- sonatoki-0.1.5/tests/tokenize_cases/tokenize_words.yml +0 -15
- {sonatoki-0.1.5 → sonatoki-0.2.0}/LICENSE +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/README.md +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Preprocessors.py +5 -5
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/__init__.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/__main__.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/ilo.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/linku.json +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/sandbox.json +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/__init__.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/test_ilo.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/test_preprocessors.py +0 -0
- {sonatoki-0.1.5 → sonatoki-0.2.0}/tests/test_utils.py +0 -0
{sonatoki-0.1.5 → sonatoki-0.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sonatoki
-Version: 0.1.5
+Version: 0.2.0
 Summary: ilo li moku e toki li pana e sona ni: ni li toki ala toki pona?
 Author-Email: "jan Kekan San (@gregdan3)" <gregory.danielson3@gmail.com>
 License: AGPL-3.0-or-later
@@ -8,8 +8,6 @@ Requires-Python: >=3.8
 Requires-Dist: unidecode>=1.3.6
 Requires-Dist: regex>=2023.12.25
 Requires-Dist: typing-extensions>=4.11.0
-Requires-Dist: nltk>=3.8.1; extra == "nltk"
-Provides-Extra: nltk
 Description-Content-Type: text/markdown
 
 # sona toki

{sonatoki-0.1.5 → sonatoki-0.2.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "sonatoki"
-version = "0.1.5"
+version = "0.2.0"
 description = "ilo li moku e toki li pana e sona ni: ni li toki ala toki pona?"
 authors = [
     { name = "jan Kekan San (@gregdan3)", email = "gregory.danielson3@gmail.com" },
@@ -17,9 +17,6 @@ readme = "README.md"
 text = "AGPL-3.0-or-later"
 
 [project.optional-dependencies]
-nltk = [
-    "nltk>=3.8.1",
-]
 
 [build-system]
 requires = [

{sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Cleaners.py

@@ -23,7 +23,7 @@ class RegexCleaner(Cleaner):
         return re.sub(cls.pattern, cls.replace, token)
 
 
-class ConsecutiveDuplicates(RegexCleaner):
+class ConsecutiveDuplicates(Cleaner):
     """Remove consecutive duplicates from an input string, ignoring case.
 
     The first match of any 2+ will become `\\1`, preserving initial case.
@@ -35,8 +35,31 @@ class ConsecutiveDuplicates(RegexCleaner):
     This may be undesirable for moraic scripts like Hiragana, where `わわ` would be
     incorrectly reduced to `わ`. This does preserve phonotactic validity, though."""
 
+    @classmethod
+    @override
+    def clean(cls, token: str) -> str:
+        if not token:
+            return token
+
+        output = token[0]
+
+        last_output = output.lower()  # ignore case in comparison
+        for i in range(1, len(token)):
+            cur_char = token[i].lower()
+            if cur_char == last_output:
+                continue
+            output += token[i]  # preserve case of string
+            last_output = cur_char
+        return output
+
+
+class ConsecutiveDuplicatesRe(RegexCleaner):
+    """Reference implementation for `ConsecutiveDuplicates`."""
+
     pattern = re.compile(r"(.)\1+", flags=re.IGNORECASE)
     replace = r"\1"
 
 
-__all__ = [
+__all__ = [
+    "ConsecutiveDuplicates",
+]

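The loop-based cleaner above can be sanity-checked directly; the expected outputs below are traced from the docstring and the loop itself:

    from sonatoki.Cleaners import ConsecutiveDuplicates

    ConsecutiveDuplicates.clean("tooooki")  # -> "toki"
    ConsecutiveDuplicates.clean("manna")    # -> "mana"
    ConsecutiveDuplicates.clean("Aaa")      # -> "A"  (comparison ignores case; output keeps the original case)
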
{sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Configs.py

@@ -21,7 +21,7 @@ from sonatoki.Filters import (
 )
 from sonatoki.Scorers import Number, Scorer, PassFail, SoftScaling, SoftPassFail
 from sonatoki.Cleaners import Cleaner, ConsecutiveDuplicates
-from sonatoki.Tokenizers import Tokenizer,
+from sonatoki.Tokenizers import Tokenizer, WordTokenizer
 from sonatoki.Preprocessors import (
     URLs,
     Preprocessor,
@@ -49,7 +49,7 @@ BaseConfig: IloConfig = {
     "scoring_filters": [],
     "scorer": PassFail,
     "passing_score": 0.8,
-    "word_tokenizer":
+    "word_tokenizer": WordTokenizer,
 }
@@ -70,11 +70,11 @@ TelegramConfig: IloConfig = deepcopy(PrefConfig)
 ForumConfig: IloConfig = deepcopy(PrefConfig)
 
 __all__ = [
-    "IloConfig",
     "BaseConfig",
-    "PrefConfig",
-    "LazyConfig",
     "DiscordConfig",
-    "TelegramConfig",
     "ForumConfig",
+    "IloConfig",
+    "LazyConfig",
+    "PrefConfig",
+    "TelegramConfig",
 ]

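For context, a minimal sketch of how these configs are consumed. The `Ilo` constructor's signature is not shown in this diff, so unpacking the config as keyword arguments, and the `is_toki_pona` entry point, are assumptions:

    from sonatoki.ilo import Ilo
    from sonatoki.Configs import PrefConfig

    ilo = Ilo(**PrefConfig)             # assumed: IloConfig keys map onto Ilo's parameters
    ilo.is_toki_pona("mi olin e sina")  # assumed entry point; True if the text passes the configured scorer
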
{sonatoki-0.1.5 → sonatoki-0.2.0}/src/sonatoki/Filters.py

@@ -11,16 +11,17 @@ from typing_extensions import override
 # LOCAL
 from sonatoki.constants import (
     VOWELS,
+    NIMI_PU,
+    ALPHABET,
+    ALLOWABLES,
     CONSONANTS,
-
-
+    NIMI_LINKU,
+    NIMI_PU_ALE,
+    POSIX_PUNCT,
     UNICODE_PUNCT,
-
-
-
-    NIMI_LINKU_ALE_SET,
-    PRUNED_POSIX_PUNCT,
-    NIMI_LINKU_SANDBOX_SET,
+    NIMI_LINKU_ALE,
+    ALL_PUNCT_RANGES,
+    NIMI_LINKU_SANDBOX,
 )
 
 regex.DEFAULT_VERSION = regex.VERSION1
@@ -54,7 +55,7 @@ class Regex1Filter(Filter):
         return not not regex.fullmatch(cls.pattern, token)
 
 
-class SetFilter(Filter):
+class MemberFilter(Filter):
     tokens: Set[str]
 
     @classmethod
@@ -64,8 +65,18 @@ class SetFilter(Filter):
         return token.lower() in cls.tokens
 
 
-class
-    tokens
+class SubsetFilter(Filter):
+    tokens: Set[str]
+
+    @classmethod
+    @override
+    @cache(maxsize=None)
+    def filter(cls, token: str) -> bool:
+        return set(token.lower()).issubset(cls.tokens)
+
+
+class Miscellaneous(MemberFilter):
+    tokens = set(ALLOWABLES)
 
 
 class ProperName(Filter):
@@ -83,26 +94,28 @@ class ProperName(Filter):
     @cache(maxsize=None)
     def filter(cls, token: str) -> bool:
         return token == token.capitalize()
+        # TODO: If the token is in a script which doesn't have a case distinction,
+        # this will errantly match.
 
 
-class NimiPu(
-    tokens =
+class NimiPu(MemberFilter):
+    tokens = set(NIMI_PU)
 
 
-class NimiPuAle(
-    tokens =
+class NimiPuAle(MemberFilter):
+    tokens = set(NIMI_PU_ALE)
 
 
-class NimiLinku(
-    tokens =
+class NimiLinku(MemberFilter):
+    tokens = set(NIMI_LINKU)
 
 
-class NimiLinkuAle(
-    tokens =
+class NimiLinkuAle(MemberFilter):
+    tokens = set(NIMI_LINKU_ALE)
 
 
-class NimiLinkuSandbox(
-    tokens =
+class NimiLinkuSandbox(MemberFilter):
+    tokens = set(NIMI_LINKU_SANDBOX)
 
 
 class Phonotactic(RegexFilter):
@@ -135,13 +148,12 @@ class Syllabic(RegexFilter):
     )
 
 
-class Alphabetic(
-
-
-
-
-
-        return set(token.lower()).issubset(ALPHABET_SET)
+class Alphabetic(SubsetFilter):
+    tokens = set(ALPHABET)
+
+
+class AlphabeticRe(RegexFilter):
+    pattern = re.compile(rf"[{ALPHABET}]+", flags=re.IGNORECASE)
 
 
 class Numeric(Filter):
@@ -160,18 +172,35 @@ class Numeric(Filter):
         return msg.isnumeric()
 
 
-class Punctuation(
-
+class Punctuation(SubsetFilter):
+    """Identify whether a token is entirely punctuation. Fastest implementation."""
+
+    tokens = set(POSIX_PUNCT + UNICODE_PUNCT)
+
+
+class PunctuationRe(RegexFilter):
+    """Faster implementation of `PunctuationRe1`.
+    Goes out of date compared to the `regex` library if UNICODE_PUNCT is not updated."""
+
+    pattern = re.compile(rf"[{ALL_PUNCT_RANGES}]+")
+
+
+class PunctuationRe1(Regex1Filter):
+    """Reference implementation for identifying tokens made entirely of punctuation."""
+
+    pattern = regex.compile(r"[\p{Punctuation}\p{posix_punct}]+")
 
 
 __all__ = [
-    "
+    "Alphabetic",
     "NimiLinku",
     "NimiLinkuAle",
+    "NimiLinkuSandbox",
+    "NimiPu",
+    "NimiPuAle",
+    "Numeric",
     "Phonotactic",
-    "Syllabic",
-    "Alphabetic",
     "ProperName",
     "Punctuation",
-    "
+    "Syllabic",
 ]

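The MemberFilter/SubsetFilter split is the core of this change: membership tests the whole token against a word set, while subset tests each character against an alphabet. A short demonstration using the classes above:

    from sonatoki.Filters import NimiPu, Alphabetic, Punctuation

    NimiPu.filter("toki")       # True: the token is in the pu word set
    NimiPu.filter("tokii")      # False: not a dictionary word
    Alphabetic.filter("tokii")  # True: every character is in the 14-letter alphabet
    Punctuation.filter("!?")    # True: every character is punctuation
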
sonatoki-0.2.0/src/sonatoki/Tokenizers.py (new file)

@@ -0,0 +1,139 @@
# STL
import re
from abc import ABC, abstractmethod
from typing import Set, List

# PDM
import regex
from typing_extensions import override

# LOCAL
from sonatoki.utils import regex_escape
from sonatoki.constants import (
    POSIX_PUNCT,
    UNICODE_PUNCT,
    SENTENCE_PUNCT,
    ALL_PUNCT_RANGES,
)

regex.DEFAULT_VERSION = regex.VERSION1


class Tokenizer(ABC):
    @classmethod
    @abstractmethod
    def tokenize(cls, s: str) -> List[str]: ...


class SetTokenizer(Tokenizer):
    delimiters: Set[str]


class RegexTokenizer(Tokenizer):
    pattern: "re.Pattern[str]"

    @classmethod
    @override
    def tokenize(cls, s: str) -> List[str]:
        return [clean for word in re.split(cls.pattern, s) if (clean := word.strip())]


class Regex1Tokenizer(Tokenizer):
    pattern: "regex.Pattern[str]"

    @classmethod
    @override
    def tokenize(cls, s: str) -> List[str]:
        return [
            clean for word in regex.split(cls.pattern, s) if (clean := word.strip())
        ]


class WordTokenizer(SetTokenizer):
    delimiters = set(POSIX_PUNCT + UNICODE_PUNCT)

    @classmethod
    @override
    def tokenize(cls, s: str) -> List[str]:
        if not s:
            return []

        tokens: List[str] = []

        last_match = 0
        last_membership = s[0] in cls.delimiters
        for i, char in enumerate(s):
            mem = char in cls.delimiters
            if mem == last_membership:
                continue

            match = s[last_match:i].split()
            # TODO: kinda sucks? what about unicode whitespace?
            last_match = i
            last_membership = mem
            [tokens.append(t) for t in match if t]

        match = s[last_match:].strip().split()
        if match:
            tokens.extend(match)

        return tokens


class WordTokenizerRe(RegexTokenizer):
    pattern = re.compile(rf"""([{ALL_PUNCT_RANGES}]+|\s+)""")


class WordTokenizerRe1(Regex1Tokenizer):
    """Reference implementation for WordTokenizer."""

    pattern = regex.compile(r"""([\p{posix_punct}\p{Punctuation}]+|\s+)""")


class SentTokenizer(SetTokenizer):
    delimiters = set(SENTENCE_PUNCT + "\n")  # regex does \n with a flag

    @classmethod
    @override
    def tokenize(cls, s: str) -> List[str]:
        if not s:
            return []

        tokens: List[str] = []
        last_match = 0
        for i, char in enumerate(s):
            if char not in cls.delimiters:
                continue

            match = s[last_match : i + 1].strip()
            last_match = i + 1  # newlines can strip but idc
            if not match:
                continue
            tokens.append(match)

        match = s[last_match:].strip()
        if match:
            tokens.append(match)

        return tokens


class SentTokenizerRe(RegexTokenizer):
    pattern = re.compile(
        rf"""(?<=[{regex_escape(SENTENCE_PUNCT)}])|$""", flags=re.MULTILINE
    )
    # TODO: are <> or {} that common as *sentence* delims? [] are already a stretch
    # TODO: do the typography characters matter?
    # NOTE: | / and , are *not* sentence delimiters for my purpose


class SentTokenizerRe1(Regex1Tokenizer):
    pattern = regex.compile(
        rf"""(?<=[{regex_escape(SENTENCE_PUNCT)}]|$)""", flags=regex.MULTILINE
    )


__all__ = [
    "WordTokenizer",
    "SentTokenizer",
]

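Tracing the two set-based tokenizers over the same input shows their granularities; these outputs follow from the membership-switching loops above:

    from sonatoki.Tokenizers import WordTokenizer, SentTokenizer

    WordTokenizer.tokenize("toki! mi jan.")
    # -> ["toki", "!", "mi", "jan", "."]  (punctuation runs become their own tokens)
    SentTokenizer.tokenize("toki! mi jan.")
    # -> ["toki!", "mi jan."]  (split after sentence-final punctuation)
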
sonatoki-0.2.0/src/sonatoki/constants.py (new file)

@@ -0,0 +1,67 @@
# STL
import json
from typing import Dict, List
from pathlib import Path

# LOCAL
from sonatoki.utils import find_unicode_ranges

# `\p{Punctuation}` character class
UNICODE_PUNCT = r"""!"#%&'()*+,-./:;<=>?@[\]^_`{|}~¡¦§¨©«¬®¯°±´¶·¸»¿×÷˂˃˄˅˒˓˔˕˖˗˘˙˚˛˜˝˞˟˥˦˧˨˩˪˫˭˯˰˱˲˳˴˵˶˷˸˹˺˻˼˽˾˿͵;΄΅·϶҂՚՛՜՝՞՟։֊֍֎־׀׃׆׳״؆؇؈؉؊،؍؎؏؛؝؞؟٪٫٬٭۔۞۩۽۾܀܁܂܃܄܅܆܇܈܉܊܋܌܍߶߷߸߹࠰࠱࠲࠳࠴࠵࠶࠷࠸࠹࠺࠻࠼࠽࠾࡞࢈।॥॰৺৽੶૰୰௳௴௵௶௷௸௺౷౿಄൏൹෴๏๚๛༁༂༃༄༅༆༇༈༉༊་༌།༎༏༐༑༒༓༔༕༖༗༚༛༜༝༞༟༴༶༸༺༻༼༽྅྾྿࿀࿁࿂࿃࿄࿅࿇࿈࿉࿊࿋࿌࿎࿏࿐࿑࿒࿓࿔࿕࿖࿗࿘࿙࿚၊။၌၍၎၏႞႟჻፠፡።፣፤፥፦፧፨᎐᎑᎒᎓᎔᎕᎖᎗᎘᎙᐀᙭᙮᚛᚜᛫᛬᛭᜵᜶។៕៖៘៙៚᠀᠁᠂᠃᠄᠅᠆᠇᠈᠉᠊᥀᥄᥅᧞᧟᧠᧡᧢᧣᧤᧥᧦᧧᧨᧩᧪᧫᧬᧭᧮᧯᧰᧱᧲᧳᧴᧵᧶᧷᧸᧹᧺᧻᧼᧽᧾᧿᨞᨟᪠᪡᪢᪣᪤᪥᪦᪨᪩᪪᪫᪬᪭᭚᭛᭜᭝᭞᭟᭠᭡᭢᭣᭤᭥᭦᭧᭨᭩᭪᭴᭵᭶᭷᭸᭹᭺᭻᭼᭽᭾᯼᯽᯾᯿᰻᰼᰽᰾᰿᱾᱿᳀᳁᳂᳃᳄᳅᳆᳇᳓᾽᾿῀῁῍῎῏῝῞῟῭΅`´῾‐‑‒–—―‖‗‘’‚‛“”„‟†‡•‣․‥…‧‰‱′″‴‵‶‷‸‹›※‼‽‾‿⁀⁁⁂⁃⁄⁅⁆⁇⁈⁉⁊⁋⁌⁍⁎⁏⁐⁑⁒⁓⁔⁕⁖⁗⁘⁙⁚⁛⁜⁝⁞⁺⁻⁼⁽⁾₊₋₌₍₎℀℁℃℄℅℆℈℉℔№℗℘℞℟℠℡™℣℥℧℩℮℺℻⅀⅁⅂⅃⅄⅊⅋⅌⅍⅏↊↋←↑→↓↔↕↖↗↘↙↚↛↜↝↞↟↠↡↢↣↤↥↦↧↨↩↪↫↬↭↮↯↰↱↲↳↴↵↶↷↸↹↺↻↼↽↾↿⇀⇁⇂⇃⇄⇅⇆⇇⇈⇉⇊⇋⇌⇍⇎⇏⇐⇑⇒⇓⇔⇕⇖⇗⇘⇙⇚⇛⇜⇝⇞⇟⇠⇡⇢⇣⇤⇥⇦⇧⇨⇩⇪⇫⇬⇭⇮⇯⇰⇱⇲⇳⇴⇵⇶⇷⇸⇹⇺⇻⇼⇽⇾⇿∀∁∂∃∄∅∆∇∈∉∊∋∌∍∎∏∐∑−∓∔∕∖∗∘∙√∛∜∝∞∟∠∡∢∣∤∥∦∧∨∩∪∫∬∭∮∯∰∱∲∳∴∵∶∷∸∹∺∻∼∽∾∿≀≁≂≃≄≅≆≇≈≉≊≋≌≍≎≏≐≑≒≓≔≕≖≗≘≙≚≛≜≝≞≟≠≡≢≣≤≥≦≧≨≩≪≫≬≭≮≯≰≱≲≳≴≵≶≷≸≹≺≻≼≽≾≿⊀⊁⊂⊃⊄⊅⊆⊇⊈⊉⊊⊋⊌⊍⊎⊏⊐⊑⊒⊓⊔⊕⊖⊗⊘⊙⊚⊛⊜⊝⊞⊟⊠⊡⊢⊣⊤⊥⊦⊧⊨⊩⊪⊫⊬⊭⊮⊯⊰⊱⊲⊳⊴⊵⊶⊷⊸⊹⊺⊻⊼⊽⊾⊿⋀⋁⋂⋃⋄⋅⋆⋇⋈⋉⋊⋋⋌⋍⋎⋏⋐⋑⋒⋓⋔⋕⋖⋗⋘⋙⋚⋛⋜⋝⋞⋟⋠⋡⋢⋣⋤⋥⋦⋧⋨⋩⋪⋫⋬⋭⋮⋯⋰⋱⋲⋳⋴⋵⋶⋷⋸⋹⋺⋻⋼⋽⋾⋿⌀⌁⌂⌃⌄⌅⌆⌇⌈⌉⌊⌋⌌⌍⌎⌏⌐⌑⌒⌓⌔⌕⌖⌗⌘⌙⌚⌛⌜⌝⌞⌟⌠⌡⌢⌣⌤⌥⌦⌧⌨〈〉⌫⌬⌭⌮⌯⌰⌱⌲⌳⌴⌵⌶⌷⌸⌹⌺⌻⌼⌽⌾⌿⍀⍁⍂⍃⍄⍅⍆⍇⍈⍉⍊⍋⍌⍍⍎⍏⍐⍑⍒⍓⍔⍕⍖⍗⍘⍙⍚⍛⍜⍝⍞⍟⍠⍡⍢⍣⍤⍥⍦⍧⍨⍩⍪⍫⍬⍭⍮⍯⍰⍱⍲⍳⍴⍵⍶⍷⍸⍹⍺⍻⍼⍽⍾⍿⎀⎁⎂⎃⎄⎅⎆⎇⎈⎉⎊⎋⎌⎍⎎⎏⎐⎑⎒⎓⎔⎕⎖⎗⎘⎙⎚⎛⎜⎝⎞⎟⎠⎡⎢⎣⎤⎥⎦⎧⎨⎩⎪⎫⎬⎭⎮⎯⎰⎱⎲⎳⎴⎵⎶⎷⎸⎹⎺⎻⎼⎽⎾⎿⏀⏁⏂⏃⏄⏅⏆⏇⏈⏉⏊⏋⏌⏍⏎⏏⏐⏑⏒⏓⏔⏕⏖⏗⏘⏙⏚⏛⏜⏝⏞⏟⏠⏡⏢⏣⏤⏥⏦⏧⏨⏩⏪⏫⏬⏭⏮⏯⏰⏱⏲⏳⏴⏵⏶⏷⏸⏹⏺⏻⏼⏽⏾⏿␀␁␂␃␄␅␆␇␈␉␊␋␌␍␎␏␐␑␒␓␔␕␖␗␘␙␚␛␜␝␞␟␠␡␢␣␥␦⑀⑁⑂⑃⑄⑅⑆⑇⑈⑉⑊⒜⒝⒞⒟⒠⒡⒢⒣⒤⒥⒦⒧⒨⒩⒪⒫⒬⒭⒮⒯⒰⒱⒲⒳⒴⒵─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟■□▢▣▤▥▦▧▨▩▪▫▬▭▮▯▰▱▲△▴▵▶▷▸▹►▻▼▽▾▿◀◁◂◃◄◅◆◇◈◉◊○◌◍◎●◐◑◒◓◔◕◖◗◘◙◚◛◜◝◞◟◠◡◢◣◤◥◦◧◨◩◪◫◬◭◮◯◰◱◲◳◴◵◶◷◸◹◺◻◼◽◾◿☀☁☂☃☄★☆☇☈☉☊☋☌☍☎☏☐☑☒☓☔☕☖☗☘☙☚☛☜☝☞☟☠☡☢☣☤☥☦☧☨☩☪☫☬☭☮☯☰☱☲☳☴☵☶☷☸☹☺☻☼☽☾☿♀♁♂♃♄♅♆♇♈♉♊♋♌♍♎♏♐♑♒♓♔♕♖♗♘♙♚♛♜♝♞♟♠♡♢♣♤♥♦♧♨♩♪♫♬♭♮♯♰♱♲♳♴♵♶♷♸♹♺♻♼♽♾♿⚀⚁⚂⚃⚄⚅⚆⚇⚈⚉⚊⚋⚌⚍⚎⚏⚐⚑⚒⚓⚔⚕⚖⚗⚘⚙⚚⚛⚜⚝⚞⚟⚠⚡⚢⚣⚤⚥⚦⚧⚨⚩⚪⚫⚬⚭⚮⚯⚰⚱⚲⚳⚴⚵⚶⚷⚸⚹⚺⚻⚼⚽⚾⚿⛀⛁⛂⛃⛄⛅⛆⛇⛈⛉⛊⛋⛌⛍⛎⛏⛐⛑⛒⛓⛔⛕⛖⛗⛘⛙⛚⛛⛜⛝⛞⛟⛠⛡⛢⛣⛤⛥⛦⛧⛨⛩⛪⛫⛬⛭⛮⛯⛰⛱⛲⛳⛴⛵⛶⛷⛸⛹⛺⛻⛼⛽⛾⛿✀✁✂✃✄✅✆✇✈✉✊✋✌✍✎✏✐✑✒✓✔✕✖✗✘✙✚✛✜✝✞✟✠✡✢✣✤✥✦✧✨✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀❁❂❃❄❅❆❇❈❉❊❋❌❍❎❏❐❑❒❓❔❕❖❗❘❙❚❛❜❝❞❟❠❡❢❣❤❥❦❧❨❩❪❫❬❭❮❯❰❱❲❳❴❵➔➕➖➗➘➙➚➛➜➝➞➟➠➡➢➣➤➥➦➧➨➩➪➫➬➭➮➯➰➱➲➳➴➵➶➷➸➹➺➻➼➽➾➿⟀⟁⟂⟃⟄⟅⟆⟇⟈⟉⟊⟋⟌⟍⟎⟏⟐⟑⟒⟓⟔⟕⟖⟗⟘⟙⟚⟛⟜⟝⟞⟟⟠⟡⟢⟣⟤⟥⟦⟧⟨⟩⟪⟫⟬⟭⟮⟯⟰⟱⟲⟳⟴⟵⟶⟷⟸⟹⟺⟻⟼⟽⟾⟿⠀⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰⠱⠲⠳⠴⠵⠶⠷⠸⠹⠺⠻⠼⠽⠾⠿⡀⡁⡂⡃⡄⡅⡆⡇⡈⡉⡊⡋⡌⡍⡎⡏⡐⡑⡒⡓⡔⡕⡖⡗⡘⡙⡚⡛⡜⡝⡞⡟⡠⡡⡢⡣⡤⡥⡦⡧⡨⡩⡪⡫⡬⡭⡮⡯⡰⡱⡲⡳⡴⡵⡶⡷⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⢈⢉⢊⢋⢌⢍⢎⢏⢐⢑⢒⢓⢔⢕⢖⢗⢘⢙⢚⢛⢜⢝⢞⢟⢠⢡⢢⢣⢤⢥⢦⢧⢨⢩⢪⢫⢬⢭⢮⢯⢰⢱⢲⢳⢴⢵⢶⢷⢸⢹⢺⢻⢼⢽⢾⢿⣀⣁⣂⣃⣄⣅⣆⣇⣈⣉⣊⣋⣌⣍⣎⣏⣐⣑⣒⣓⣔⣕⣖⣗⣘⣙⣚⣛⣜⣝⣞⣟⣠⣡⣢⣣⣤⣥⣦⣧⣨⣩⣪⣫⣬⣭⣮⣯⣰⣱⣲⣳⣴⣵⣶⣷⣸⣹⣺⣻⣼⣽⣾⣿⤀⤁⤂⤃⤄⤅⤆⤇⤈⤉⤊⤋⤌⤍⤎⤏⤐⤑⤒⤓⤔⤕⤖⤗⤘⤙⤚⤛⤜⤝⤞⤟⤠⤡⤢⤣⤤⤥⤦⤧⤨⤩⤪⤫⤬⤭⤮⤯⤰⤱⤲⤳⤴⤵⤶⤷⤸⤹⤺⤻⤼⤽⤾⤿⥀⥁⥂⥃⥄⥅⥆⥇⥈⥉⥊⥋⥌⥍⥎⥏⥐⥑⥒⥓⥔⥕⥖⥗⥘⥙⥚⥛⥜⥝⥞⥟⥠⥡⥢⥣⥤⥥⥦⥧⥨⥩⥪⥫⥬⥭⥮⥯⥰⥱⥲⥳⥴⥵⥶⥷⥸⥹⥺⥻⥼⥽⥾⥿⦀⦁⦂⦃⦄⦅⦆⦇⦈⦉⦊⦋⦌⦍⦎⦏⦐⦑⦒⦓⦔⦕⦖⦗⦘⦙⦚⦛⦜⦝⦞⦟⦠⦡⦢⦣⦤⦥⦦⦧⦨⦩⦪⦫⦬⦭⦮⦯⦰⦱⦲⦳⦴⦵⦶⦷⦸⦹⦺⦻⦼⦽⦾⦿⧀⧁⧂⧃⧄⧅⧆⧇⧈⧉⧊⧋⧌⧍⧎⧏⧐⧑⧒⧓⧔⧕⧖⧗⧘⧙⧚⧛⧜⧝⧞⧟⧠⧡⧢⧣⧤⧥⧦⧧⧨⧩⧪⧫⧬⧭⧮⧯⧰⧱⧲⧳⧴⧵⧶⧷⧸⧹⧺⧻⧼⧽⧾⧿⨀⨁⨂⨃⨄⨅⨆⨇⨈⨉⨊⨋⨌⨍⨎⨏⨐⨑⨒⨓⨔⨕⨖⨗⨘⨙⨚⨛⨜⨝⨞⨟⨠⨡⨢⨣⨤⨥⨦⨧⨨⨩⨪⨫⨬⨭⨮⨯⨰⨱⨲⨳⨴⨵⨶⨷⨸⨹⨺⨻⨼⨽⨾⨿⩀⩁⩂⩃⩄⩅⩆⩇⩈⩉⩊⩋⩌⩍⩎⩏⩐⩑⩒⩓⩔⩕⩖⩗⩘⩙⩚⩛⩜⩝⩞⩟⩠⩡⩢⩣⩤⩥⩦⩧⩨⩩⩪⩫⩬⩭⩮⩯⩰⩱⩲⩳⩴⩵⩶⩷⩸⩹⩺⩻⩼⩽⩾⩿⪀⪁⪂⪃⪄⪅⪆⪇⪈⪉⪊⪋⪌⪍⪎⪏⪐⪑⪒⪓⪔⪕⪖⪗⪘⪙⪚⪛⪜⪝⪞⪟⪠⪡⪢⪣⪤⪥⪦⪧⪨⪩⪪⪫⪬⪭⪮⪯⪰⪱⪲⪳⪴⪵⪶⪷⪸⪹⪺⪻⪼⪽⪾⪿⫀⫁⫂⫃⫄⫅⫆⫇⫈⫉⫊⫋⫌⫍⫎⫏⫐⫑⫒⫓⫔⫕⫖⫗⫘⫙⫚⫛⫝̸⫝⫞⫟⫠⫡⫢⫣⫤⫥⫦⫧⫨⫩⫪⫫⫬⫭⫮⫯⫰⫱⫲⫳⫴⫵⫶⫷⫸⫹⫺⫻⫼⫽⫾⫿⬀⬁⬂⬃⬄⬅⬆⬇⬈⬉⬊⬋⬌⬍⬎⬏⬐⬑⬒⬓⬔⬕⬖⬗⬘⬙⬚⬛⬜⬝⬞⬟⬠⬡⬢⬣⬤⬥⬦⬧⬨⬩⬪⬫⬬⬭⬮⬯⬰⬱⬲⬳⬴⬵⬶⬷⬸⬹⬺⬻⬼⬽⬾⬿⭀⭁⭂⭃⭄⭅⭆⭇⭈⭉⭊⭋⭌⭍⭎⭏⭐⭑⭒⭓⭔⭕⭖⭗⭘⭙⭚⭛⭜⭝⭞⭟⭠⭡⭢⭣⭤⭥⭦⭧⭨⭩⭪⭫⭬⭭⭮⭯⭰⭱⭲⭳⭶⭷⭸⭹⭺⭻⭼⭽⭾⭿⮀⮁⮂⮃⮄⮅⮆⮇⮈⮉⮊⮋⮌⮍⮎⮏⮐⮑⮒⮓⮔⮕⮗⮘⮙⮚⮛⮜⮝⮞⮟⮠⮡⮢⮣⮤⮥⮦⮧⮨⮩⮪⮫⮬⮭⮮⮯⮰⮱⮲⮳⮴⮵⮶⮷⮸⮹⮺⮻⮼⮽⮾⮿⯀⯁⯂⯃⯄⯅⯆⯇⯈⯉⯊⯋⯌⯍⯎⯏⯐⯑⯒⯓⯔⯕⯖⯗⯘⯙⯚⯛⯜⯝⯞⯟⯠⯡⯢⯣⯤⯥⯦⯧⯨⯩⯪⯫⯬⯭⯮⯯⯰⯱⯲⯳⯴⯵⯶⯷⯸⯹⯺⯻⯼⯽⯾⯿⳥⳦⳧⳨⳩⳪⳹⳺⳻⳼⳾⳿⵰⸀⸁⸂⸃⸄⸅⸆⸇⸈⸉⸊⸋⸌⸍⸎⸏⸐⸑⸒⸓⸔⸕⸖⸗⸘⸙⸚⸛⸜⸝⸞⸟⸠⸡⸢⸣⸤⸥⸦⸧⸨⸩⸪⸫⸬⸭⸮⸰⸱⸲⸳⸴⸵⸶⸷⸸⸹⸺⸻⸼⸽⸾⸿⹀⹁⹂⹃⹄⹅⹆⹇⹈⹉⹊⹋⹌⹍⹎⹏⹐⹑⹒⹓⹔⹕⹖⹗⹘⹙⹚⹛⹜⹝⺀⺁⺂⺃⺄⺅⺆⺇⺈⺉⺊⺋⺌⺍⺎⺏⺐⺑⺒⺓⺔⺕⺖⺗⺘⺙⺛⺜⺝⺞⺟⺠⺡⺢⺣⺤⺥⺦⺧⺨⺩⺪⺫⺬⺭⺮⺯⺰⺱⺲⺳⺴⺵⺶⺷⺸⺹⺺⺻⺼⺽⺾⺿⻀⻁⻂⻃⻄⻅⻆⻇⻈⻉⻊⻋⻌⻍⻎⻏⻐⻑⻒⻓⻔⻕⻖⻗⻘⻙⻚⻛⻜⻝⻞⻟⻠⻡⻢⻣⻤⻥⻦⻧⻨⻩⻪⻫⻬⻭⻮⻯⻰⻱⻲⻳⼀⼁⼂⼃⼄⼅⼆⼇⼈⼉⼊⼋⼌⼍⼎⼏⼐⼑⼒⼓⼔⼕⼖⼗⼘⼙⼚⼛⼜⼝⼞⼟⼠⼡⼢⼣⼤⼥⼦⼧⼨⼩⼪⼫⼬⼭⼮⼯⼰⼱⼲⼳⼴⼵⼶⼷⼸⼹⼺⼻⼼⼽⼾⼿⽀⽁⽂⽃⽄⽅⽆⽇⽈⽉⽊⽋⽌⽍⽎⽏⽐⽑⽒⽓⽔⽕⽖⽗⽘⽙⽚⽛⽜⽝⽞⽟⽠⽡⽢⽣⽤⽥⽦⽧⽨⽩⽪⽫⽬⽭⽮⽯⽰⽱⽲⽳⽴⽵⽶⽷⽸⽹⽺⽻⽼⽽⽾⽿⾀⾁⾂⾃⾄⾅⾆⾇⾈⾉⾊⾋⾌⾍⾎⾏⾐⾑⾒⾓⾔⾕⾖⾗⾘⾙⾚⾛⾜⾝⾞⾟⾠⾡⾢⾣⾤⾥⾦⾧⾨⾩⾪⾫⾬⾭⾮⾯⾰⾱⾲⾳⾴⾵⾶⾷⾸⾹⾺⾻⾼⾽⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕⿰⿱⿲⿳⿴⿵⿶⿷⿸⿹⿺⿻、。〃〄〈〉《》「」『』【】〒〓〔〕〖〗〘〙〚〛〜〝〞〟〠〰〶〷〽〾〿゛゜゠・㆐㆑㆖㆗㆘㆙㆚㆛㆜㆝㆞㆟㇀㇁㇂㇃㇄㇅㇆㇇㇈㇉㇊㇋㇌㇍㇎㇏㇐㇑㇒㇓㇔㇕㇖㇗㇘㇙㇚㇛㇜㇝
㇞㇟㇠㇡㇢㇣㈀㈁㈂㈃㈄㈅㈆㈇㈈㈉㈊㈋㈌㈍㈎㈏㈐㈑㈒㈓㈔㈕㈖㈗㈘㈙㈚㈛㈜㈝㈞㈪㈫㈬㈭㈮㈯㈰㈱㈲㈳㈴㈵㈶㈷㈸㈹㈺㈻㈼㈽㈾㈿㉀㉁㉂㉃㉄㉅㉆㉇㉐㉠㉡㉢㉣㉤㉥㉦㉧㉨㉩㉪㉫㉬㉭㉮㉯㉰㉱㉲㉳㉴㉵㉶㉷㉸㉹㉺㉻㉼㉽㉾㉿㊊㊋㊌㊍㊎㊏㊐㊑㊒㊓㊔㊕㊖㊗㊘㊙㊚㊛㊜㊝㊞㊟㊠㊡㊢㊣㊤㊥㊦㊧㊨㊩㊪㊫㊬㊭㊮㊯㊰㋀㋁㋂㋃㋄㋅㋆㋇㋈㋉㋊㋋㋌㋍㋎㋏㋐㋑㋒㋓㋔㋕㋖㋗㋘㋙㋚㋛㋜㋝㋞㋟㋠㋡㋢㋣㋤㋥㋦㋧㋨㋩㋪㋫㋬㋭㋮㋯㋰㋱㋲㋳㋴㋵㋶㋷㋸㋹㋺㋻㋼㋽㋾㋿㌀㌁㌂㌃㌄㌅㌆㌇㌈㌉㌊㌋㌌㌍㌎㌏㌐㌑㌒㌓㌔㌕㌖㌗㌘㌙㌚㌛㌜㌝㌞㌟㌠㌡㌢㌣㌤㌥㌦㌧㌨㌩㌪㌫㌬㌭㌮㌯㌰㌱㌲㌳㌴㌵㌶㌷㌸㌹㌺㌻㌼㌽㌾㌿㍀㍁㍂㍃㍄㍅㍆㍇㍈㍉㍊㍋㍌㍍㍎㍏㍐㍑㍒㍓㍔㍕㍖㍗㍘㍙㍚㍛㍜㍝㍞㍟㍠㍡㍢㍣㍤㍥㍦㍧㍨㍩㍪㍫㍬㍭㍮㍯㍰㍱㍲㍳㍴㍵㍶㍷㍸㍹㍺㍻㍼㍽㍾㍿㎀㎁㎂㎃㎄㎅㎆㎇㎈㎉㎊㎋㎌㎍㎎㎏㎐㎑㎒㎓㎔㎕㎖㎗㎘㎙㎚㎛㎜㎝㎞㎟㎠㎡㎢㎣㎤㎥㎦㎧㎨㎩㎪㎫㎬㎭㎮㎯㎰㎱㎲㎳㎴㎵㎶㎷㎸㎹㎺㎻㎼㎽㎾㎿㏀㏁㏂㏃㏄㏅㏆㏇㏈㏉㏊㏋㏌㏍㏎㏏㏐㏑㏒㏓㏔㏕㏖㏗㏘㏙㏚㏛㏜㏝㏞㏟㏠㏡㏢㏣㏤㏥㏦㏧㏨㏩㏪㏫㏬㏭㏮㏯㏰㏱㏲㏳㏴㏵㏶㏷㏸㏹㏺㏻㏼㏽㏾㏿䷀䷁䷂䷃䷄䷅䷆䷇䷈䷉䷊䷋䷌䷍䷎䷏䷐䷑䷒䷓䷔䷕䷖䷗䷘䷙䷚䷛䷜䷝䷞䷟䷠䷡䷢䷣䷤䷥䷦䷧䷨䷩䷪䷫䷬䷭䷮䷯䷰䷱䷲䷳䷴䷵䷶䷷䷸䷹䷺䷻䷼䷽䷾䷿꒐꒑꒒꒓꒔꒕꒖꒗꒘꒙꒚꒛꒜꒝꒞꒟꒠꒡꒢꒣꒤꒥꒦꒧꒨꒩꒪꒫꒬꒭꒮꒯꒰꒱꒲꒳꒴꒵꒶꒷꒸꒹꒺꒻꒼꒽꒾꒿꓀꓁꓂꓃꓄꓅꓆꓾꓿꘍꘎꘏꙳꙾꛲꛳꛴꛵꛶꛷꜀꜁꜂꜃꜄꜅꜆꜇꜈꜉꜊꜋꜌꜍꜎꜏꜐꜑꜒꜓꜔꜕꜖꜠꜡꞉꞊꠨꠩꠪꠫꠶꠷꠹꡴꡵꡶꡷꣎꣏꣸꣹꣺꣼꤮꤯꥟꧁꧂꧃꧄꧅꧆꧇꧈꧉꧊꧋꧌꧍꧞꧟꩜꩝꩞꩟꩷꩸꩹꫞꫟꫰꫱꭛꭪꭫꯫﬩﮲﮳﮴﮵﮶﮷﮸﮹﮺﮻﮼﮽﮾﮿﯀﯁﯂﴾﴿﵀﵁﵂﵃﵄﵅﵆﵇﵈﵉﵊﵋﵌﵍﵎﵏﷏﷽﷾﷿︐︑︒︓︔︕︖︗︘︙︰︱︲︳︴︵︶︷︸︹︺︻︼︽︾︿﹀﹁﹂﹃﹄﹅﹆﹇﹈﹉﹊﹋﹌﹍﹎﹏﹐﹑﹒﹔﹕﹖﹗﹘﹙﹚﹛﹜﹝﹞﹟﹠﹡﹢﹣﹤﹥﹦﹨﹪﹫!"#%&'()*+,-./:;<=>?@[\]^_`{|}~⦅⦆。「」、・¬ ̄¦│←↑→↓■○�𐄀𐄁𐄂𐄷𐄸𐄹𐄺𐄻𐄼𐄽𐄾𐄿𐅹𐅺𐅻𐅼𐅽𐅾𐅿𐆀𐆁𐆂𐆃𐆄𐆅𐆆𐆇𐆈𐆉𐆌𐆍𐆎𐆐𐆑𐆒𐆓𐆔𐆕𐆖𐆗𐆘𐆙𐆚𐆛𐆜𐆠𐇐𐇑𐇒𐇓𐇔𐇕𐇖𐇗𐇘𐇙𐇚𐇛𐇜𐇝𐇞𐇟𐇠𐇡𐇢𐇣𐇤𐇥𐇦𐇧𐇨𐇩𐇪𐇫𐇬𐇭𐇮𐇯𐇰𐇱𐇲𐇳𐇴𐇵𐇶𐇷𐇸𐇹𐇺𐇻𐇼𐎟𐏐𐕯𐡗𐡷𐡸𐤟𐤿𐩐𐩑𐩒𐩓𐩔𐩕𐩖𐩗𐩘𐩿𐫈𐫰𐫱𐫲𐫳𐫴𐫵𐫶𐬹𐬺𐬻𐬼𐬽𐬾𐬿𐮙𐮚𐮛𐮜𐺭𐽕𐽖𐽗𐽘𐽙𐾆𐾇𐾈𐾉𑁇𑁈𑁉𑁊𑁋𑁌𑁍𑂻𑂼𑂾𑂿𑃀𑃁𑅀𑅁𑅂𑅃𑅴𑅵𑇅𑇆𑇇𑇈𑇍𑇛𑇝𑇞𑇟𑈸𑈹𑈺𑈻𑈼𑈽𑊩𑑋𑑌𑑍𑑎𑑏𑑚𑑛𑑝𑓆𑗁𑗂𑗃𑗄𑗅𑗆𑗇𑗈𑗉𑗊𑗋𑗌𑗍𑗎𑗏𑗐𑗑𑗒𑗓𑗔𑗕𑗖𑗗𑙁𑙂𑙃𑙠𑙡𑙢𑙣𑙤𑙥𑙦𑙧𑙨𑙩𑙪𑙫𑙬𑚹𑜼𑜽𑜾𑜿𑠻𑥄𑥅𑥆𑧢𑨿𑩀𑩁𑩂𑩃𑩄𑩅𑩆𑪚𑪛𑪜𑪞𑪟𑪠𑪡𑪢𑬀𑬁𑬂𑬃𑬄𑬅𑬆𑬇𑬈𑬉𑱁𑱂𑱃𑱄𑱅𑱰𑱱𑻷𑻸𑽃𑽄𑽅𑽆𑽇𑽈𑽉𑽊𑽋𑽌𑽍𑽎𑽏𑿕𑿖𑿗𑿘𑿙𑿚𑿛𑿜𑿡𑿢𑿣𑿤𑿥𑿦𑿧𑿨𑿩𑿪𑿫𑿬𑿭𑿮𑿯𑿰𑿱𑿿𒑰𒑱𒑲𒑳𒑴𒿱𒿲𖩮𖩯𖫵𖬷𖬸𖬹𖬺𖬻𖬼𖬽𖬾𖬿𖭄𖭅𖺗𖺘𖺙𖺚𖿢𛲜𛲟𜽐𜽑𜽒𜽓𜽔𜽕𜽖𜽗𜽘𜽙𜽚𜽛𜽜𜽝𜽞𜽟𜽠𜽡𜽢𜽣𜽤𜽥𜽦𜽧𜽨𜽩𜽪𜽫𜽬𜽭𜽮𜽯𜽰𜽱𜽲𜽳𜽴𜽵𜽶𜽷𜽸𜽹𜽺𜽻𜽼𜽽𜽾𜽿𜾀𜾁𜾂𜾃𜾄𜾅𜾆𜾇𜾈𜾉𜾊𜾋𜾌𜾍𜾎𜾏𜾐𜾑𜾒𜾓𜾔𜾕𜾖𜾗𜾘𜾙𜾚𜾛𜾜𜾝𜾞𜾟𜾠𜾡𜾢𜾣𜾤𜾥𜾦𜾧𜾨𜾩𜾪𜾫𜾬𜾭𜾮𜾯𜾰𜾱𜾲𜾳𜾴𜾵𜾶𜾷𜾸𜾹𜾺𜾻𜾼𜾽𜾾𜾿𜿀𜿁𜿂𜿃𝀀𝀁𝀂𝀃𝀄𝀅𝀆𝀇𝀈𝀉𝀊𝀋𝀌𝀍𝀎𝀏𝀐𝀑𝀒𝀓𝀔𝀕𝀖𝀗𝀘𝀙𝀚𝀛𝀜𝀝𝀞𝀟𝀠𝀡𝀢𝀣𝀤𝀥𝀦𝀧𝀨𝀩𝀪𝀫𝀬𝀭𝀮𝀯𝀰𝀱𝀲𝀳𝀴𝀵𝀶𝀷𝀸𝀹𝀺𝀻𝀼𝀽𝀾𝀿𝁀𝁁𝁂𝁃𝁄𝁅𝁆𝁇𝁈𝁉𝁊𝁋𝁌𝁍𝁎𝁏𝁐𝁑𝁒𝁓𝁔𝁕𝁖𝁗𝁘𝁙𝁚𝁛𝁜𝁝𝁞𝁟𝁠𝁡𝁢𝁣𝁤𝁥𝁦𝁧𝁨𝁩𝁪𝁫𝁬𝁭𝁮𝁯𝁰𝁱𝁲𝁳𝁴𝁵𝁶𝁷𝁸𝁹𝁺𝁻𝁼𝁽𝁾𝁿𝂀𝂁𝂂𝂃𝂄𝂅𝂆𝂇𝂈𝂉𝂊𝂋𝂌𝂍𝂎𝂏𝂐𝂑𝂒𝂓𝂔𝂕𝂖𝂗𝂘𝂙𝂚𝂛𝂜𝂝𝂞𝂟𝂠𝂡𝂢𝂣𝂤𝂥𝂦𝂧𝂨𝂩𝂪𝂫𝂬𝂭𝂮𝂯𝂰𝂱𝂲𝂳𝂴𝂵𝂶𝂷𝂸𝂹𝂺𝂻𝂼𝂽𝂾𝂿𝃀𝃁𝃂𝃃𝃄𝃅𝃆𝃇𝃈𝃉𝃊𝃋𝃌𝃍𝃎𝃏𝃐𝃑𝃒𝃓𝃔𝃕𝃖𝃗𝃘𝃙𝃚𝃛𝃜𝃝𝃞𝃟𝃠𝃡𝃢𝃣𝃤𝃥𝃦𝃧𝃨𝃩𝃪𝃫𝃬𝃭𝃮𝃯𝃰𝃱𝃲𝃳𝃴𝃵𝄀𝄁𝄂𝄃𝄄𝄅𝄆𝄇𝄈𝄉𝄊𝄋𝄌𝄍𝄎𝄏𝄐𝄑𝄒𝄓𝄔𝄕𝄖𝄗𝄘𝄙𝄚𝄛𝄜𝄝𝄞𝄟𝄠𝄡𝄢𝄣𝄤𝄥𝄦𝄩𝄪𝄫𝄬𝄭𝄮𝄯𝄰𝄱𝄲𝄳𝄴𝄵𝄶𝄷𝄸𝄹𝄺𝄻𝄼𝄽𝄾𝄿𝅀𝅁𝅂𝅃𝅄𝅅𝅆𝅇𝅈𝅉𝅊𝅋𝅌𝅍𝅎𝅏𝅐𝅑𝅒𝅓𝅔𝅕𝅖𝅗𝅘𝅙𝅚𝅛𝅜𝅝𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱𝅘𝅥𝅲𝅪𝅫𝅬𝆃𝆄𝆌𝆍𝆎𝆏𝆐𝆑𝆒𝆓𝆔𝆕𝆖𝆗𝆘𝆙𝆚𝆛𝆜𝆝𝆞𝆟𝆠𝆡𝆢𝆣𝆤𝆥𝆦𝆧𝆨𝆩𝆮𝆯𝆰𝆱𝆲𝆳𝆴𝆵𝆶𝆷𝆸𝆹𝆺𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯𝇁𝇂𝇃𝇄𝇅𝇆𝇇𝇈𝇉𝇊𝇋𝇌𝇍𝇎𝇏𝇐𝇑𝇒𝇓𝇔𝇕𝇖𝇗𝇘𝇙𝇚𝇛𝇜𝇝𝇞𝇟𝇠𝇡𝇢𝇣𝇤𝇥𝇦𝇧𝇨𝇩𝇪𝈀𝈁𝈂𝈃𝈄𝈅𝈆𝈇𝈈𝈉𝈊𝈋𝈌𝈍𝈎𝈏𝈐𝈑𝈒𝈓𝈔𝈕𝈖𝈗𝈘𝈙𝈚𝈛𝈜𝈝𝈞𝈟𝈠𝈡𝈢𝈣𝈤𝈥𝈦𝈧𝈨𝈩𝈪𝈫𝈬𝈭𝈮𝈯𝈰𝈱𝈲𝈳𝈴𝈵𝈶𝈷𝈸𝈹𝈺𝈻𝈼𝈽𝈾𝈿𝉀𝉁𝉅𝌀𝌁𝌂𝌃𝌄𝌅𝌆𝌇𝌈𝌉𝌊𝌋𝌌𝌍𝌎𝌏𝌐𝌑𝌒𝌓𝌔𝌕𝌖𝌗𝌘𝌙𝌚𝌛𝌜𝌝𝌞𝌟𝌠𝌡𝌢𝌣𝌤𝌥𝌦𝌧𝌨𝌩𝌪𝌫𝌬𝌭𝌮𝌯𝌰𝌱𝌲𝌳𝌴𝌵𝌶𝌷𝌸𝌹𝌺𝌻𝌼𝌽𝌾𝌿𝍀𝍁𝍂𝍃𝍄𝍅𝍆𝍇𝍈𝍉𝍊𝍋𝍌𝍍𝍎𝍏𝍐𝍑𝍒𝍓𝍔𝍕𝍖𝛁𝛛𝛻𝜕𝜵𝝏𝝯𝞉𝞩𝟃𝠀𝠁𝠂𝠃𝠄𝠅𝠆𝠇𝠈𝠉𝠊𝠋𝠌𝠍𝠎𝠏𝠐𝠑𝠒𝠓𝠔𝠕𝠖𝠗𝠘𝠙𝠚𝠛𝠜𝠝𝠞𝠟𝠠𝠡𝠢𝠣𝠤𝠥𝠦𝠧𝠨𝠩𝠪𝠫𝠬𝠭𝠮𝠯𝠰𝠱𝠲𝠳𝠴𝠵𝠶𝠷𝠸𝠹𝠺𝠻𝠼𝠽𝠾𝠿𝡀𝡁𝡂𝡃𝡄𝡅𝡆𝡇𝡈𝡉𝡊𝡋𝡌𝡍𝡎𝡏𝡐𝡑𝡒𝡓𝡔𝡕𝡖𝡗𝡘𝡙𝡚𝡛𝡜𝡝𝡞𝡟𝡠𝡡𝡢𝡣𝡤𝡥𝡦𝡧𝡨𝡩𝡪𝡫𝡬𝡭𝡮𝡯𝡰𝡱𝡲𝡳𝡴𝡵𝡶𝡷𝡸𝡹𝡺𝡻𝡼𝡽𝡾𝡿𝢀𝢁𝢂𝢃𝢄𝢅𝢆𝢇𝢈𝢉𝢊𝢋𝢌𝢍𝢎𝢏𝢐𝢑𝢒𝢓𝢔𝢕𝢖𝢗𝢘𝢙𝢚𝢛𝢜𝢝𝢞𝢟𝢠𝢡𝢢𝢣𝢤𝢥𝢦𝢧𝢨𝢩𝢪𝢫𝢬𝢭𝢮𝢯𝢰𝢱𝢲𝢳𝢴𝢵𝢶𝢷𝢸𝢹𝢺𝢻𝢼𝢽𝢾𝢿𝣀𝣁𝣂𝣃𝣄𝣅𝣆𝣇𝣈𝣉𝣊𝣋𝣌𝣍𝣎𝣏𝣐𝣑𝣒𝣓𝣔𝣕𝣖𝣗𝣘𝣙𝣚𝣛𝣜𝣝𝣞𝣟𝣠𝣡𝣢𝣣𝣤𝣥𝣦𝣧𝣨𝣩𝣪𝣫𝣬𝣭𝣮𝣯𝣰𝣱𝣲𝣳𝣴𝣵𝣶𝣷𝣸𝣹𝣺𝣻𝣼𝣽𝣾𝣿𝤀𝤁𝤂𝤃𝤄𝤅𝤆𝤇𝤈𝤉𝤊𝤋𝤌𝤍𝤎𝤏𝤐𝤑𝤒𝤓𝤔𝤕𝤖𝤗𝤘𝤙𝤚𝤛𝤜𝤝𝤞𝤟𝤠𝤡𝤢𝤣𝤤𝤥𝤦𝤧𝤨𝤩𝤪𝤫𝤬𝤭𝤮𝤯𝤰𝤱𝤲𝤳𝤴𝤵𝤶𝤷𝤸𝤹𝤺𝤻𝤼𝤽𝤾𝤿𝥀𝥁𝥂𝥃𝥄𝥅𝥆𝥇𝥈𝥉𝥊𝥋𝥌𝥍𝥎𝥏𝥐𝥑𝥒𝥓𝥔𝥕𝥖𝥗𝥘𝥙𝥚𝥛𝥜𝥝𝥞𝥟𝥠𝥡𝥢𝥣𝥤𝥥𝥦𝥧𝥨𝥩𝥪𝥫𝥬𝥭𝥮𝥯𝥰𝥱𝥲𝥳𝥴𝥵𝥶𝥷𝥸𝥹𝥺𝥻𝥼𝥽𝥾𝥿𝦀𝦁𝦂𝦃𝦄𝦅𝦆𝦇𝦈𝦉𝦊𝦋𝦌𝦍𝦎𝦏𝦐𝦑𝦒𝦓𝦔𝦕𝦖𝦗𝦘𝦙𝦚𝦛𝦜𝦝𝦞𝦟𝦠𝦡𝦢𝦣𝦤𝦥𝦦𝦧𝦨𝦩𝦪𝦫𝦬𝦭𝦮𝦯𝦰𝦱𝦲𝦳𝦴𝦵𝦶𝦷𝦸𝦹𝦺𝦻𝦼𝦽𝦾𝦿𝧀𝧁𝧂𝧃𝧄𝧅𝧆𝧇𝧈𝧉𝧊𝧋𝧌𝧍𝧎𝧏𝧐𝧑𝧒𝧓𝧔𝧕𝧖𝧗𝧘𝧙𝧚𝧛𝧜𝧝𝧞𝧟𝧠𝧡𝧢𝧣𝧤𝧥𝧦𝧧𝧨𝧩𝧪𝧫𝧬𝧭𝧮𝧯𝧰𝧱𝧲𝧳𝧴𝧵𝧶𝧷𝧸𝧹𝧺𝧻𝧼𝧽𝧾𝧿𝨷𝨸𝨹𝨺𝩭𝩮𝩯𝩰𝩱𝩲𝩳𝩴𝩶𝩷𝩸𝩹𝩺𝩻𝩼𝩽𝩾𝩿𝪀𝪁𝪂𝪃𝪅𝪆𝪇𝪈𝪉𝪊𝪋𞅏𞥞𞥟𞲬𞴮𞻰𞻱🀀🀁🀂🀃🀄🀅🀆🀇🀈🀉🀊🀋🀌🀍🀎🀏🀐🀑🀒🀓🀔🀕🀖🀗🀘🀙🀚🀛🀜🀝🀞🀟🀠🀡🀢🀣🀤🀥🀦🀧🀨🀩🀪🀫🀰🀱🀲🀳🀴🀵🀶🀷🀸🀹🀺🀻🀼🀽🀾🀿🁀🁁🁂🁃🁄🁅🁆🁇🁈🁉🁊🁋🁌🁍🁎🁏🁐🁑🁒🁓🁔🁕🁖🁗🁘🁙🁚🁛🁜🁝🁞🁟🁠🁡🁢🁣🁤🁥🁦🁧🁨🁩🁪🁫🁬🁭🁮🁯🁰🁱🁲🁳🁴🁵🁶🁷🁸🁹🁺🁻🁼🁽🁾🁿🂀🂁🂂🂃🂄🂅🂆🂇🂈🂉🂊🂋🂌🂍🂎🂏🂐🂑🂒🂓🂠🂡🂢🂣🂤🂥🂦🂧🂨🂩🂪🂫🂬🂭🂮🂱🂲🂳🂴🂵🂶🂷🂸🂹🂺🂻🂼🂽🂾🂿🃁🃂🃃🃄🃅🃆🃇🃈🃉🃊🃋🃌🃍🃎🃏🃑🃒🃓🃔🃕🃖🃗🃘🃙🃚🃛🃜🃝🃞🃟🃠🃡🃢🃣🃤🃥🃦🃧🃨🃩🃪🃫🃬🃭🃮🃯🃰🃱🃲🃳🃴🃵🄍🄎🄏🄐🄑🄒🄓🄔🄕🄖🄗🄘🄙🄚🄛🄜🄝🄞🄟🄠🄡🄢🄣🄤🄥🄦🄧🄨🄩🄪🄫🄬🄭🄮🄯🅊🅋🅌🅍🅎🅏🅪🅫🅬🅭🅮🅯🆊🆋🆌🆍🆎🆏🆐🆑🆒🆓🆔🆕🆖🆗🆘🆙🆚🆛🆜🆝🆞🆟🆠🆡🆢🆣🆤🆥🆦🆧🆨🆩🆪🆫🆬🆭🇦🇧🇨🇩🇪🇫🇬🇭🇮🇯🇰🇱🇲🇳🇴🇵🇶🇷🇸🇹🇺🇻🇼🇽🇾🇿🈀🈁🈂🈐🈑🈒🈓🈔🈕🈖🈗🈘🈙🈚🈛🈜🈝🈞🈟🈠🈡🈢🈣🈤🈥🈦🈧🈨🈩🈪🈫🈬🈭🈮🈯🈰🈱🈲🈳🈴🈵🈶🈷🈸🈹🈺🈻🉀🉁🉂🉃🉄🉅🉆🉇🉈🉐🉑🉠🉡🉢🉣🉤🉥🌀🌁🌂🌃🌄🌅🌆🌇🌈🌉🌊🌋🌌🌍🌎🌏🌐🌑🌒🌓🌔🌕🌖🌗🌘🌙🌚🌛🌜🌝🌞🌟🌠🌡🌢🌣🌤🌥🌦🌧🌨🌩🌪🌫🌬🌭🌮🌯🌰🌱🌲🌳🌴🌵🌶🌷🌸🌹🌺🌻🌼🌽🌾🌿🍀🍁🍂🍃🍄🍅🍆🍇🍈🍉🍊🍋🍌🍍🍎🍏🍐🍑🍒🍓🍔🍕🍖🍗🍘🍙🍚🍛🍜🍝🍞🍟🍠🍡🍢🍣🍤🍥🍦🍧🍨🍩🍪🍫🍬🍭🍮🍯🍰🍱🍲🍳🍴🍵🍶🍷🍸🍹🍺🍻🍼🍽🍾🍿🎀🎁🎂🎃🎄🎅🎆🎇🎈🎉🎊🎋🎌🎍🎎🎏🎐🎑🎒🎓🎔🎕🎖🎗🎘🎙🎚🎛🎜🎝🎞🎟🎠🎡🎢🎣🎤🎥🎦🎧🎨🎩🎪🎫🎬🎭🎮🎯🎰🎱🎲🎳🎴🎵🎶🎷🎸🎹🎺🎻🎼🎽🎾🎿🏀🏁🏂🏃🏄🏅🏆🏇🏈🏉🏊🏋🏌🏍🏎🏏🏐🏑🏒🏓🏔🏕🏖🏗🏘🏙🏚🏛🏜🏝🏞🏟🏠🏡🏢🏣🏤🏥🏦🏧🏨🏩🏪🏫🏬🏭🏮🏯🏰🏱🏲🏳🏴🏵🏶🏷🏸🏹🏺🏻🏼🏽🏾🏿🐀🐁🐂🐃🐄🐅🐆🐇🐈🐉🐊🐋🐌🐍🐎🐏🐐🐑🐒🐓🐔🐕🐖🐗🐘🐙🐚🐛🐜🐝🐞🐟🐠🐡🐢🐣🐤🐥🐦🐧🐨🐩🐪🐫🐬🐭🐮🐯🐰🐱🐲🐳🐴🐵🐶🐷🐸🐹🐺🐻🐼🐽🐾🐿👀👁👂👃👄👅👆👇👈👉👊👋👌👍👎👏👐👑👒👓👔👕👖👗👘👙👚👛👜👝👞👟👠👡👢👣👤👥👦👧👨👩👪👫👬👭👮👯👰👱👲👳👴👵👶👷👸👹👺👻👼👽👾👿💀💁💂💃💄💅💆💇💈💉💊💋💌💍💎💏💐💑💒💓💔💕💖💗💘💙💚💛💜💝💞💟💠💡💢💣💤💥💦💧💨💩💪💫💬💭💮💯💰💱💲💳💴💵💶💷💸💹💺💻💼💽💾💿📀📁📂📃📄📅📆📇📈📉📊📋📌📍📎📏📐📑📒📓📔📕📖📗📘📙📚📛📜📝📞📟📠📡📢📣📤📥📦📧📨📩📪📫📬📭📮📯📰📱📲📳📴📵📶📷📸📹📺📻📼📽📾📿🔀🔁🔂🔃🔄🔅🔆🔇🔈🔉🔊🔋🔌🔍🔎🔏🔐🔑🔒🔓🔔🔕🔖🔗🔘🔙🔚🔛🔜🔝🔞🔟🔠🔡🔢🔣🔤🔥🔦🔧🔨🔩🔪🔫🔬🔭🔮🔯🔰🔱🔲🔳🔴🔵🔶🔷🔸🔹🔺🔻🔼🔽🔾🔿🕀🕁🕂🕃🕄🕅🕆🕇🕈🕉🕊🕋🕌🕍🕎🕏🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚🕛🕜🕝🕞🕟🕠🕡🕢🕣🕤🕥🕦🕧🕨🕩🕪🕫🕬🕭🕮🕯🕰🕱🕲🕳🕴🕵🕶🕷🕸🕹🕺🕻🕼🕽🕾🕿🖀🖁🖂🖃🖄🖅🖆🖇🖈🖉🖊🖋🖌🖍🖎🖏🖐🖑🖒🖓🖔🖕🖖🖗🖘🖙🖚🖛🖜🖝🖞🖟🖠🖡🖢🖣🖤🖥🖦🖧🖨🖩🖪🖫🖬🖭🖮🖯🖰🖱🖲🖳🖴🖵🖶🖷🖸🖹🖺🖻🖼🖽🖾
🖿🗀🗁🗂🗃🗄🗅🗆🗇🗈🗉🗊🗋🗌🗍🗎🗏🗐🗑🗒🗓🗔🗕🗖🗗🗘🗙🗚🗛🗜🗝🗞🗟🗠🗡🗢🗣🗤🗥🗦🗧🗨🗩🗪🗫🗬🗭🗮🗯🗰🗱🗲🗳🗴🗵🗶🗷🗸🗹🗺🗻🗼🗽🗾🗿😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿🙀🙁🙂🙃🙄🙅🙆🙇🙈🙉🙊🙋🙌🙍🙎🙏🙐🙑🙒🙓🙔🙕🙖🙗🙘🙙🙚🙛🙜🙝🙞🙟🙠🙡🙢🙣🙤🙥🙦🙧🙨🙩🙪🙫🙬🙭🙮🙯🙰🙱🙲🙳🙴🙵🙶🙷🙸🙹🙺🙻🙼🙽🙾🙿🚀🚁🚂🚃🚄🚅🚆🚇🚈🚉🚊🚋🚌🚍🚎🚏🚐🚑🚒🚓🚔🚕🚖🚗🚘🚙🚚🚛🚜🚝🚞🚟🚠🚡🚢🚣🚤🚥🚦🚧🚨🚩🚪🚫🚬🚭🚮🚯🚰🚱🚲🚳🚴🚵🚶🚷🚸🚹🚺🚻🚼🚽🚾🚿🛀🛁🛂🛃🛄🛅🛆🛇🛈🛉🛊🛋🛌🛍🛎🛏🛐🛑🛒🛓🛔🛕🛖🛗🛜🛝🛞🛟🛠🛡🛢🛣🛤🛥🛦🛧🛨🛩🛪🛫🛬🛰🛱🛲🛳🛴🛵🛶🛷🛸🛹🛺🛻🛼🜀🜁🜂🜃🜄🜅🜆🜇🜈🜉🜊🜋🜌🜍🜎🜏🜐🜑🜒🜓🜔🜕🜖🜗🜘🜙🜚🜛🜜🜝🜞🜟🜠🜡🜢🜣🜤🜥🜦🜧🜨🜩🜪🜫🜬🜭🜮🜯🜰🜱🜲🜳🜴🜵🜶🜷🜸🜹🜺🜻🜼🜽🜾🜿🝀🝁🝂🝃🝄🝅🝆🝇🝈🝉🝊🝋🝌🝍🝎🝏🝐🝑🝒🝓🝔🝕🝖🝗🝘🝙🝚🝛🝜🝝🝞🝟🝠🝡🝢🝣🝤🝥🝦🝧🝨🝩🝪🝫🝬🝭🝮🝯🝰🝱🝲🝳🝴🝵🝶🝻🝼🝽🝾🝿🞀🞁🞂🞃🞄🞅🞆🞇🞈🞉🞊🞋🞌🞍🞎🞏🞐🞑🞒🞓🞔🞕🞖🞗🞘🞙🞚🞛🞜🞝🞞🞟🞠🞡🞢🞣🞤🞥🞦🞧🞨🞩🞪🞫🞬🞭🞮🞯🞰🞱🞲🞳🞴🞵🞶🞷🞸🞹🞺🞻🞼🞽🞾🞿🟀🟁🟂🟃🟄🟅🟆🟇🟈🟉🟊🟋🟌🟍🟎🟏🟐🟑🟒🟓🟔🟕🟖🟗🟘🟙🟠🟡🟢🟣🟤🟥🟦🟧🟨🟩🟪🟫🟰🠀🠁🠂🠃🠄🠅🠆🠇🠈🠉🠊🠋🠐🠑🠒🠓🠔🠕🠖🠗🠘🠙🠚🠛🠜🠝🠞🠟🠠🠡🠢🠣🠤🠥🠦🠧🠨🠩🠪🠫🠬🠭🠮🠯🠰🠱🠲🠳🠴🠵🠶🠷🠸🠹🠺🠻🠼🠽🠾🠿🡀🡁🡂🡃🡄🡅🡆🡇🡐🡑🡒🡓🡔🡕🡖🡗🡘🡙🡠🡡🡢🡣🡤🡥🡦🡧🡨🡩🡪🡫🡬🡭🡮🡯🡰🡱🡲🡳🡴🡵🡶🡷🡸🡹🡺🡻🡼🡽🡾🡿🢀🢁🢂🢃🢄🢅🢆🢇🢐🢑🢒🢓🢔🢕🢖🢗🢘🢙🢚🢛🢜🢝🢞🢟🢠🢡🢢🢣🢤🢥🢦🢧🢨🢩🢪🢫🢬🢭🢰🢱🤀🤁🤂🤃🤄🤅🤆🤇🤈🤉🤊🤋🤌🤍🤎🤏🤐🤑🤒🤓🤔🤕🤖🤗🤘🤙🤚🤛🤜🤝🤞🤟🤠🤡🤢🤣🤤🤥🤦🤧🤨🤩🤪🤫🤬🤭🤮🤯🤰🤱🤲🤳🤴🤵🤶🤷🤸🤹🤺🤻🤼🤽🤾🤿🥀🥁🥂🥃🥄🥅🥆🥇🥈🥉🥊🥋🥌🥍🥎🥏🥐🥑🥒🥓🥔🥕🥖🥗🥘🥙🥚🥛🥜🥝🥞🥟🥠🥡🥢🥣🥤🥥🥦🥧🥨🥩🥪🥫🥬🥭🥮🥯🥰🥱🥲🥳🥴🥵🥶🥷🥸🥹🥺🥻🥼🥽🥾🥿🦀🦁🦂🦃🦄🦅🦆🦇🦈🦉🦊🦋🦌🦍🦎🦏🦐🦑🦒🦓🦔🦕🦖🦗🦘🦙🦚🦛🦜🦝🦞🦟🦠🦡🦢🦣🦤🦥🦦🦧🦨🦩🦪🦫🦬🦭🦮🦯🦰🦱🦲🦳🦴🦵🦶🦷🦸🦹🦺🦻🦼🦽🦾🦿🧀🧁🧂🧃🧄🧅🧆🧇🧈🧉🧊🧋🧌🧍🧎🧏🧐🧑🧒🧓🧔🧕🧖🧗🧘🧙🧚🧛🧜🧝🧞🧟🧠🧡🧢🧣🧤🧥🧦🧧🧨🧩🧪🧫🧬🧭🧮🧯🧰🧱🧲🧳🧴🧵🧶🧷🧸🧹🧺🧻🧼🧽🧾🧿🨀🨁🨂🨃🨄🨅🨆🨇🨈🨉🨊🨋🨌🨍🨎🨏🨐🨑🨒🨓🨔🨕🨖🨗🨘🨙🨚🨛🨜🨝🨞🨟🨠🨡🨢🨣🨤🨥🨦🨧🨨🨩🨪🨫🨬🨭🨮🨯🨰🨱🨲🨳🨴🨵🨶🨷🨸🨹🨺🨻🨼🨽🨾🨿🩀🩁🩂🩃🩄🩅🩆🩇🩈🩉🩊🩋🩌🩍🩎🩏🩐🩑🩒🩓🩠🩡🩢🩣🩤🩥🩦🩧🩨🩩🩪🩫🩬🩭🩰🩱🩲🩳🩴🩵🩶🩷🩸🩹🩺🩻🩼🪀🪁🪂🪃🪄🪅🪆🪇🪈🪐🪑🪒🪓🪔🪕🪖🪗🪘🪙🪚🪛🪜🪝🪞🪟🪠🪡🪢🪣🪤🪥🪦🪧🪨🪩🪪🪫🪬🪭🪮🪯🪰🪱🪲🪳🪴🪵🪶🪷🪸🪹🪺🪻🪼🪽🪿🫀🫁🫂🫃🫄🫅🫎🫏🫐🫑🫒🫓🫔🫕🫖🫗🫘🫙🫚🫛🫠🫡🫢🫣🫤🫥🫦🫧🫨🫰🫱🫲🫳🫴🫵🫶🫷🫸🬀🬁🬂🬃🬄🬅🬆🬇🬈🬉🬊🬋🬌🬍🬎🬏🬐🬑🬒🬓🬔🬕🬖🬗🬘🬙🬚🬛🬜🬝🬞🬟🬠🬡🬢🬣🬤🬥🬦🬧🬨🬩🬪🬫🬬🬭🬮🬯🬰🬱🬲🬳🬴🬵🬶🬷🬸🬹🬺🬻🬼🬽🬾🬿🭀🭁🭂🭃🭄🭅🭆🭇🭈🭉🭊🭋🭌🭍🭎🭏🭐🭑🭒🭓🭔🭕🭖🭗🭘🭙🭚🭛🭜🭝🭞🭟🭠🭡🭢🭣🭤🭥🭦🭧🭨🭩🭪🭫🭬🭭🭮🭯🭰🭱🭲🭳🭴🭵🭶🭷🭸🭹🭺🭻🭼🭽🭾🭿🮀🮁🮂🮃🮄🮅🮆🮇🮈🮉🮊🮋🮌🮍🮎🮏🮐🮑🮒🮔🮕🮖🮗🮘🮙🮚🮛🮜🮝🮞🮟🮠🮡🮢🮣🮤🮥🮦🮧🮨🮩🮪🮫🮬🮭🮮🮯🮰🮱🮲🮳🮴🮵🮶🮷🮸🮹🮺🮻🮼🮽🮾🮿🯀🯁🯂🯃🯄🯅🯆🯇🯈🯉🯊"""
# https://www.compart.com/en/unicode/category
# https://unicode.org/Public/UNIDATA/UnicodeData.txt

# `\p{posix_punct}` character class
POSIX_PUNCT = r"""-!"#$%&'()*+,./:;<=>?@[\]^_`{|}~"""
ALL_PUNCT_RANGES = "".join(find_unicode_ranges(POSIX_PUNCT + UNICODE_PUNCT))
SENTENCE_PUNCT = """.?!:;'"()[-]“”·…"""


LINKU = Path(__file__).resolve().parent / Path("linku.json")
SANDBOX = Path(__file__).resolve().parent / Path("sandbox.json")

VOWELS = "aeiou"
CONSONANTS = "jklmnpstw"
ALPHABET = VOWELS + CONSONANTS

LANGUAGE = "english"  # for NLTK

"""Commonly occurring strings which are some kind of valid Toki Pona or external token"""
ALLOWABLES = {
    "cw",  # Content Warning
    "x",  # ala
    "y",  # anu
    "kxk",  # ken ala ken
    "wxw",  # wile ala wile
}

with open(LINKU) as f:
    linku: Dict[str, Dict[str, str]] = json.loads(f.read())
    NIMI_PU: List[str] = [d["word"] for d in linku.values() if d["book"] == "pu"]
    NIMI_PU_ALE: List[str] = NIMI_PU + ["namako", "kin", "oko"]
    NIMI_LINKU: List[str] = [
        d["word"] for d in linku.values() if d["usage_category"] in ["core", "common"]
    ]
    NIMI_LINKU_ALE: List[str] = [d["word"] for d in linku.values()]

with open(SANDBOX) as f:
    sandbox: Dict[str, Dict[str, str]] = json.loads(f.read())
    NIMI_LINKU_SANDBOX: List[str] = [d["word"] for d in sandbox.values()]

del linku
del sandbox

__all__ = [
    "ALPHABET",
    "CONSONANTS",
    "NIMI_LINKU",
    "NIMI_LINKU_ALE",
    "NIMI_LINKU_SANDBOX",
    "NIMI_PU",
    "NIMI_PU_ALE",
    "VOWELS",
    "UNICODE_PUNCT",
    "ALLOWABLES",
    "POSIX_PUNCT",
    "",
]

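A few consequences of these definitions, sketched below; the word lists are read from the bundled linku.json, so their exact contents depend on the shipped data:

    from sonatoki.constants import ALPHABET, NIMI_PU, ALL_PUNCT_RANGES

    len(ALPHABET)       # 14: "aeiou" + "jklmnpstw"
    "toki" in NIMI_PU   # True, assuming linku.json marks it with book == "pu"
    # ALL_PUNCT_RANGES is a regex character-class body, used elsewhere as
    # re.compile(rf"[{ALL_PUNCT_RANGES}]+")
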
sonatoki-0.2.0/src/sonatoki/utils.py (new file)

@@ -0,0 +1,90 @@
# STL
import re
from typing import List

TO_ESCAPE = ["^", "]", "\\"]


def regex_escape(s: str) -> str:
    """Escape all characters which must be escaped when embedded in a character class."""
    for c in TO_ESCAPE:
        s = s.replace(c, f"\\{c}")  # one backslash
    return s


def to_range(start: int, prev: int) -> str:
    if start == prev:
        return rf"\U{start:08x}"
    return rf"\U{start:08x}-\U{prev:08x}"


def find_unicode_ranges(chars: str) -> List[str]:
    if not chars:
        return []

    s_chars = sorted(set(chars))

    ranges: List[str] = []
    start = ord(s_chars[0])
    prev = start

    for i in range(1, len(s_chars)):
        cur = ord(s_chars[i])
        if cur == prev + 1:  # range is still contiguous
            prev = cur
            continue

        ranges.append(to_range(start, prev))
        start = prev = cur

    last = ord(s_chars[-1])
    ranges.append(to_range(start, last))

    return ranges


if __name__ == "__main__":
    """
    Helper script to fetch UNICODE_PUNCT in constants.py
    """

    PUNCT_CATEGORIES = {"Pc", "Pd", "Pe", "Pf", "Pi", "Po", "Ps", "Sm", "Sk", "So"}
    # Connector, Dash, Close (end), Final, Initial, Other, Open (start), Symbol math, Symbol modifier, Symbol other

    # NOTE: UnicodeData.txt lists character ranges if there would be many characters.
    # (e.g. CJK Ideograph, First at 4E00 and CJK Ideograph, Last at 9FFF).
    # This does not apply to any currently defined punctuation category.

    EXCEPTION_RANGES = re.compile(r"""[Ⓐ-ⓩ🄰-🅉🅐-🅩🅰-🆉]+""")
    # These groups are in Symbol other (So) but are not part of `\p{Punctuation}`
    # NOTE: There are many characters which look like writing characters but are not. Examples:
    # - kangxi radicals from ⺀ to ⿕ which are for demonstration
    # - circled katakana from ㋐ to ㋾ which... shouldn't be in \p{Punctuation} but oh well

    def is_punctuation(data: List[str]):
        return data[2] in PUNCT_CATEGORIES

    def get_character(data: List[str]):
        return chr(int(data[0], 16))

    def is_exception(c: str):
        return not not re.fullmatch(EXCEPTION_RANGES, c)

    # http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
    #

    unicode_punctuation = ""
    with open("UnicodeData.txt", "r") as f:
        for line in f:
            data = line.split(";")
            if not is_punctuation(data):
                continue

            char = get_character(data)
            if is_exception(char):
                continue

            unicode_punctuation += char

    with open("UnicodePunctuation.txt", "w") as f:
        _ = f.write(unicode_punctuation)

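find_unicode_ranges collapses a character set into \U-escaped ranges ready for embedding in a regex character class; a worked example against the implementation above:

    from sonatoki.utils import find_unicode_ranges

    find_unicode_ranges("abcx")
    # -> ['\\U00000061-\\U00000063', '\\U00000078']
    # "abc" is contiguous and folds into one range; "x" stands alone.
    # Each string holds a literal backslash-U escape, not a decoded character.
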
sonatoki-0.2.0/tests/test_cleaners.py (new file)

@@ -0,0 +1,36 @@
# STL

# PDM
import hypothesis.strategies as st
from hypothesis import given, assume, example

# LOCAL
from sonatoki.Cleaners import ConsecutiveDuplicates, ConsecutiveDuplicatesRe

# FILESYSTEM
from .test_utils import overlapping_pairs


@given(st.from_regex(ConsecutiveDuplicatesRe.pattern.pattern))
@example("tooooki a")
@example("muuuuuu")
@example("nnn")
@example("")
@example("manna")  # syllabically but not phonotactically valid
def test_ConsecutiveDuplicatesRe(s: str):
    _ = assume("\n" not in s)
    res = ConsecutiveDuplicatesRe.clean(s)
    for a, b in overlapping_pairs(res):
        assert a.lower() != b.lower(), (s, res)


@given(st.from_regex(ConsecutiveDuplicatesRe.pattern.pattern))
@example("Aaa")
@example("aAa")
@example("aaA")
@example("BbbbrrrRRRUUuuuhHhHhH")
def test_ConsecutiveDuplicates(s: str):
    _ = assume("\n" not in s)
    res_re = ConsecutiveDuplicatesRe.clean(s)
    res_fn = ConsecutiveDuplicates.clean(s)
    assert res_re == res_fn, repr(s)