sonatoki 0.9.2__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sonatoki/Tokenizers.py CHANGED
@@ -12,9 +12,13 @@ from sonatoki.utils import regex_escape
 from sonatoki.Filters import NimiUCSUR  # seriously this sucks
 from sonatoki.constants import (
     ALL_PUNCT,
-    SENTENCE_PUNCT,
     INTRA_WORD_PUNCT,
+    ALL_SENTENCE_PUNCT,
+    UNICODE_WHITESPACE,
     ALL_PUNCT_RANGES_STR,
+    UCSUR_CARTOUCHE_LEFT,
+    UCSUR_CARTOUCHE_RIGHT,
+    UCSUR_MINUS_CARTOUCHE,
 )

 regex.DEFAULT_VERSION = regex.VERSION1
@@ -146,7 +150,9 @@ class WordTokenizerRe1(Regex1Tokenizer):


 class SentTokenizer(SetTokenizer):
-    delimiters = set(SENTENCE_PUNCT + "\n")  # regex does \n with a flag
+    delimiters: Set[str] = set(ALL_SENTENCE_PUNCT + "\n")  # regex does \n with a flag
+    intra_word_punct: Set[str] = set(INTRA_WORD_PUNCT)
+    all_punct: Set[str] = set(ALL_PUNCT + UNICODE_WHITESPACE)

     @classmethod
     @override
@@ -155,16 +161,43 @@ class SentTokenizer(SetTokenizer):
             return []

         tokens: List[str] = []
+
+        slen = len(s)
         last_match = 0
-        for i, char in enumerate(s):
-            if char not in cls.delimiters:
+        i = 0
+        while i < slen:
+            # if a cartouche appears, we do not want to split on its punctuation
+            if s[i] == UCSUR_CARTOUCHE_LEFT:
+                right_i = s.find(UCSUR_CARTOUCHE_RIGHT, i)
+                contained: set[str] = set()
+                if right_i > 0:
+                    contained = set(s[i + 1 : right_i])
+                # but it must contain only non-cartouche UCSUR chars
+                if contained and contained.issubset(UCSUR_MINUS_CARTOUCHE):
+                    i = right_i + 1
+                    continue
+            if s[i] not in cls.delimiters:
+                i += 1
                 continue
+            if s[i] in cls.intra_word_punct:
+                prev = s[i - 1] if i > 0 else ""
+                next = s[i + 1] if i + 1 < slen else ""
+                if (
+                    prev
+                    and next
+                    and prev not in cls.all_punct
+                    and next not in cls.all_punct
+                ):
+                    i += 2
+                    continue

             match = s[last_match : i + 1].strip()
             last_match = i + 1  # newlines can strip but idc
             if not match:
+                i += 1
                 continue
             tokens.append(match)
+            i += 1

         match = s[last_match:].strip()
         if match:
@@ -173,18 +206,24 @@ class SentTokenizer(SetTokenizer):
         return tokens


+@deprecated(
+    "SentTokenizerRe is a previous reference implementation. Its behavior has diverged from SentTokenizer and it may not be restored."
+)
 class SentTokenizerRe(RegexTokenizer):
     pattern = re.compile(
-        rf"""(?<=[{regex_escape(SENTENCE_PUNCT)}])|$""", flags=re.MULTILINE
+        rf"""(?<=[{regex_escape(ALL_SENTENCE_PUNCT)}])|$""", flags=re.MULTILINE
     )
     # TODO: are <> or {} that common as *sentence* delims? [] are already a stretch
     # TODO: do the typography characters matter?
     # NOTE: | / and , are *not* sentence delimiters for my purpose


+@deprecated(
+    "SentTokenizerRe1 is a previous reference implementation. Its behavior has diverged from SentTokenizer and it may not be restored."
+)
 class SentTokenizerRe1(Regex1Tokenizer):
     pattern = regex.compile(
-        rf"""(?<=[{regex_escape(SENTENCE_PUNCT)}]|$)""", flags=regex.MULTILINE
+        rf"""(?<=[{regex_escape(ALL_SENTENCE_PUNCT)}]|$)""", flags=regex.MULTILINE
     )

sonatoki/__main__.py CHANGED
@@ -24,6 +24,7 @@ from sonatoki.Cleaners import ConsecutiveDuplicates
 from sonatoki.constants import (
     UCSUR_PUNCT_RANGES,
     UNICODE_PUNCT_RANGES,
+    UNICODE_WHITESPACE_RANGES,
     EMOJI_VARIATION_SELECTOR_RANGES,
 )

@@ -121,6 +122,11 @@ def regen_unicode_data():
         "Sc",  # Currency
         "So",  # Other
     }
+    WHITESPACE_CATEGORIES = {
+        "Zl",  # Line Separator
+        "Zp",  # Paragraph Separator
+        "Zs",  # Space Separator
+    }
     r"""These characters are in Symbol other (So) but are not in
     `\p{Punctuation}` However, I began excluding them again, because it turns
     out that some sequences of latin alphabet emoji."""
@@ -134,11 +140,15 @@ def regen_unicode_data():
     def is_punctuation(data: List[str]):
         return data[2] in PUNCT_CATEGORIES

+    def is_whitespace(data: List[str]):
+        return data[2] in WHITESPACE_CATEGORIES
+
     def get_character(data: List[str]):
         return chr(int(data[0], 16))

     unicode_data = download(UNICODE_DATA)
     unicode_punctuation = ""
+    unicode_whitespace = ""
     for line in unicode_data.split("\n"):
         if not line:  # damn you, trailing newline
             continue
@@ -147,24 +157,35 @@ def regen_unicode_data():
         # This does not apply to any currently defined punctuation category.

         unicode_data = line.split(";")
-        if not is_punctuation(unicode_data):
+        if is_punctuation(unicode_data):
+            char = get_character(unicode_data)
+            unicode_punctuation += char
+            continue
+        if is_whitespace((unicode_data)):
+            char = get_character(unicode_data)
+            unicode_whitespace += char
             continue
-
-        char = get_character(unicode_data)
-
-        unicode_punctuation += char

     unicode_punctuation = emoji.replace_emoji(unicode_punctuation)

-    unicode_ranges = find_unicode_ranges(unicode_punctuation)
-    unicode_ranges.extend(UCSUR_PUNCT_RANGES)
-    # unicode_ranges.extend(EMOJI_VARIATION_SELECTOR_RANGES)  # made unnecessary by emoji library
-    unicode_ranges = sorted(unicode_ranges)
+    unicode_punct_ranges = find_unicode_ranges(unicode_punctuation)
+    unicode_punct_ranges.extend(UCSUR_PUNCT_RANGES)
+    unicode_punct_ranges = sorted(unicode_punct_ranges)
     # sorted in case my manual additions are out of order

-    if unicode_ranges != UNICODE_PUNCT_RANGES:
-        output = json.dumps(unicode_ranges, indent=4, ensure_ascii=True)
-        print(output)
+    # TODO: can i push these outputs directly into the constants.py file?
+
+    if unicode_punct_ranges != UNICODE_PUNCT_RANGES:
+        output = json.dumps(unicode_punct_ranges, indent=4, ensure_ascii=True)
+        with open("updated_unicode_punct_ranges.txt", "w") as f:
+            f.write(output)
+
+    unicode_whitespace_ranges = find_unicode_ranges(unicode_whitespace)
+    unicode_whitespace_ranges = sorted(unicode_whitespace_ranges)
+    if unicode_whitespace_ranges != UNICODE_WHITESPACE_RANGES:
+        output = json.dumps(unicode_whitespace_ranges, indent=4, ensure_ascii=True)
+        with open("updated_unicode_whitespace_ranges.txt", "w") as f:
+            f.write(output)


 def main(argv: argparse.Namespace):
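For context, UnicodeData.txt is a semicolon-separated table whose third field is the general category, which is all the new `is_whitespace` inspects. Here is a self-contained sketch of that same filter; the helper name is hypothetical, and unlike the real script it takes the table as a string instead of downloading it:

```python
# Standalone sketch of the category filter regen_unicode_data now applies.
# A UnicodeData.txt line looks like "0020;SPACE;Zs;0;WS;;;;;N;;;;;",
# with the general category at field index 2.
WHITESPACE_CATEGORIES = {"Zl", "Zp", "Zs"}

def collect_whitespace(unicode_data: str) -> str:
    chars = ""
    for line in unicode_data.split("\n"):
        if not line:  # skip the empty split from a trailing newline
            continue
        fields = line.split(";")
        if fields[2] in WHITESPACE_CATEGORIES:
            # field 0 is the code point in hex
            chars += chr(int(fields[0], 16))
    return chars

print(repr(collect_whitespace("0020;SPACE;Zs;0;WS;;;;;N;;;;;")))  # "' '"
```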
sonatoki/constants.py CHANGED
@@ -109,8 +109,9 @@ UNICODE_PUNCT_RANGES = [
     "\\U00001a1e-\\U00001a1f",
     "\\U00001aa0-\\U00001aa6",
     "\\U00001aa8-\\U00001aad",
+    "\\U00001b4e-\\U00001b4f",
     "\\U00001b5a-\\U00001b6a",
-    "\\U00001b74-\\U00001b7e",
+    "\\U00001b74-\\U00001b7f",
     "\\U00001bfc-\\U00001bff",
     "\\U00001c3b-\\U00001c3f",
     "\\U00001c7e-\\U00001c7f",
@@ -152,7 +153,7 @@ UNICODE_PUNCT_RANGES = [
     "\\U00002329-\\U000023ce",
     "\\U000023d0-\\U000023e8",
     "\\U000023f4-\\U000023f7",
-    "\\U000023fb-\\U00002426",
+    "\\U000023fb-\\U00002429",
     "\\U00002440-\\U0000244a",
     "\\U0000249c-\\U000024c1",
     "\\U000024c3-\\U000024e9",
@@ -248,7 +249,7 @@ UNICODE_PUNCT_RANGES = [
     "\\U000030fb",
     "\\U00003190-\\U00003191",
     "\\U00003196-\\U0000319f",
-    "\\U000031c0-\\U000031e3",
+    "\\U000031c0-\\U000031e5",
     "\\U000031ef",
     "\\U00003200-\\U0000321e",
     "\\U0000322a-\\U00003247",
@@ -321,6 +322,8 @@ UNICODE_PUNCT_RANGES = [
     "\\U00010af0-\\U00010af6",
     "\\U00010b39-\\U00010b3f",
     "\\U00010b99-\\U00010b9c",
+    "\\U00010d6e",
+    "\\U00010d8e-\\U00010d8f",
     "\\U00010ead",
     "\\U00010f55-\\U00010f59",
     "\\U00010f86-\\U00010f89",
@@ -335,6 +338,8 @@ UNICODE_PUNCT_RANGES = [
     "\\U000111dd-\\U000111df",
     "\\U00011238-\\U0001123d",
     "\\U000112a9",
+    "\\U000113d4-\\U000113d5",
+    "\\U000113d7-\\U000113d8",
     "\\U0001144b-\\U0001144f",
     "\\U0001145a-\\U0001145b",
     "\\U0001145d",
@@ -351,6 +356,7 @@ UNICODE_PUNCT_RANGES = [
     "\\U00011a9a-\\U00011a9c",
     "\\U00011a9e-\\U00011aa2",
     "\\U00011b00-\\U00011b09",
+    "\\U00011be1",
     "\\U00011c41-\\U00011c45",
     "\\U00011c70-\\U00011c71",
     "\\U00011ef7-\\U00011ef8",
@@ -363,10 +369,13 @@ UNICODE_PUNCT_RANGES = [
     "\\U00016af5",
     "\\U00016b37-\\U00016b3f",
     "\\U00016b44-\\U00016b45",
+    "\\U00016d6d-\\U00016d6f",
     "\\U00016e97-\\U00016e9a",
     "\\U00016fe2",
     "\\U0001bc9c",
     "\\U0001bc9f",
+    "\\U0001cc00-\\U0001ccef",
+    "\\U0001cd00-\\U0001ceb3",
     "\\U0001cf50-\\U0001cfc3",
     "\\U0001d000-\\U0001d0f5",
     "\\U0001d100-\\U0001d126",
@@ -395,6 +404,7 @@ UNICODE_PUNCT_RANGES = [
     "\\U0001da85-\\U0001da8b",
     "\\U0001e14f",
     "\\U0001e2ff",
+    "\\U0001e5ff",
     "\\U0001e95e-\\U0001e95f",
     "\\U0001ecac",
     "\\U0001ecb0",
@@ -464,16 +474,41 @@ UNICODE_PUNCT_RANGES = [
     "\\U0001f850-\\U0001f859",
     "\\U0001f860-\\U0001f887",
     "\\U0001f890-\\U0001f8ad",
-    "\\U0001f8b0-\\U0001f8b1",
+    "\\U0001f8b0-\\U0001f8bb",
+    "\\U0001f8c0-\\U0001f8c1",
     "\\U0001f900-\\U0001f90b",
     "\\U0001f93b",
     "\\U0001f946",
     "\\U0001fa00-\\U0001fa53",
     "\\U0001fa60-\\U0001fa6d",
     "\\U0001fb00-\\U0001fb92",
-    "\\U0001fb94-\\U0001fbca",
+    "\\U0001fb94-\\U0001fbef",
     "\\U000f1990-\\U000f199d",
 ]
+UNICODE_PUNCT = find_unicode_chars(UNICODE_PUNCT_RANGES)
+# this is a large string.
+
+# `\p{posix_punct}` character class
+POSIX_PUNCT = r"""-!"#$%&'()*+,./:;<=>?@[\]^_`{|}~"""
+POSIX_PUNCT_RANGES = find_unicode_ranges(POSIX_PUNCT)
+
+ALL_PUNCT = "".join(sorted(list(set(POSIX_PUNCT + UNICODE_PUNCT))))
+ALL_PUNCT_RANGES_STR = "".join(find_unicode_ranges(ALL_PUNCT))
+# combined bc the result could be simpler
+
+
+UNICODE_WHITESPACE_RANGES = [
+    "\\U00000020",
+    "\\U000000a0",
+    "\\U00001680",
+    "\\U00002000-\\U0000200a",
+    "\\U00002028-\\U00002029",
+    "\\U0000202f",
+    "\\U0000205f",
+    "\\U00003000",
+]
+UNICODE_WHITESPACE = find_unicode_chars(UNICODE_WHITESPACE_RANGES)
+UNICODE_WHITESPACE_RANGES_STR = "".join(UNICODE_WHITESPACE_RANGES)


 NOT_IN_PUNCT_CLASS = r"Ⓐ-ⓩ🄰-🅉🅐-🅩🅰-🆉"
@@ -482,9 +517,7 @@ EMOJI_VARIATION_SELECTOR_RANGES = ["\\U0000fe0e-\\U0000fe0f"]
 EMOJI_VARIATION_SELECTOR_RANGES_STR = "".join(EMOJI_VARIATION_SELECTOR_RANGES)
 """All variation selectors are in Nonspacing Mark (Mn), but it is more apt to
 mark these two as punctuation, since they are used exclusively for rendering
-emoji.
-
-But it's even better to use the Emoji filter.
+emoji. But it's best to use the Emoji filter.
 """

 UCSUR_PUNCT_RANGES = ["\\U000f1990-\\U000f199d"]
@@ -492,20 +525,15 @@ UCSUR_PUNCT_RANGES_STR = "".join(UCSUR_PUNCT_RANGES)
 """Private Use Area glyphs are given the apt but unhelpful 'Private Use'
 class."""

-UNICODE_PUNCT = find_unicode_chars(UNICODE_PUNCT_RANGES)
-# this is a large string.
-
-# `\p{posix_punct}` character class
-POSIX_PUNCT = r"""-!"#$%&'()*+,./:;<=>?@[\]^_`{|}~"""
-POSIX_PUNCT_RANGES = find_unicode_ranges(POSIX_PUNCT)
-
-ALL_PUNCT = "".join(sorted(list(set(POSIX_PUNCT + UNICODE_PUNCT))))
-ALL_PUNCT_RANGES_STR = "".join(find_unicode_ranges(ALL_PUNCT))
-# combined bc the result could be simpler
+UCSUR_CARTOUCHE_LEFT = "󱦐"
+UCSUR_CARTOUCHE_RIGHT = "󱦑"

-SENTENCE_PUNCT = """.?!:;"()[-]«»‹›“”‟„⹂‽·•…「」『』"""
 # single quotes are word boundaries if not intra-word, but double quotes are sentence
 # boundaries
+BASIC_SENTENCE_PUNCT = """.?!:;()[-]‽·•…"""
+QUOTATIVE_PUNCT = """"«»‹›“”‟„⹂「」『』"""
+UCSUR_SENTENCE_PUNCT = """󱦜󱦝"""
+ALL_SENTENCE_PUNCT = BASIC_SENTENCE_PUNCT + UCSUR_SENTENCE_PUNCT

 INTRA_WORD_PUNCT = """-'’."""

@@ -691,7 +719,11 @@ UCSUR_RANGES = [
     "\\U000F19A0-\\U000F19A3",  # ku lili
 ]
 NIMI_UCSUR = find_unicode_chars(UCSUR_RANGES)
-
+ALL_UCSUR = NIMI_UCSUR + find_unicode_chars(UCSUR_PUNCT_RANGES)
+UCSUR_MINUS_CARTOUCHE = set(ALL_UCSUR).difference(
+    {UCSUR_CARTOUCHE_LEFT, UCSUR_CARTOUCHE_RIGHT}
+)
+print(UCSUR_MINUS_CARTOUCHE)

 # NIMI_PU_UCSUR_RANGES = ["\\U000F1900-\\U000F1977"]
 # NIMI_PU_ALE_UCSUR_RANGES = NIMI_PU_UCSUR_RANGES + ["\\U000F1978-\\U000F197A"]
@@ -757,7 +789,9 @@ __all__ = [
     "POSIX_PUNCT_RANGES",
     "UCSUR_PUNCT_RANGES",
     "UCSUR_PUNCT_RANGES_STR",
+    "UCSUR_SENTENCE_PUNCT",
     "UNICODE_PUNCT",
     "UNICODE_PUNCT_RANGES",
+    "UNICODE_WHITESPACE",
     "VOWELS",
 ]
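The new `UNICODE_WHITESPACE` constant is simply the expansion of `UNICODE_WHITESPACE_RANGES` into the characters themselves. A small sketch, assuming `find_unicode_chars` and `find_unicode_ranges` are importable from `sonatoki.utils` as `constants.py`'s own usage suggests, and that the range-to-characters round-trip is lossless, as the regen script's equality check implies:

```python
from sonatoki.utils import find_unicode_chars, find_unicode_ranges

# Expand two of the whitespace ranges above into actual characters.
chars = find_unicode_chars(["\\U00002000-\\U0000200a", "\\U00003000"])
assert len(chars) == 12   # U+2000..U+200A is eleven spaces, plus U+3000
assert "\u2003" in chars  # EM SPACE falls inside the first range

# Collapsing back to ranges should reproduce the input exactly.
assert find_unicode_ranges(chars) == ["\\U00002000-\\U0000200a", "\\U00003000"]
```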
{sonatoki-0.9.2.dist-info → sonatoki-0.10.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sonatoki
-Version: 0.9.2
+Version: 0.10.0
 Summary: ilo li moku e toki li pana e sona ni: ni li toki ala toki pona?
 Author-Email: "jan Kekan San (@gregdan3)" <gregory.danielson3@gmail.com>
 License: AGPL-3.0-or-later
{sonatoki-0.9.2.dist-info → sonatoki-0.10.0.dist-info}/RECORD RENAMED
@@ -1,17 +1,17 @@
-sonatoki-0.9.2.dist-info/METADATA,sha256=nTqR-hm823FWnDVMJCgoWwmhSU4RaE2fdayXQcixd4o,6893
-sonatoki-0.9.2.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
-sonatoki-0.9.2.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-sonatoki-0.9.2.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+sonatoki-0.10.0.dist-info/METADATA,sha256=FS4LM5QUcxvHUY5Zq1IyT85MRAtJiq_sNWArztUI8D8,6894
+sonatoki-0.10.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+sonatoki-0.10.0.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+sonatoki-0.10.0.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 sonatoki/Cleaners.py,sha256=x2dT3MpDUfbrHA0EP2D3n1sTiKFFi5jw9ha-1dX973o,1958
 sonatoki/Configs.py,sha256=6TY-G1nZFGv5EcElatWvI5MagwVCo92D5TTl7s2PX_s,4899
 sonatoki/Filters.py,sha256=8HAtR6_Rk6GPboaS_MHwSjZBJxYnAA8kYbRPI0eR6sM,14823
 sonatoki/Preprocessors.py,sha256=RmzkvPVo6Kdx1rZ5HeR9cTtx6oxpp2iLKrOMCUEqIrM,7107
 sonatoki/Scorers.py,sha256=zkdWc0hbtCX1HPdhI2tu2mL4Z5_S5sv7T83MefE4Yik,7756
-sonatoki/Tokenizers.py,sha256=cfWWZCfvn2tNJChDrofHrORZExp17g0rPmH5ydWgTQY,5219
+sonatoki/Tokenizers.py,sha256=yAHqVF7G-bH5i7nsvYH-dMV2qjeKvLW2W2F-fgyUnR4,6783
 sonatoki/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sonatoki/__main__.py,sha256=QIWRLYS1jb7OBUBK5s8kYoeiMv6MLBlt_I7H7tIVjpU,5745
+sonatoki/__main__.py,sha256=394ldEB4tFpw1UJLV4S4jJ55NfyLgH8rE7o3VWJoGik,6650
 sonatoki/alphabetic.txt,sha256=duyqAKilD2vLIr75RShCIAnktNJcGeEoQIk18V6czmg,11702
-sonatoki/constants.py,sha256=BrU45haroW-ya3qmFsVk7fdTUGyoYVw1MdVVnpiWjt8,19517
+sonatoki/constants.py,sha256=0MWyk7a6Hq8hVxV-WJ6LEkuq8LnJy-qKUah6I3PbRns,20602
 sonatoki/ilo.py,sha256=Dsn0yagkwjqpAQoCj6mkZ6NqWeanRF2lxNDNoqjWGLo,5993
 sonatoki/linku.json,sha256=U5KVxFJSageQydXXDsQCT8X_QoNAK2OaZhJmbu0eoZo,299939
 sonatoki/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -19,4 +19,4 @@ sonatoki/sandbox.json,sha256=QAviQZ7_nwstUr1ejKegxiIoYmBL2YJIoiZovDYNFL8,147485
 sonatoki/syllabic.txt,sha256=HnqY4TrZ3tPcHah3TsvG9F9gjMrnAGdJ8hHJNHyyUPc,1712
 sonatoki/types.py,sha256=VjYSGAzsbR_d3mg8n-VHg__7LyXpmGdEIMDsbPHyxFw,1265
 sonatoki/utils.py,sha256=sT5xLMEj0aLpy8GP92HKblJU1Wt1m8NUlMgCFWB32xQ,2265
-sonatoki-0.9.2.dist-info/RECORD,,
+sonatoki-0.10.0.dist-info/RECORD,,