sonatoki 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sonatoki/Configs.py CHANGED
@@ -1,6 +1,6 @@
  # STL
  from copy import deepcopy
- from typing import List, Type, Union, TypedDict
+ from typing import List, Type, TypedDict

  # LOCAL
  from sonatoki.Filters import (
@@ -9,16 +9,19 @@ from sonatoki.Filters import (
      Numeric,
      OrFilter,
      Syllabic,
-     NimiLinku,
-     NimiPuAle,
      NimiUCSUR,
      Alphabetic,
      ProperName,
      Phonotactic,
      Punctuation,
-     NimiLinkuAle,
+     NimiLinkuCore,
+     NimiPuSynonyms,
+     OrMemberFilter,
+     NimiLinkuCommon,
+     NimiLinkuObscure,
      NimiLinkuSandbox,
      EnglishIgnorables,
+     NimiLinkuUncommon,
  )
  from sonatoki.Scorers import Number, Scorer, PassFail, SoftScaling, SoftPassFail
  from sonatoki.Cleaners import Cleaner, ConsecutiveDuplicates
@@ -63,7 +66,7 @@ PrefConfig: IloConfig = {
      "cleaners": [ConsecutiveDuplicates],
      "ignoring_filters": [Numeric, Punctuation, EnglishIgnorables],
      "scoring_filters": [
-         OrFilter(NimiLinku, NimiUCSUR),
+         OrMemberFilter(NimiLinkuCore, NimiLinkuCommon, NimiUCSUR),
          Syllabic,
          ProperName,
          Alphabetic,
@@ -78,7 +81,14 @@ CorpusConfig: IloConfig = {
      "cleaners": [ConsecutiveDuplicates],
      "ignoring_filters": [Numeric, Punctuation, EnglishIgnorables],
      "scoring_filters": [
-         OrFilter(NimiLinkuSandbox, NimiUCSUR),
+         OrMemberFilter(
+             NimiLinkuCore,
+             NimiLinkuCommon,
+             NimiLinkuUncommon,
+             NimiLinkuObscure,
+             NimiLinkuSandbox,
+             NimiUCSUR,
+         ),
          Syllabic,
          ProperName,
          Alphabetic,
@@ -104,7 +114,7 @@ DiscordConfig: IloConfig = {
      "cleaners": [ConsecutiveDuplicates],
      "ignoring_filters": [Numeric, Punctuation, EnglishIgnorables],
      "scoring_filters": [
-         OrFilter(NimiLinku, NimiUCSUR),
+         OrMemberFilter(NimiLinkuCore, NimiLinkuCommon, NimiUCSUR),
          Syllabic,
          ProperName,
          Alphabetic,
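For context (not part of the diff): these configs are consumed by `Ilo`. A minimal usage sketch follows; the `is_toki_pona` call is assumed from the package README's quick start, which is only partly visible further down in this diff.

```python
# Sketch only: PrefConfig now scores word membership through a single
# OrMemberFilter(NimiLinkuCore, NimiLinkuCommon, NimiUCSUR) token set.
# `is_toki_pona` is assumed from the README quick start, not shown in this diff.
from sonatoki.ilo import Ilo
from sonatoki.Configs import PrefConfig

ilo = Ilo(**PrefConfig)
print(ilo.is_toki_pona("mi olin e sina"))  # expected True for plain Toki Pona text
```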
sonatoki/Filters.py CHANGED
@@ -9,6 +9,7 @@ import regex
  from typing_extensions import override

  # LOCAL
+ from sonatoki.utils import prep_dictionary
  from sonatoki.constants import (
      VOWELS,
      NIMI_PU,
@@ -17,13 +18,17 @@ from sonatoki.constants import (
      ALLOWABLES,
      CONSONANTS,
      IGNORABLES,
-     NIMI_LINKU,
      NIMI_UCSUR,
-     NIMI_LINKU_LILI,
+     NIMI_KU_LILI,
+     NIMI_KU_SULI,
+     NIMI_LINKU_CORE,
      ALL_PUNCT_RANGES,
      NIMI_PU_SYNONYMS,
+     NIMI_LINKU_COMMON,
+     NIMI_LINKU_OBSCURE,
      NIMI_LINKU_SANDBOX,
      UCSUR_PUNCT_RANGES,
+     NIMI_LINKU_UNCOMMON,
  )

  regex.DEFAULT_VERSION = regex.VERSION1
@@ -105,27 +110,43 @@ class ProperName(Filter):


  class NimiPu(MemberFilter):
-     tokens = set(NIMI_PU)
+     tokens = prep_dictionary(NIMI_PU)


- class NimiPuAle(MemberFilter):
-     tokens = set(NIMI_PU + NIMI_PU_SYNONYMS)
+ class NimiPuSynonyms(MemberFilter):
+     tokens = prep_dictionary(NIMI_PU_SYNONYMS)


- class NimiLinku(MemberFilter):
-     tokens = set(NIMI_LINKU)
+ class NimiKuSuli(MemberFilter):
+     tokens = prep_dictionary(NIMI_KU_SULI)


- class NimiLinkuAle(MemberFilter):
-     tokens = set(NIMI_LINKU + NIMI_LINKU_LILI)
+ class NimiKuLili(MemberFilter):
+     tokens = prep_dictionary(NIMI_KU_LILI)
+
+
+ class NimiLinkuCore(MemberFilter):
+     tokens = prep_dictionary(NIMI_LINKU_CORE)
+
+
+ class NimiLinkuCommon(MemberFilter):
+     tokens = prep_dictionary(NIMI_LINKU_COMMON)
+
+
+ class NimiLinkuUncommon(MemberFilter):
+     tokens = prep_dictionary(NIMI_LINKU_UNCOMMON)
+
+
+ class NimiLinkuObscure(MemberFilter):
+     tokens = prep_dictionary(NIMI_LINKU_OBSCURE)


  class NimiLinkuSandbox(MemberFilter):
-     tokens = set(NIMI_LINKU + NIMI_LINKU_LILI + NIMI_LINKU_SANDBOX)
+     tokens = prep_dictionary(NIMI_LINKU_SANDBOX)


  class NimiUCSUR(MemberFilter):
-     tokens = set(NIMI_UCSUR)
+     tokens = prep_dictionary(NIMI_UCSUR)


  class Phonotactic(RegexFilter):
@@ -224,11 +245,10 @@ class OrFilter:
      Instead, the user is responsible for building an OrFilter out of their desired filters.
      """

-     def __new__(cls, *filters_: Type[Filter]) -> Type[Filter]:
-         if not len(filters_) >= 2:
-             raise ValueError("Must provide at least two Filters to OrFilter.")
+     @staticmethod
+     def __generic_filter(*filters_: Type[Filter]) -> Type[Filter]:

-         class AnonymousOrFilter(Filter):
+         class CombinedFilter(Filter):
              filters: List[Type[Filter]] = list(filters_)  # TODO: tuple better?

              @classmethod
@@ -240,7 +260,39 @@ class OrFilter:
                          return True
              return False

-         return AnonymousOrFilter
+         return CombinedFilter
+
+     def __new__(cls, *filters: Type[Filter]) -> Type[Filter]:
+         if not len(filters) >= 2:
+             raise ValueError("Provide at least two Filters to OrFilter.")
+
+         subset_filters = [f for f in filters if issubclass(f, MemberFilter)]
+         if len(subset_filters) >= 2:
+             raise Warning(
+                 "Prefer OrMemberFilter for combining two or more MemberFilters."
+             )
+
+         filter = cls.__generic_filter(*filters)
+
+         return filter
+
+
+ class OrMemberFilter:
+     @staticmethod
+     def __subset_filter(*filters: Type[MemberFilter]) -> Type[MemberFilter]:
+         all_token_sets: List[Set[str]] = [f.tokens for f in filters]
+         all_tokens: Set[str] = set().union(*all_token_sets)
+
+         class CombinedFilter(MemberFilter):
+             tokens = all_tokens
+
+         return CombinedFilter
+
+     def __new__(cls, *filters_: Type[MemberFilter]) -> Type[MemberFilter]:
+         if not len(filters_) >= 2:
+             raise ValueError("Provide two or more MemberFilters to OrMemberFilter.")
+         filter = cls.__subset_filter(*filters_)
+         return filter


  class AndFilter(Filter):
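To make the new split concrete, here is a small sketch of the two combinators, assuming the classmethod `filter(token)` interface used throughout the library: `OrFilter` keeps its wrapped filters and tries each one per token, while `OrMemberFilter` unions the member filters' token sets once, producing a single `MemberFilter`.

```python
from sonatoki.Filters import (
    NimiLinkuCore,
    NimiLinkuCommon,
    NimiUCSUR,
    Phonotactic,
    OrFilter,
    OrMemberFilter,
)

# One merged token set; each check is a single membership lookup.
Words = OrMemberFilter(NimiLinkuCore, NimiLinkuCommon, NimiUCSUR)

# Mixed filter kinds still go through OrFilter, which delegates to each filter in turn.
WordsOrPhonotactic = OrFilter(Words, Phonotactic)

Words.filter("toki")                 # True: "toki" is in the Linku "core" set
WordsOrPhonotactic.filter("mumumu")  # True via Phonotactic, not via the word sets
```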
@@ -271,11 +323,10 @@ __all__ = [
      "Alphabetic",
      "AndFilter",
      "EnglishIgnorables",
-     "NimiLinku",
-     "NimiLinkuAle",
+     "NimiLinkuCore",
      "NimiLinkuSandbox",
      "NimiPu",
-     "NimiPuAle",
+     "NimiPuSynonyms",
      "NimiUCSUR",
      "Numeric",
      "OrFilter",
sonatoki/constants.py CHANGED
@@ -421,24 +421,31 @@ UCSUR_RANGES = [
  ]
  NIMI_UCSUR = find_unicode_chars(UCSUR_RANGES)

+
+ # NIMI_PU_UCSUR_RANGES = ["\\U000F1900-\\U000F1977"]
+ # NIMI_PU_ALE_UCSUR_RANGES = NIMI_PU_UCSUR_RANGES + ["\\U000F1978-\\U000F197A"]
+
+
+ def category_helper(data: Dict[str, Dict[str, str]], key: str, value: str) -> List[str]:
+     return [d["word"] for d in data.values() if d[key] == value]
+
+
  with open(LINKU) as f:
      linku: Dict[str, Dict[str, str]] = json.loads(f.read())
-     NIMI_PU: List[str] = [d["word"] for d in linku.values() if d["book"] == "pu"]
+     NIMI_PU: List[str] = category_helper(linku, "book", "pu")
      NIMI_PU_SYNONYMS: List[str] = ["namako", "kin", "oko"]
-     NIMI_LINKU: List[str] = [
-         d["word"] for d in linku.values() if d["usage_category"] in ["core", "common"]
-     ]
-     NIMI_LINKU_LILI: List[str] = [
-         d["word"]
-         for d in linku.values()
-         if d["usage_category"] not in ["core", "common"]
-     ]
+
+     NIMI_KU_SULI = category_helper(linku, "book", "ku suli")
+     NIMI_KU_LILI = category_helper(linku, "book", "ku lili")
+
+     NIMI_LINKU_CORE = category_helper(linku, "usage_category", "core")
+     NIMI_LINKU_COMMON = category_helper(linku, "usage_category", "common")
+     NIMI_LINKU_UNCOMMON = category_helper(linku, "usage_category", "uncommon")
+     NIMI_LINKU_OBSCURE = category_helper(linku, "usage_category", "obscure")

  with open(SANDBOX) as f:
      sandbox: Dict[str, Dict[str, str]] = json.loads(f.read())
-     NIMI_LINKU_SANDBOX: List[str] = NIMI_LINKU_LILI + [
-         d["word"] for d in sandbox.values()
-     ]
+     NIMI_LINKU_SANDBOX: List[str] = [d["word"] for d in sandbox.values()]

  del linku
  del sandbox
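A worked example of the new helper (the entries below are illustrative only; the real data is read from `sonatoki/linku.json` as shown above):

```python
# Mirrors the category_helper defined in constants.py; the sample entries are made up.
def category_helper(data, key, value):
    return [d["word"] for d in data.values() if d[key] == value]

linku = {
    "toki": {"word": "toki", "book": "pu", "usage_category": "core"},
    "kipisi": {"word": "kipisi", "book": "ku suli", "usage_category": "common"},
    "wuwojiti": {"word": "wuwojiti", "book": "none", "usage_category": "obscure"},
}

category_helper(linku, "book", "pu")                # ["toki"]
category_helper(linku, "usage_category", "common")  # ["kipisi"]
```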
@@ -449,9 +456,13 @@ __all__ = [
      "ALL_PUNCT_RANGES",
      "ALPHABET",
      "CONSONANTS",
-     "NIMI_LINKU",
-     "NIMI_LINKU_LILI",
+     "NIMI_KU_LILI",
+     "NIMI_KU_SULI",
+     "NIMI_LINKU_COMMON",
+     "NIMI_LINKU_CORE",
+     "NIMI_LINKU_OBSCURE",
      "NIMI_LINKU_SANDBOX",
+     "NIMI_LINKU_UNCOMMON",
      "NIMI_PU",
      "NIMI_PU_SYNONYMS",
      "POSIX_PUNCT",
sonatoki/utils.py CHANGED
@@ -1,10 +1,23 @@
  # STL
  import re
- from typing import List
+ from typing import Set, List, Iterable
+
+ # LOCAL
+ from sonatoki.Cleaners import Lowercase, ConsecutiveDuplicates

  TO_ESCAPE = ["\\", "^", "[", "]", "-"]


+ def prep_dictionary(words: Iterable[str]) -> Set[str]:
+     out: Set[str] = set()
+     cleaners = [Lowercase, ConsecutiveDuplicates]
+     for word in words:
+         for c in cleaners:
+             word = c.clean(word)
+         out.add(word)
+     return out
+
+
  def regex_escape(s: str) -> str:
      """Escape all characters which must be escaped when embedded in a character class."""
      for c in TO_ESCAPE:
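The point of `prep_dictionary` is that dictionary entries receive the same cleaning as incoming tokens. A short illustration (the input words are arbitrary):

```python
from sonatoki.utils import prep_dictionary

# Lowercase runs first, then ConsecutiveDuplicates ("manna" -> "mana",
# matching the cleaner example in the README below).
prep_dictionary(["toki", "Pona", "manna"])
# -> {"toki", "pona", "mana"}
```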
sonatoki-0.3.0.dist-info/METADATA → sonatoki-0.3.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sonatoki
- Version: 0.3.0
+ Version: 0.3.1
  Summary: ilo li moku e toki li pana e sona ni: ni li toki ala toki pona?
  Author-Email: "jan Kekan San (@gregdan3)" <gregory.danielson3@gmail.com>
  License: AGPL-3.0-or-later
@@ -12,15 +12,22 @@ Description-Content-Type: text/markdown

  # sona toki

+ <div align="center">
+
+ ![Test workflow for this library](https://github.com/gregdan3/sona-toki/workflows/Tests/badge.svg)
+ [![Version number for this library](https://img.shields.io/pypi/v/sonatoki?logo=python&logoColor=%23cccccc)](https://pypi.org/project/sonatoki)
+
+ </div>
+
  ## What is **sona toki**?

- This library, "Language Knowledge," helps you identify whether a message is in Toki Pona. No grammar checking, yet, which means this more checks whether a given message has enough Toki Pona words.
+ This library, "Language Knowledge," helps you identify whether a message is in Toki Pona. It does so by determining whether a large enough number of words in a statement are "in Toki Pona". No grammar checking, yet.

- I wrote it with a variety of scraps and lessons learned from a prior project, [ilo pi toki pona taso, "toki-pona-only tool"](https://github.com/gregdan3/ilo-pi-toki-pona-taso). That tool will be rewritten to use this library shortly.
+ I wrote this library with a variety of scraps and lessons learned from a prior project, [ilo pi toki pona taso, "toki-pona-only tool"](https://github.com/gregdan3/ilo-pi-toki-pona-taso). That tool now uses this library to great success!

- If you've ever worked on a similar project, you know the question "is this message in [language]" is not a consistent one- the environment, time, preferences of the speaker, and much more, can all alter whether a given message is "in" any specific language, and this question applies to Toki Pona too.
+ If you've ever worked on a similar project, you know the question "is this message in [language]" is not a consistent one- the environment, time, preferences of the speaker, and much more, can all alter whether a given message is "in" any specific language. This complexity applies to Toki Pona too.

- This project "solves" that complex problem by offering a highly configurable parser, so you can tune it to your preferences and goals.
+ So, this project "solves" that complex problem by offering an opinionated tokenizer and a configurable parser, allowing you to tune its output to your preferences and goals. [Even silly ones.](https://sona.pona.la/wiki/isipin_epiku).

  ## Quick Start

@@ -53,12 +60,12 @@ Or if you'd prefer to configure on your own:
  from copy import deepcopy
  from sonatoki.ilo import Ilo
  from sonatoki.Configs import BaseConfig
- from sonatoki.Filters import NimiPuAle, Phonotactic, ProperName
+ from sonatoki.Filters import NimiLinkuCore, Phonotactic, ProperName
  from sonatoki.Scorers import SoftPassFail

  def main():
      config = deepcopy(BaseConfig)
-     config["scoring_filters"].extend([NimiPuAle, Phonotactic, ProperName])
+     config["scoring_filters"].extend([NimiLinkuCore, Phonotactic, ProperName])
      config["scorer"] = SoftPassFail

      ilo = Ilo(**config)
@@ -88,24 +95,28 @@ After our proposal has been examined and a result given by the committee, I will

  ### What's the deal with the tokenizers?

- The Toki Pona tokenizer `word_tokenize_tok` is very specific in always separating writing characters from punctuation, and leaving contiguous punctuation as contiguous- this is a level of precision that NLTK's English tokenizer does not want for several reasons, such as that English words can have "punctuation" characters in them.
-
- Toki Pona doesn't have any mid-word symbols when rendered in the Latin alphabet, so a more aggressive tokenizer is highly desirable.
+ The Toki Pona tokenizer `sonatoki.Tokenizers.WordTokenizer` has the goal of tokenizing statements such that every token either represents a word candidate ("toki", "mumumu") or a complete non-candidate ("..!", "123").
+ This design is highly undesirable for NLTK's English tokenizer because English words can have "punctuation" characters in them.
+ But Toki Pona doesn't have any mid-word symbols when rendered in the Latin alphabet or in [Private Use Area Unicode characters](https://www.kreativekorp.com/ucsur/), so a more aggressive tokenizer is highly desirable.

- The other tokenizers are provided as a comparison case more than anything. I do not recommend their use.
+ The goal of splitting into word candidates and non-candidates is important, because any [encoding of Toki Pona's logographic script](https://www.kreativekorp.com/ucsur/charts/sitelen.html) will require each character be split into its own token, where the default behavior would be to leave consecutive non-punctuation together.

  ### Aren't there a lot of false positives?

- Yes. It's up to you to use this tool responsibly on input you've done your best to clean, and better, use stronger filters before weaker ones. For now though, here's a list of relevant false positives:
+ Yes, depending on the filter you choose and how you apply it.
+ It's up to you to use this tool responsibly on input you've done your best to clean, such as by using stronger filters before weaker ones.
+ For now though, here's a list of relevant false positives:

- - `ProperName` will errantly match text in languages without a capital/lowercase distinction, artificially inflating the scores.
- - `Alphabetic` will match a _lot_ of undesirable text- it essentially allows 14 letters of the English alphabet.
+ - `ProperName` will errantly match text in languages without a capital/lowercase distinction, artificially increasing scores.
+ - `Alphabetic` will match a _lot_ of undesirable text- it essentially allows 14 letters of the English alphabet. For example, "I'm well" would match as _three_ words: "i", "m", "well".
+ - `NimiPu` and other sets containing `a`, `mute`, `open`, and others will unavoidably match those words in English text too.

  ### Don't some of the cleaners/filters conflict?

- Yes. Some do so
+ Yes, though not terribly much.

  - `ConsecutiveDuplicates` may errantly change a word's validity. For example, "manna" is phonotactically invalid in Toki Pona, but would become "mana" which is valid.
- - `ConsecutiveDuplicates` will not work correctly with syllabaries (alphabets, but representing a pair of consonant and vowel).
+ - `ConsecutiveDuplicates` will not work correctly with syllabaries, though this should not change the validity of the analyzed word unless you attempt to dictionary match these words.
+ - If you build your own `MemberFilter` with words that have capital letters or consecutive duplicates, they will never match unless you use `prep_dictionary`.

- You'll notice a _lot_ of these are troubles regarding the application of latin alphabet filters to non-latin text. Working on it!
+ You'll notice these are mostly caused by applying latin alphabet filters to non-latin text. Working on it!
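As a companion to the last bullet in that list, a hedged sketch of a user-defined `MemberFilter` built the recommended way; the class name and word list are made up for illustration.

```python
from sonatoki.Filters import MemberFilter
from sonatoki.utils import prep_dictionary

class NimiMute(MemberFilter):  # hypothetical user-defined word-set filter
    # Without prep_dictionary, entries with capitals or doubled letters would
    # never match tokens that the cleaners have already normalized.
    tokens = prep_dictionary(["Epiku", "sutopatikuna", "yupekosi"])
```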
sonatoki-0.3.0.dist-info/RECORD → sonatoki-0.3.1.dist-info/RECORD CHANGED
@@ -1,18 +1,18 @@
- sonatoki-0.3.0.dist-info/METADATA,sha256=94NlsvWK1jI4a-wQNdbtwtl0AH7985Cw5aV7IvQbcqo,5160
- sonatoki-0.3.0.dist-info/WHEEL,sha256=vnE8JVcI2Wz7GRKorsPArnBdnW2SWKWGow5gu5tHlRU,90
- sonatoki-0.3.0.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ sonatoki-0.3.1.dist-info/METADATA,sha256=nWomuM-AeE98VwnWen7qffNclw8emxAf-oFtXwba8wI,6341
+ sonatoki-0.3.1.dist-info/WHEEL,sha256=vnE8JVcI2Wz7GRKorsPArnBdnW2SWKWGow5gu5tHlRU,90
+ sonatoki-0.3.1.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  sonatoki/Cleaners.py,sha256=m0j1a1vs9Mdqp724r9Xfh1Y_tyP6GYCkihv8rH8m7lA,1871
- sonatoki/Configs.py,sha256=qDLSI0c_FmTggtzNUiYk94P8GZqm5r0co5bdsoCZsa0,3120
- sonatoki/Filters.py,sha256=j7UcESrGGrZxS0Ln4D-0ZTEzm94xs8zzpcb22PSF_Fo,7930
+ sonatoki/Configs.py,sha256=NS1_esoDNna8LyH_9bPMkxbo2sMSilYhG1PwYLdq6L8,3402
+ sonatoki/Filters.py,sha256=-j5xSZ8URjqalQVGMBabMvJ5ofZWazfN7YPfXkM_4uQ,9429
  sonatoki/Preprocessors.py,sha256=aMXXuFBDlJudvzvukvCa7BixuROXXEb62un7I-TGOGs,4441
  sonatoki/Scorers.py,sha256=W-1uYiqjsDejJzoe592ixs7wHazjJXPhuo-41zuJ26U,3643
  sonatoki/Tokenizers.py,sha256=So5_Tu6J98MD3yVcwB_X3lw2uMG0TN6XHcTbQjFCu5Q,4254
  sonatoki/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sonatoki/__main__.py,sha256=6xc-wIrrFo9wTyn4zRQNAmqwmJBtVvCMwV-CrM-hueA,82
- sonatoki/constants.py,sha256=ocH3gJOh5SzTKxhVgGmy0VP8KDk-IQpodwzh2Ilr_G4,12349
+ sonatoki/constants.py,sha256=XTFmEcnLBXwdYXjTq_EuW9e_TWLtnNLz2vFCf8m-sz0,12844
  sonatoki/ilo.py,sha256=yyLgNPI0Hmb4f1BzX6IRHr11FPChfL2xDR_9odlr8_8,3849
  sonatoki/linku.json,sha256=B5KNdhyM5UEfMciROgh1ECHr3i-ASBeMvwrkzNJX47c,271013
  sonatoki/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sonatoki/sandbox.json,sha256=hx6LRsfvmmTtqXcXIyCsfSaGK3DZ-GCdbM8xhZQBHoA,77650
- sonatoki/utils.py,sha256=9Dcjg2fUZygA2Z9MUr30Dq3gL2xViJC4hBvRhQDSx3Q,3210
- sonatoki-0.3.0.dist-info/RECORD,,
+ sonatoki/utils.py,sha256=OMaRyoNvKGKYQCBDjQyaCI58-wMpQ0wrrNjTJKsEZ9Y,3550
+ sonatoki-0.3.1.dist-info/RECORD,,