sonatoki 0.8.4__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sonatoki/Configs.py CHANGED
@@ -9,14 +9,17 @@ from sonatoki.types import Number
 from sonatoki.Filters import (
     Or,
     And,
+    Len,
     Not,
     Filter,
     PuName,
     Numeric,
+    Syllabic,
     NimiUCSUR,
     Alphabetic,
     NimiKuLili,
     NimiKuSuli,
+    ProperName,
     Punctuation,
     LongSyllabic,
     Miscellaneous,
@@ -29,7 +32,7 @@ from sonatoki.Filters import (
     NimiLinkuUncommon,
     FalsePosAlphabetic,
 )
-from sonatoki.Scorers import Scorer, PassFail, SoftScaling, SoftPassFail
+from sonatoki.Scorers import Scorer, Soften, Voting, PassFail, SoftScaling, SoftPassFail
 from sonatoki.Cleaners import Cleaner, ConsecutiveDuplicates
 from sonatoki.Tokenizers import Tokenizer, WordTokenizerRe
 from sonatoki.Preprocessors import (
@@ -49,21 +52,21 @@ __DICT_PHONOMATCHES = {
     "an",  # article
     "api",  # API
     "i",  # 1st person
+    "je",  # 1st person pronoun, french
     "kana",  # japanese script
     "me",  # 1st person singular, english
     "ne",  # "no" in several languages
     "nu",  # "new" in english, "now" in dutch
+    "omen",  # ominous
     "se",  # spanish particle, english "see"
+    "sole",  # singular, of shoe
     "take",  # acquire, perhaps forcefully or without permission
     "ten",  # 10
     "to",  # to, too
-    "je",  # 1st person pronoun, french
     "u",  # no u
     "we",  # 1st person plural, english
     "wi",  # wii and discussions of syllables
-    "sole",  # singular, of shoe
     # unexplored candidates for removal
-    # "omen",  # ominous
     # "papa",  # father
     # "lo",  # "lo" and "loo"
     # "ewe",  # sheep
@@ -99,11 +102,11 @@ PrefConfig: IloConfig = {
     "cleaners": [ConsecutiveDuplicates],
     "ignoring_filters": [Numeric, Punctuation],
     "scoring_filters": [
-        Or(NimiLinkuByUsage(30), NimiUCSUR),
-        And(LongSyllabic, Not(FalsePosSyllabic)),
+        Len(Or(NimiLinkuByUsage(30), NimiUCSUR), max=15),
+        Len(And(Syllabic, Not(FalsePosSyllabic)), min=3, max=24),
         # NOTE: These are allowed to pass name and alphabetic below, because they *could* be wrong
-        LongProperName,
-        And(LongAlphabetic, Not(FalsePosAlphabetic)),
+        Len(ProperName, min=2, max=24),
+        Len(And(Alphabetic, Not(FalsePosAlphabetic)), min=3, max=24),
     ],
     "scorer": SoftScaling,
     "passing_score": 0.8,
@@ -114,15 +117,18 @@ CorpusConfig: IloConfig = {
     "cleaners": [ConsecutiveDuplicates],
     "ignoring_filters": [Numeric, Punctuation],
     "scoring_filters": [
-        Or(
-            # awkward but efficient syntax
-            NimiLinkuByUsage(0)(sub=__DICT_PHONOMATCHES),
-            NimiUCSUR,
-            Miscellaneous,
+        Len(
+            Or(
+                # awkward but efficient syntax
+                NimiLinkuByUsage(0)(sub=__DICT_PHONOMATCHES),
+                NimiUCSUR,
+                Miscellaneous,
+            ),
+            max=19,
         ),
-        And(LongSyllabic, Not(FalsePosSyllabic)),
-        LongProperName,
-        And(LongAlphabetic, Not(FalsePosAlphabetic)),
+        Len(And(Syllabic, Not(FalsePosSyllabic)), min=3, max=24),
+        Len(ProperName, min=2, max=24),
+        Len(And(Alphabetic, Not(FalsePosAlphabetic)), min=3, max=24),
     ],
     "scorer": SoftScaling,
     "passing_score": 0.8,
sonatoki/Filters.py CHANGED
@@ -7,7 +7,7 @@ from functools import lru_cache as cache # cache comes in 3.9

 # PDM
 import regex
-from typing_extensions import override
+from typing_extensions import override, deprecated

 # LOCAL
 from sonatoki.types import LinkuBooks, LinkuUsageDate, LinkuUsageCategory
@@ -41,6 +41,7 @@ class Filter(ABC):
         raise NotImplementedError


+@deprecated("Use sonatoki.Filters.Len instead")
 class MinLen(Filter):
     """
     Meta filter meant to be inherited by another filter to add a length requirement.
@@ -62,12 +63,54 @@ class MinLen(Filter):
         return super().filter(token)

     def __new__(cls, filter: Type[Filter], length_: int) -> Type[Filter]:
-        class MinLenFilter(MinLen, Filter):
+        class MinLenFilter(MinLen, filter):
             length = length_

         return MinLenFilter


+class Len(Filter):
+    """Meta filter to be inherited by another filter to add any length
+    requirement. A bound will only be considered if it is non-zero, so you may
+    omit a minimum length or a maximum length to bound only one of them.
+
+    If inherited when defining a class, `Len` must be the first argument so `super()` resolves correctly.
+
+    To add minimum or maximum length requirements when defining a class:
+    ```
+    class LongAlphabetic(Len, Alphabetic):
+        minlen = 3
+        maxlen = 20
+    ```
+
+    You may also construct any other filter with length requirements like so:
+    ```
+    Len(Alphabetic, min=3, max=20)
+    ```
+    """
+
+    minlen = 0
+    maxlen = 0
+
+    @classmethod
+    @cache(maxsize=None)
+    def filter(cls, token: str) -> bool:
+        tokenlen = len(token)
+
+        if cls.minlen and tokenlen < cls.minlen:
+            return False
+        if cls.maxlen and tokenlen > cls.maxlen:
+            return False
+        return super().filter(token)
+
+    def __new__(cls, filter: Type[Filter], min: int = 0, max: int = 0) -> Type[Filter]:
+        class LenFilter(Len, filter):
+            minlen = min
+            maxlen = max
+
+        return LenFilter
+
+
 class RegexFilter(Filter):
     pattern: "re.Pattern[str]"

@@ -183,8 +226,8 @@ class PuName(Filter):
     # this will errantly match.


-class LongProperName(MinLen, ProperName):
-    length = 2  # reject "names" of length 1
+class LongProperName(Len, ProperName):
+    minlen = 2  # reject "names" of length 1


 class NimiLinkuByUsage:
@@ -252,8 +295,8 @@ class Phonotactic(RegexFilter):
     )


-class LongPhonotactic(MinLen, Phonotactic):
-    length = 3
+class LongPhonotactic(Len, Phonotactic):
+    minlen = 3


 class Syllabic(RegexFilter):
@@ -271,8 +314,8 @@ class Syllabic(RegexFilter):
     )


-class LongSyllabic(MinLen, Syllabic):
-    length = 3
+class LongSyllabic(Len, Syllabic):
+    minlen = 3


 class Alphabetic(SubsetFilter):
@@ -283,8 +326,8 @@ class AlphabeticRe(RegexFilter):
     pattern = re.compile(rf"[{ALPHABET}]+", flags=re.IGNORECASE)


-class LongAlphabetic(MinLen, Alphabetic):
-    length = 3
+class LongAlphabetic(Len, Alphabetic):
+    minlen = 3


 class Numeric(Filter):
@@ -448,15 +491,26 @@ class Not(Filter):
         return NotFilter


+class Pass(Filter):
+    @classmethod
+    @override
+    @cache(maxsize=None)
+    def filter(cls, token: str) -> bool:
+        return True
+
+
+class Fail(Not, Pass): ...
+
+
 __all__ = [
     "Alphabetic",
     "And",
     "FalsePosSyllabic",
+    "Len",
     "LongAlphabetic",
     "LongPhonotactic",
     "LongProperName",
     "LongSyllabic",
-    "MinLen",
     "NimiLinkuCore",
     "NimiLinkuSandbox",
     "NimiPu",
sonatoki/Scorers.py CHANGED
@@ -8,7 +8,7 @@ from typing_extensions import override

 # LOCAL
 from sonatoki.types import Number, Scorecard
-from sonatoki.Filters import Filter
+from sonatoki.Filters import Pass, Filter


 class Scorer(ABC):
@@ -112,6 +112,72 @@ class Scaling(Scorer):
         return total_score / max_score if max_score else 0


+class Voting(Scaling):
+    """Derives from `Scaling` in assigning scores from 0 to 1 based on the
+    first matching filter out of the list of filters. However, after all scores
+    are derived, each token scoring at or below the threshold is assigned the
+    average score of its nearest 3 neighbors. The default threshold is 0.
+
+    If there are 3 or fewer tokens, this scorer is identical to the
+    Scaling scorer.
+
+    If the Voting scorer is created with a Filter, tokens must also
+    match that filter to be considered for voting. For example, the
+    following Voting scorer would only check words with a score of 0.3
+    or less that still match the Syllabic filter: `Voting(Syllabic, 0.3)`
+    """
+
+    prereq: Type[Filter] = Pass
+    threshold: int = 0
+
+    def __new__(cls, filter: Type[Filter], threshold_: int = 0) -> Type[Scorer]:
+        class AnonVoting(Voting):
+            prereq = filter
+            threshold = threshold_
+
+        return AnonVoting
+
+    @classmethod
+    @override
+    def score(cls, tokens: List[str], filters: List[Type[Filter]]) -> Number:
+        if not tokens:
+            return 1
+
+        if len(tokens) < 4:
+            return super().score(tokens, filters)
+
+        len_filters = len(filters)
+        max_score = len(tokens) * len_filters
+
+        # score_token only emits ints, but the averaging emits floats;
+        # it doesn't really matter as long as no score exceeds len_filters
+        scores: List[Number] = []
+        for token in tokens:
+            score = cls.score_token(token, filters, len_filters)
+            scores.append(score)
+
+        # only consider scores from before voting
+        copied_scores = scores[:]
+        for i, (token, score) in enumerate(zip(tokens, copied_scores)):
+            if score > cls.threshold:
+                continue
+            if not cls.prereq.filter(token):
+                continue
+
+            # TODO: this is kinda dumb.
+            # we want to get exactly 3 neighbors, favoring 2 before and 1 after
+            # the way i'm doing this is both bad and slow as hell
+            start = max(i - 2, 0)
+            end = min(i + 1, len(scores) - 1)
+            neighbors = copied_scores[start:i] + copied_scores[i + 1 : end + 1]
+            scores[i] = sum(neighbors) / len(neighbors)
+
+        total_score = sum(scores)
+
+        return total_score / max_score if max_score else 0
+
+
 class SoftPassFail(Soften, PassFail):
     """Same as `PassFail`, but shorter messages are subject to less harsh
     scoring."""
@@ -122,6 +188,11 @@ class SoftScaling(Soften, Scaling):
     scoring."""


+class SoftVoting(Soften, Voting):
+    """Same as `Voting`, but shorter messages are subject to less harsh
+    scoring."""
+
+
 class SentenceScorer(ABC):
     @classmethod
     @abstractmethod
sonatoki/__main__.py CHANGED
@@ -60,11 +60,11 @@ def download_json(url: str) -> Dict[str, Any]:


 def regen_linku_data():
     data = download_json(LINKU_WORDS)
-    with open(os.path.join(HERE, "linku.json"), "w") as f:
+    with open(os.path.join(HERE, "linku.json"), "w", encoding="utf-8") as f:
         _ = f.write(json.dumps(data))

     data = download_json(LINKU_SANDBOX)
-    with open(os.path.join(HERE, "sandbox.json"), "w") as f:
+    with open(os.path.join(HERE, "sandbox.json"), "w", encoding="utf-8") as f:
         _ = f.write(json.dumps(data))


@@ -96,11 +96,11 @@ def regen_false_negatives():
        continue

     # TODO: include short matches or no?
-    with open(os.path.join(HERE, "syllabic.txt"), "w") as f:
+    with open(os.path.join(HERE, "syllabic.txt"), "w", encoding="utf-8") as f:
         syllabic_final = sorted([word + "\n" for word in syllabic_matches])
         f.writelines(syllabic_final)

-    with open(os.path.join(HERE, "alphabetic.txt"), "w") as f:
+    with open(os.path.join(HERE, "alphabetic.txt"), "w", encoding="utf-8") as f:
         alphabetic_final = sorted([word + "\n" for word in alphabetic_matches])
         f.writelines(alphabetic_final)
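
The repeated `encoding="utf-8"` additions in this file (and in constants.py below) guard against platform-dependent defaults: without an explicit encoding, `open()` uses the locale's preferred encoding, often cp1252 on Windows, which cannot represent Linku's non-ASCII data. A hypothetical illustration; the filename and glyph are examples, not package data:

```python
import locale

# open() without an encoding falls back to this platform-dependent value:
print(locale.getpreferredencoding(False))  # e.g. "cp1252" on Windows

# On such a platform, writing a UCSUR glyph raises UnicodeEncodeError
# unless utf-8 is forced, as the diff now does:
with open("demo.json", "w", encoding="utf-8") as f:
    f.write('{"word": "toki", "glyph": "\U000F1948"}')
```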
sonatoki/constants.py CHANGED
@@ -7,7 +7,7 @@ from pathlib import Path
 from sonatoki.types import LinkuWord, LinkuUsageDate
 from sonatoki.utils import find_unicode_chars, find_unicode_ranges

-LATEST_DATE = "2023-09"
+LATEST_DATE = "2024-09"
 # hardcoding this seems bad, but it means the parser is stable w.r.t. Linku!


@@ -648,6 +648,7 @@ FALSE_POS_SYLLABIC = {
     "insolate",
     "insulate",
     "intense",
+    "saluton",
     # "june",
     "lemon",
     "manipulate",
@@ -667,10 +668,11 @@ FALSE_POS_ALPHABETIC: Set[str] = {
     "not",
     "link",
     "wait",
+    "just",
     "lol",
     "new",
     "also",
-    "isn",  # TODO: tokenizer....
+    "isnt",
     "mean",
     "means",
     "it",
@@ -680,6 +682,7 @@ FALSE_POS_ALPHABETIC: Set[str] = {
     "new",
     "wel",
     "makes",
+    "unles",
 }

 UCSUR_RANGES = [
@@ -698,9 +701,9 @@ def linku_data() -> Dict[str, LinkuWord]:
     # NOTE: this does open+read+parse two files each time you construct a filter
     # but i expect users to construct filters only at the start of runtime
     # there is no reason to waste your RAM by leaving the linku data in it
-    with open(LINKU) as f:
+    with open(LINKU, "r", encoding="utf-8") as f:
         linku: Dict[str, LinkuWord] = json.loads(f.read())
-    with open(SANDBOX) as f:
+    with open(SANDBOX, "r", encoding="utf-8") as f:
         sandbox: Dict[str, LinkuWord] = json.loads(f.read())

     return {**linku, **sandbox}
@@ -721,6 +724,10 @@ def words_by_usage(

     result: Set[str] = set()
     for word in data.values():
+        if usage == 0:
+            result.add(word["word"])
+            continue
+
         usages = word["usage"]
         if date in usages and usages[date] >= usage:
             result.add(word["word"])
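
With the early `continue`, `usage == 0` now means "every word in the merged Linku and sandbox data", including entries that lack a datapoint for `LATEST_DATE`; previously such entries were dropped even at a threshold of 0. A sketch of the resulting contract, assuming `words_by_usage` is importable from `sonatoki.constants` as defined here (threshold values illustrative):

```python
from sonatoki.constants import words_by_usage

# usage=0 now returns every known word, even those with no usage
# datapoint recorded for LATEST_DATE ("2024-09").
all_words = words_by_usage(0)

# Positive thresholds still require a datapoint at or above that value,
# so they select subsets of all_words.
assert words_by_usage(30) <= all_words
```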
@@ -731,10 +738,10 @@
 NIMI_PU_SYNONYMS = {"namako", "kin", "oko"}


-# with open(SYLLABICS) as f:
+# with open(SYLLABICS, "r", encoding="utf-8") as f:
 #     FALSE_POS_SYLLABIC = {line.strip() for line in f}
 #
-# with open(ALPHABETICS) as f:
+# with open(ALPHABETICS, "r", encoding="utf-8") as f:
 #     FALSE_POS_ALPHABETIC = {line.strip() for line in f}

 __all__ = [