webscout-7.1-py3-none-any.whl → webscout-7.3-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (154)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +23 -0
  24. webscout/Litlogger/core/logger.py +166 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +33 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +173 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +249 -0
  31. webscout/Litlogger/styles/formats.py +460 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/AISEARCH/ISou.py +277 -0
  38. webscout/Provider/AISEARCH/__init__.py +2 -1
  39. webscout/Provider/Blackboxai.py +3 -3
  40. webscout/Provider/ChatGPTGratis.py +226 -0
  41. webscout/Provider/Cloudflare.py +3 -4
  42. webscout/Provider/DeepSeek.py +218 -0
  43. webscout/Provider/Deepinfra.py +40 -24
  44. webscout/Provider/Free2GPT.py +131 -124
  45. webscout/Provider/Gemini.py +100 -115
  46. webscout/Provider/Glider.py +3 -3
  47. webscout/Provider/Groq.py +5 -1
  48. webscout/Provider/Jadve.py +3 -3
  49. webscout/Provider/Marcus.py +191 -192
  50. webscout/Provider/Netwrck.py +3 -3
  51. webscout/Provider/PI.py +2 -2
  52. webscout/Provider/PizzaGPT.py +2 -3
  53. webscout/Provider/QwenLM.py +311 -0
  54. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  55. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  56. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  57. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  58. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  59. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  60. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  61. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  62. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  63. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  64. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  65. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  66. webscout/Provider/TTI/__init__.py +2 -1
  67. webscout/Provider/TTI/artbit/__init__.py +22 -22
  68. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  69. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  70. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  71. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  72. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  73. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  74. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  75. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  76. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  77. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  78. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  79. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  80. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  81. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  82. webscout/Provider/TTI/talkai/__init__.py +4 -4
  83. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  84. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  85. webscout/Provider/TTS/deepgram.py +182 -182
  86. webscout/Provider/TTS/elevenlabs.py +136 -136
  87. webscout/Provider/TTS/gesserit.py +150 -150
  88. webscout/Provider/TTS/murfai.py +138 -138
  89. webscout/Provider/TTS/parler.py +133 -134
  90. webscout/Provider/TTS/streamElements.py +360 -360
  91. webscout/Provider/TTS/utils.py +280 -280
  92. webscout/Provider/TTS/voicepod.py +116 -116
  93. webscout/Provider/TextPollinationsAI.py +28 -8
  94. webscout/Provider/WiseCat.py +193 -0
  95. webscout/Provider/__init__.py +146 -134
  96. webscout/Provider/cerebras.py +242 -227
  97. webscout/Provider/chatglm.py +204 -204
  98. webscout/Provider/dgaf.py +2 -3
  99. webscout/Provider/freeaichat.py +221 -0
  100. webscout/Provider/gaurish.py +2 -3
  101. webscout/Provider/geminiapi.py +208 -208
  102. webscout/Provider/granite.py +223 -0
  103. webscout/Provider/hermes.py +218 -218
  104. webscout/Provider/llama3mitril.py +179 -179
  105. webscout/Provider/llamatutor.py +3 -3
  106. webscout/Provider/llmchat.py +2 -3
  107. webscout/Provider/meta.py +794 -794
  108. webscout/Provider/multichat.py +331 -331
  109. webscout/Provider/typegpt.py +359 -359
  110. webscout/Provider/yep.py +3 -3
  111. webscout/__init__.py +1 -0
  112. webscout/__main__.py +5 -5
  113. webscout/cli.py +319 -319
  114. webscout/conversation.py +241 -242
  115. webscout/exceptions.py +328 -328
  116. webscout/litagent/__init__.py +28 -28
  117. webscout/litagent/agent.py +2 -3
  118. webscout/litprinter/__init__.py +0 -58
  119. webscout/scout/__init__.py +8 -8
  120. webscout/scout/core.py +884 -884
  121. webscout/scout/element.py +459 -459
  122. webscout/scout/parsers/__init__.py +69 -69
  123. webscout/scout/parsers/html5lib_parser.py +172 -172
  124. webscout/scout/parsers/html_parser.py +236 -236
  125. webscout/scout/parsers/lxml_parser.py +178 -178
  126. webscout/scout/utils.py +38 -38
  127. webscout/swiftcli/__init__.py +811 -811
  128. webscout/update_checker.py +2 -12
  129. webscout/version.py +1 -1
  130. webscout/webscout_search.py +87 -6
  131. webscout/webscout_search_async.py +58 -1
  132. webscout/yep_search.py +297 -0
  133. webscout/zeroart/__init__.py +54 -54
  134. webscout/zeroart/base.py +60 -60
  135. webscout/zeroart/effects.py +99 -99
  136. webscout/zeroart/fonts.py +816 -816
  137. {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
  138. webscout-7.3.dist-info/RECORD +223 -0
  139. {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
  140. webstoken/__init__.py +30 -30
  141. webstoken/classifier.py +189 -189
  142. webstoken/keywords.py +216 -216
  143. webstoken/language.py +128 -128
  144. webstoken/ner.py +164 -164
  145. webstoken/normalizer.py +35 -35
  146. webstoken/processor.py +77 -77
  147. webstoken/sentiment.py +206 -206
  148. webstoken/stemmer.py +73 -73
  149. webstoken/tagger.py +60 -60
  150. webstoken/tokenizer.py +158 -158
  151. webscout-7.1.dist-info/RECORD +0 -198
  152. {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
  153. {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
  154. {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
webstoken/stemmer.py CHANGED
@@ -1,73 +1,73 @@
Every line is removed and re-added with identical content (most likely a whitespace or line-ending rewrite), so the file is shown once:

    """
    Word stemming utilities.
    """

    from typing import Set


    class Stemmer:
        """Simple rule-based stemmer implementing Porter-like rules."""

        def __init__(self):
            self.vowels: Set[str] = {'a', 'e', 'i', 'o', 'u', 'y'}
            self.doubles: Set[str] = {'bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt'}

        def is_vowel(self, char: str, prev_char: str = None) -> bool:
            """Check if a character is a vowel, considering 'y' special cases."""
            return char in self.vowels or (char == 'y' and prev_char and prev_char not in self.vowels)

        def count_syllables(self, word: str) -> int:
            """Count syllables in a word based on vowel sequences."""
            count = 0
            prev_char = None
            for i, char in enumerate(word.lower()):
                if self.is_vowel(char, prev_char) and (i == 0 or not self.is_vowel(prev_char, word[i-2] if i > 1 else None)):
                    count += 1
                prev_char = char
            return count or 1

        def stem(self, word: str) -> str:
            """Apply stemming rules to reduce word to its root form."""
            if len(word) <= 3:
                return word

            word = word.lower()

            # Step 1: Handle plurals and past participles
            if word.endswith('sses'):
                word = word[:-2]
            elif word.endswith('ies'):
                word = word[:-2]
            elif word.endswith('ss'):
                pass
            elif word.endswith('s') and len(word) > 4:
                word = word[:-1]

            # Step 2: Handle -ed and -ing
            if word.endswith('ed') and self.count_syllables(word[:-2]) > 1:
                word = word[:-2]
            elif word.endswith('ing') and self.count_syllables(word[:-3]) > 1:
                word = word[:-3]

            # Step 3: Handle miscellaneous endings
            if len(word) > 5:
                if word.endswith('ement'):
                    word = word[:-5]
                elif word.endswith('ment'):
                    word = word[:-4]
                elif word.endswith('ent'):
                    word = word[:-3]

            # Step 4: Handle -ity endings
            if word.endswith('ity') and len(word) > 6:
                word = word[:-3]
                if word.endswith('abil'):
                    word = word[:-4] + 'able'
                elif word.endswith('ic'):
                    word = word[:-2]

            # Final step: Remove double consonants at the end
            if len(word) > 2 and word[-2:] in self.doubles:
                word = word[:-1]

            return word
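
For reference, a minimal usage sketch of the module above (hypothetical inputs; the commented outputs follow from tracing the rules, assuming Stemmer is importable from webstoken.stemmer as laid out here):

    from webstoken.stemmer import Stemmer

    stemmer = Stemmer()
    print(stemmer.stem('computers'))  # 'computer' (Step 1: trailing 's' dropped when len > 4)
    print(stemmer.stem('agreement'))  # 'agre'     (Step 3: '-ement' stripped before '-ment'/'-ent')
    print(stemmer.stem('ability'))    # 'able'     (Step 4: '-ity' stripped, then 'abil' -> 'able')
    print(stemmer.stem('run'))        # 'run'      (words of length <= 3 are returned unchanged)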
webstoken/tagger.py CHANGED
@@ -1,60 +1,60 @@
As above, the removed and re-added lines are identical; the file is shown once:

    """
    Part-of-Speech tagging utilities.
    """

    from typing import List, Set, Tuple


    class POSTagger:
        """Simple rule-based Part-of-Speech tagger."""

        def __init__(self):
            # Basic rules for POS tagging
            self.noun_suffixes: Set[str] = {'ness', 'ment', 'ship', 'dom', 'hood', 'er', 'or', 'ist'}
            self.verb_suffixes: Set[str] = {'ize', 'ate', 'ify', 'ing', 'ed'}
            self.adj_suffixes: Set[str] = {'able', 'ible', 'al', 'ful', 'ous', 'ive', 'less'}
            self.adv_suffixes: Set[str] = {'ly'}

            # Common words by POS
            self.determiners: Set[str] = {'the', 'a', 'an', 'this', 'that', 'these', 'those'}
            self.prepositions: Set[str] = {'in', 'on', 'at', 'by', 'with', 'from', 'to', 'for'}
            self.pronouns: Set[str] = {'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him', 'her'}

        def tag(self, tokens: List[str]) -> List[Tuple[str, str]]:
            """Assign POS tags to tokens based on rules."""
            tagged = []
            prev_tag = None

            for i, token in enumerate(tokens):
                word = token.lower()

                # Check special cases first
                if word in self.determiners:
                    tag = 'DET'
                elif word in self.prepositions:
                    tag = 'PREP'
                elif word in self.pronouns:
                    tag = 'PRON'
                # Check suffixes
                elif any(word.endswith(suffix) for suffix in self.noun_suffixes):
                    tag = 'NOUN'
                elif any(word.endswith(suffix) for suffix in self.verb_suffixes):
                    tag = 'VERB'
                elif any(word.endswith(suffix) for suffix in self.adj_suffixes):
                    tag = 'ADJ'
                elif any(word.endswith(suffix) for suffix in self.adv_suffixes):
                    tag = 'ADV'
                # Default cases
                elif token[0].isupper() and i > 0:  # check the original token; word is already lowercased
                    tag = 'PROPN'  # Proper noun
                elif word.isdigit():
                    tag = 'NUM'
                elif not word.isalnum():
                    tag = 'PUNCT'
                else:
                    tag = 'NOUN'  # Default to noun

                tagged.append((token, tag))
                prev_tag = tag

            return tagged
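
A brief sketch of the tagger on hypothetical input (the commented tags follow from tracing the rules above, with the capitalization check applied to the original token):

    from webstoken.tagger import POSTagger

    tagger = POSTagger()
    print(tagger.tag(['She', 'visited', 'Tokyo', 'in', '2024', '.']))
    # [('She', 'PRON'), ('visited', 'VERB'), ('Tokyo', 'PROPN'),
    #  ('in', 'PREP'), ('2024', 'NUM'), ('.', 'PUNCT')]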
webstoken/tokenizer.py CHANGED
@@ -1,158 +1,158 @@
As above, the removed and re-added lines are identical; the file is shown once:

    """
    Tokenization utilities for sentence and word-level tokenization.
    """

    from typing import List, Dict, Set, Pattern
    import re


    class SentenceTokenizer:
        """Advanced sentence tokenizer with support for complex cases and proper formatting."""

        def __init__(self) -> None:
            # Common abbreviations by category
            self.TITLES: Set[str] = {
                'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
                'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'col', 'maj', 'gen', 'capt', 'cmdr',
                'lt', 'sgt', 'cpl', 'pvt'
            }

            self.ACADEMIC: Set[str] = {
                'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
                'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
            }

            self.ORGANIZATIONS: Set[str] = {
                'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
                'intl', 'dept', 'est', 'dist', 'mfg', 'div'
            }

            self.MONTHS: Set[str] = {
                'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
            }

            self.UNITS: Set[str] = {
                'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
                'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
            }

            self.TECHNOLOGY: Set[str] = {
                'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
                'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
            }

            self.MISC: Set[str] = {
                'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
                'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
            }

            # Combine all abbreviations
            self.all_abbreviations: Set[str] = (
                self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
                self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
            )

            # Special patterns
            self.ELLIPSIS: str = r'\.{2,}|…'
            self.URL_PATTERN: str = (
                r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
            )
            self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
            self.NUMBER_PATTERN: str = (
                r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
            )

            # Quote and bracket pairs (straight and curly forms)
            self.QUOTE_PAIRS: Dict[str, str] = {
                '"': '"', "'": "'", '“': '”', "「": "」", "『": "』",
                "«": "»", "‹": "›", "‘": "’", "‚": "‘"
            }

            self.BRACKETS: Dict[str, str] = {
                '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
                '『': '』', '【': '】', '〖': '〗'
            }

            # Compile regex patterns
            self._compile_patterns()

        def _compile_patterns(self) -> None:
            """Compile regex patterns for better performance."""
            # Pattern for finding potential sentence boundaries
            self.SENTENCE_END: Pattern = re.compile(
                r'''
                # Group for sentence endings
                (?:
                    # Standard endings with optional quotes/brackets
                    (?<=[.!?])[\"\'\)\]\}»›」』\s]*

                    # Ellipsis
                    |(?:\.{2,}|…)

                    # Asian-style endings
                    |(?<=[。!?」』】\s])
                )

                # Must be followed by whitespace and capital letter or number
                (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
                ''',
                re.VERBOSE
            )

            # Pattern for abbreviations
            abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
            self.ABBREV_PATTERN: Pattern = re.compile(
                fr'\b(?:{abbrev_pattern})\.?',
                re.IGNORECASE
            )

        def tokenize(self, text: str) -> List[str]:
            """Split text into sentences while handling complex cases."""
            if not text or not text.strip():
                return []

            # Initial split on potential sentence boundaries
            sentences = self.SENTENCE_END.split(text)

            # Clean and validate sentences
            final_sentences = []
            for sentence in sentences:
                sentence = sentence.strip()
                if sentence:
                    final_sentences.append(sentence)

            return final_sentences


    class WordTokenizer:
        """Simple but effective word tokenizer with support for contractions and special cases."""

        def __init__(self):
            self.contractions = {
                "n't": "not", "'ll": "will", "'re": "are", "'s": "is",
                "'m": "am", "'ve": "have", "'d": "would"
            }

            self.word_pattern = re.compile(r"""
                (?:[A-Za-z]+(?:['’][A-Za-z]+)*)|   # Words with optional internal apostrophes
                (?:\d+(?:,\d{3})*(?:\.\d+)?)|      # Numbers with commas and decimals
                (?:[@#]?\w+)|                      # Hashtags and mentions
                (?:[^\w\s])                        # Punctuation and symbols
            """, re.VERBOSE)

        def tokenize(self, text: str) -> List[str]:
            """Split text into words while handling contractions and special cases."""
            tokens = []
            for match in self.word_pattern.finditer(text):
                word = match.group()
                # Handle contractions
                for contraction, expansion in self.contractions.items():
                    if word.endswith(contraction):
                        base = word[:-len(contraction)]
                        if base:
                            tokens.append(base)
                        tokens.append(expansion)
                        break
                else:
                    tokens.append(word)
            return tokens
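
A short sketch of both tokenizers on hypothetical input (the commented outputs follow from tracing the patterns above, assuming the classes are importable from webstoken.tokenizer):

    from webstoken.tokenizer import SentenceTokenizer, WordTokenizer

    print(SentenceTokenizer().tokenize("It works well. Really well! Try it today."))
    # ['It works well.', 'Really well!', 'Try it today.']

    print(WordTokenizer().tokenize("Don't panic, it's fine."))
    # ['Do', 'not', 'panic', ',', 'it', 'is', 'fine', '.']
    # Note the naive suffix split: "Don't" becomes 'Do' + 'not'. Also, the
    # compiled ABBREV_PATTERN is never consulted in tokenize(), so an
    # abbreviation like "Dr." can still trigger a sentence boundary.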