webscout-7.1-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webstoken/stemmer.py CHANGED
@@ -1,73 +1,73 @@
-"""
-Word stemming utilities.
-"""
-
-from typing import Set
-
-
-class Stemmer:
-    """Simple rule-based stemmer implementing Porter-like rules."""
-
-    def __init__(self):
-        self.vowels: Set[str] = {'a', 'e', 'i', 'o', 'u', 'y'}
-        self.doubles: Set[str] = {'bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt'}
-
-    def is_vowel(self, char: str, prev_char: str = None) -> bool:
-        """Check if a character is a vowel, considering 'y' special cases."""
-        return char in self.vowels or (char == 'y' and prev_char and prev_char not in self.vowels)
-
-    def count_syllables(self, word: str) -> int:
-        """Count syllables in a word based on vowel sequences."""
-        count = 0
-        prev_char = None
-        for i, char in enumerate(word.lower()):
-            if self.is_vowel(char, prev_char) and (i == 0 or not self.is_vowel(prev_char, word[i-2] if i > 1 else None)):
-                count += 1
-            prev_char = char
-        return count or 1
-
-    def stem(self, word: str) -> str:
-        """Apply stemming rules to reduce word to its root form."""
-        if len(word) <= 3:
-            return word
-
-        word = word.lower()
-
-        # Step 1: Handle plurals and past participles
-        if word.endswith('sses'):
-            word = word[:-2]
-        elif word.endswith('ies'):
-            word = word[:-2]
-        elif word.endswith('ss'):
-            pass
-        elif word.endswith('s') and len(word) > 4:
-            word = word[:-1]
-
-        # Step 2: Handle -ed and -ing
-        if word.endswith('ed') and self.count_syllables(word[:-2]) > 1:
-            word = word[:-2]
-        elif word.endswith('ing') and self.count_syllables(word[:-3]) > 1:
-            word = word[:-3]
-
-        # Step 3: Handle miscellaneous endings
-        if len(word) > 5:
-            if word.endswith('ement'):
-                word = word[:-5]
-            elif word.endswith('ment'):
-                word = word[:-4]
-            elif word.endswith('ent'):
-                word = word[:-3]
-
-        # Step 4: Handle -ity endings
-        if word.endswith('ity') and len(word) > 6:
-            word = word[:-3]
-            if word.endswith('abil'):
-                word = word[:-4] + 'able'
-            elif word.endswith('ic'):
-                word = word[:-2]
-
-        # Final step: Remove double consonants at the end
-        if len(word) > 2 and word[-2:] in self.doubles:
-            word = word[:-1]
-
-        return word
+"""
+Word stemming utilities.
+"""
+
+from typing import Set
+
+
+class Stemmer:
+    """Simple rule-based stemmer implementing Porter-like rules."""
+
+    def __init__(self):
+        self.vowels: Set[str] = {'a', 'e', 'i', 'o', 'u', 'y'}
+        self.doubles: Set[str] = {'bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt'}
+
+    def is_vowel(self, char: str, prev_char: str = None) -> bool:
+        """Check if a character is a vowel, considering 'y' special cases."""
+        return char in self.vowels or (char == 'y' and prev_char and prev_char not in self.vowels)
+
+    def count_syllables(self, word: str) -> int:
+        """Count syllables in a word based on vowel sequences."""
+        count = 0
+        prev_char = None
+        for i, char in enumerate(word.lower()):
+            if self.is_vowel(char, prev_char) and (i == 0 or not self.is_vowel(prev_char, word[i-2] if i > 1 else None)):
+                count += 1
+            prev_char = char
+        return count or 1
+
+    def stem(self, word: str) -> str:
+        """Apply stemming rules to reduce word to its root form."""
+        if len(word) <= 3:
+            return word
+
+        word = word.lower()
+
+        # Step 1: Handle plurals and past participles
+        if word.endswith('sses'):
+            word = word[:-2]
+        elif word.endswith('ies'):
+            word = word[:-2]
+        elif word.endswith('ss'):
+            pass
+        elif word.endswith('s') and len(word) > 4:
+            word = word[:-1]
+
+        # Step 2: Handle -ed and -ing
+        if word.endswith('ed') and self.count_syllables(word[:-2]) > 1:
+            word = word[:-2]
+        elif word.endswith('ing') and self.count_syllables(word[:-3]) > 1:
+            word = word[:-3]
+
+        # Step 3: Handle miscellaneous endings
+        if len(word) > 5:
+            if word.endswith('ement'):
+                word = word[:-5]
+            elif word.endswith('ment'):
+                word = word[:-4]
+            elif word.endswith('ent'):
+                word = word[:-3]
+
+        # Step 4: Handle -ity endings
+        if word.endswith('ity') and len(word) > 6:
+            word = word[:-3]
+            if word.endswith('abil'):
+                word = word[:-4] + 'able'
+            elif word.endswith('ic'):
+                word = word[:-2]
+
+        # Final step: Remove double consonants at the end
+        if len(word) > 2 and word[-2:] in self.doubles:
+            word = word[:-1]
+
+        return word
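
For orientation, a minimal usage sketch of the stemmer above. It assumes the webstoken package from this wheel is installed and that Stemmer is imported from webstoken.stemmer as the file layout suggests; the expected outputs are traced by hand from the rules shown, not taken from the package's tests.

    from webstoken.stemmer import Stemmer

    stemmer = Stemmer()
    print(stemmer.stem('classes'))    # 'class'   -- step 1 trims 'sses' by two characters
    print(stemmer.stem('connected'))  # 'connect' -- step 2 strips '-ed' ('connect' counts 2 syllables)
    print(stemmer.stem('agreement'))  # 'agre'    -- step 3 strips 'ement'; the truncation is aggressive
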
webstoken/tagger.py CHANGED
@@ -1,60 +1,60 @@
-"""
-Part-of-Speech tagging utilities.
-"""
-
-from typing import List, Set, Tuple
-
-
-class POSTagger:
-    """Simple rule-based Part-of-Speech tagger."""
-
-    def __init__(self):
-        # Basic rules for POS tagging
-        self.noun_suffixes: Set[str] = {'ness', 'ment', 'ship', 'dom', 'hood', 'er', 'or', 'ist'}
-        self.verb_suffixes: Set[str] = {'ize', 'ate', 'ify', 'ing', 'ed'}
-        self.adj_suffixes: Set[str] = {'able', 'ible', 'al', 'ful', 'ous', 'ive', 'less'}
-        self.adv_suffixes: Set[str] = {'ly'}
-
-        # Common words by POS
-        self.determiners: Set[str] = {'the', 'a', 'an', 'this', 'that', 'these', 'those'}
-        self.prepositions: Set[str] = {'in', 'on', 'at', 'by', 'with', 'from', 'to', 'for'}
-        self.pronouns: Set[str] = {'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him', 'her'}
-
-    def tag(self, tokens: List[str]) -> List[Tuple[str, str]]:
-        """Assign POS tags to tokens based on rules."""
-        tagged = []
-        prev_tag = None
-
-        for i, token in enumerate(tokens):
-            word = token.lower()
-
-            # Check special cases first
-            if word in self.determiners:
-                tag = 'DET'
-            elif word in self.prepositions:
-                tag = 'PREP'
-            elif word in self.pronouns:
-                tag = 'PRON'
-            # Check suffixes
-            elif any(word.endswith(suffix) for suffix in self.noun_suffixes):
-                tag = 'NOUN'
-            elif any(word.endswith(suffix) for suffix in self.verb_suffixes):
-                tag = 'VERB'
-            elif any(word.endswith(suffix) for suffix in self.adj_suffixes):
-                tag = 'ADJ'
-            elif any(word.endswith(suffix) for suffix in self.adv_suffixes):
-                tag = 'ADV'
-            # Default cases
-            elif word[0].isupper() and i > 0:
-                tag = 'PROPN'  # Proper noun
-            elif word.isdigit():
-                tag = 'NUM'
-            elif not word.isalnum():
-                tag = 'PUNCT'
-            else:
-                tag = 'NOUN'  # Default to noun
-
-            tagged.append((token, tag))
-            prev_tag = tag
-
-        return tagged
+"""
+Part-of-Speech tagging utilities.
+"""
+
+from typing import List, Set, Tuple
+
+
+class POSTagger:
+    """Simple rule-based Part-of-Speech tagger."""
+
+    def __init__(self):
+        # Basic rules for POS tagging
+        self.noun_suffixes: Set[str] = {'ness', 'ment', 'ship', 'dom', 'hood', 'er', 'or', 'ist'}
+        self.verb_suffixes: Set[str] = {'ize', 'ate', 'ify', 'ing', 'ed'}
+        self.adj_suffixes: Set[str] = {'able', 'ible', 'al', 'ful', 'ous', 'ive', 'less'}
+        self.adv_suffixes: Set[str] = {'ly'}
+
+        # Common words by POS
+        self.determiners: Set[str] = {'the', 'a', 'an', 'this', 'that', 'these', 'those'}
+        self.prepositions: Set[str] = {'in', 'on', 'at', 'by', 'with', 'from', 'to', 'for'}
+        self.pronouns: Set[str] = {'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him', 'her'}
+
+    def tag(self, tokens: List[str]) -> List[Tuple[str, str]]:
+        """Assign POS tags to tokens based on rules."""
+        tagged = []
+        prev_tag = None
+
+        for i, token in enumerate(tokens):
+            word = token.lower()
+
+            # Check special cases first
+            if word in self.determiners:
+                tag = 'DET'
+            elif word in self.prepositions:
+                tag = 'PREP'
+            elif word in self.pronouns:
+                tag = 'PRON'
+            # Check suffixes
+            elif any(word.endswith(suffix) for suffix in self.noun_suffixes):
+                tag = 'NOUN'
+            elif any(word.endswith(suffix) for suffix in self.verb_suffixes):
+                tag = 'VERB'
+            elif any(word.endswith(suffix) for suffix in self.adj_suffixes):
+                tag = 'ADJ'
+            elif any(word.endswith(suffix) for suffix in self.adv_suffixes):
+                tag = 'ADV'
+            # Default cases
+            elif word[0].isupper() and i > 0:
+                tag = 'PROPN'  # Proper noun
+            elif word.isdigit():
+                tag = 'NUM'
+            elif not word.isalnum():
+                tag = 'PUNCT'
+            else:
+                tag = 'NOUN'  # Default to noun
+
+            tagged.append((token, tag))
+            prev_tag = tag
+
+        return tagged
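
A similar sketch for the tagger, under the same import-path assumption. One quirk worth noting: tag() lowercases each token before the word[0].isupper() test, so the 'PROPN' branch appears unreachable as written, and capitalized words fall through to the suffix rules or the 'NOUN' default; prev_tag is likewise assigned but never read.

    from webstoken.tagger import POSTagger

    tagger = POSTagger()
    print(tagger.tag(['The', 'dog', 'runs', 'quickly']))
    # [('The', 'DET'), ('dog', 'NOUN'), ('runs', 'NOUN'), ('quickly', 'ADV')]
    # 'runs' matches no suffix rule and falls back to the 'NOUN' default.
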
webstoken/tokenizer.py CHANGED
@@ -1,158 +1,158 @@
-"""
-Tokenization utilities for sentence and word-level tokenization.
-"""
-
-from typing import List, Dict, Set, Pattern
-import re
-
-
-class SentenceTokenizer:
-    """Advanced sentence tokenizer with support for complex cases and proper formatting."""
-
-    def __init__(self) -> None:
-        # Common abbreviations by category
-        self.TITLES: Set[str] = {
-            'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
-            'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col','maj', 'gen', 'capt', 'cmdr',
-            'lt', 'sgt', 'cpl', 'pvt'
-        }
-
-        self.ACADEMIC: Set[str] = {
-            'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
-            'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
-        }
-
-        self.ORGANIZATIONS: Set[str] = {
-            'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
-            'intl', 'dept', 'est', 'dist', 'mfg', 'div'
-        }
-
-        self.MONTHS: Set[str] = {
-            'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
-        }
-
-        self.UNITS: Set[str] = {
-            'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
-            'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
-        }
-
-        self.TECHNOLOGY: Set[str] = {
-            'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
-            'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
-        }
-
-        self.MISC: Set[str] = {
-            'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
-            'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
-        }
-
-        # Combine all abbreviations
-        self.all_abbreviations: Set[str] = (
-            self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
-            self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
-        )
-
-        # Special patterns
-        self.ELLIPSIS: str = r'\.{2,}|…'
-        self.URL_PATTERN: str = (
-            r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
-        )
-        self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
-        self.NUMBER_PATTERN: str = (
-            r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
-        )
-
-        # Quote and bracket pairs
-        self.QUOTE_PAIRS: Dict[str, str] = {
-            '"': '"', "'": "'", '"': '"', "「": "」", "『": "』",
-            "«": "»", "‹": "›", "'": "'", "‚": "'"
-        }
-
-        self.BRACKETS: Dict[str, str] = {
-            '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
-            '『': '』', '【': '】', '〖': '〗', '「": "」'
-        }
-
-        # Compile regex patterns
-        self._compile_patterns()
-
-    def _compile_patterns(self) -> None:
-        """Compile regex patterns for better performance."""
-        # Pattern for finding potential sentence boundaries
-        self.SENTENCE_END: Pattern = re.compile(
-            r'''
-            # Group for sentence endings
-            (?:
-                # Standard endings with optional quotes/brackets
-                (?<=[.!?])[\"\'\)\]\}»›」』\s]*
-
-                # Ellipsis
-                |(?:\.{2,}|…)
-
-                # Asian-style endings
-                |(?<=[。!?」』】\s])
-            )
-
-            # Must be followed by whitespace and capital letter or number
-            (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
-            ''',
-            re.VERBOSE
-        )
-
-        # Pattern for abbreviations
-        abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
-        self.ABBREV_PATTERN: Pattern = re.compile(
-            fr'\b(?:{abbrev_pattern})\.?',
-            re.IGNORECASE
-        )
-
-    def tokenize(self, text: str) -> List[str]:
-        """Split text into sentences while handling complex cases."""
-        if not text or not text.strip():
-            return []
-
-        # Initial split on potential sentence boundaries
-        sentences = self.SENTENCE_END.split(text)
-
-        # Clean and validate sentences
-        final_sentences = []
-        for sentence in sentences:
-            sentence = sentence.strip()
-            if sentence:
-                final_sentences.append(sentence)
-
-        return final_sentences
-
-
-class WordTokenizer:
-    """Simple but effective word tokenizer with support for contractions and special cases."""
-
-    def __init__(self):
-        self.contractions = {
-            "n't": "not", "'ll": "will", "'re": "are", "'s": "is",
-            "'m": "am", "'ve": "have", "'d": "would"
-        }
-
-        self.word_pattern = re.compile(r"""
-            (?:[A-Za-z]+(?:[''][A-Za-z]+)*)|  # Words with optional internal apostrophes
-            (?:\d+(?:,\d{3})*(?:\.\d+)?)|     # Numbers with commas and decimals
-            (?:[@#]?\w+)|                     # Hashtags and mentions
-            (?:[^\w\s])                       # Punctuation and symbols
-        """, re.VERBOSE)
-
-    def tokenize(self, text: str) -> List[str]:
-        """Split text into words while handling contractions and special cases."""
-        tokens = []
-        for match in self.word_pattern.finditer(text):
-            word = match.group()
-            # Handle contractions
-            for contraction, expansion in self.contractions.items():
-                if word.endswith(contraction):
-                    base = word[:-len(contraction)]
-                    if base:
-                        tokens.append(base)
-                    tokens.append(expansion)
-                    break
-            else:
-                tokens.append(word)
-        return tokens
+"""
+Tokenization utilities for sentence and word-level tokenization.
+"""
+
+from typing import List, Dict, Set, Pattern
+import re
+
+
+class SentenceTokenizer:
+    """Advanced sentence tokenizer with support for complex cases and proper formatting."""
+
+    def __init__(self) -> None:
+        # Common abbreviations by category
+        self.TITLES: Set[str] = {
+            'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
+            'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col','maj', 'gen', 'capt', 'cmdr',
+            'lt', 'sgt', 'cpl', 'pvt'
+        }
+
+        self.ACADEMIC: Set[str] = {
+            'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
+            'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
+        }
+
+        self.ORGANIZATIONS: Set[str] = {
+            'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
+            'intl', 'dept', 'est', 'dist', 'mfg', 'div'
+        }
+
+        self.MONTHS: Set[str] = {
+            'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
+        }
+
+        self.UNITS: Set[str] = {
+            'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
+            'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
+        }
+
+        self.TECHNOLOGY: Set[str] = {
+            'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
+            'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
+        }
+
+        self.MISC: Set[str] = {
+            'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
+            'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
+        }
+
+        # Combine all abbreviations
+        self.all_abbreviations: Set[str] = (
+            self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
+            self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
+        )
+
+        # Special patterns
+        self.ELLIPSIS: str = r'\.{2,}|…'
+        self.URL_PATTERN: str = (
+            r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
+        )
+        self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
+        self.NUMBER_PATTERN: str = (
+            r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
+        )
+
+        # Quote and bracket pairs
+        self.QUOTE_PAIRS: Dict[str, str] = {
+            '"': '"', "'": "'", '"': '"', "「": "」", "『": "』",
+            "«": "»", "‹": "›", "'": "'", "‚": "'"
+        }
+
+        self.BRACKETS: Dict[str, str] = {
+            '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
+            '『': '』', '【': '】', '〖': '〗', '「": "」'
+        }
+
+        # Compile regex patterns
+        self._compile_patterns()
+
+    def _compile_patterns(self) -> None:
+        """Compile regex patterns for better performance."""
+        # Pattern for finding potential sentence boundaries
+        self.SENTENCE_END: Pattern = re.compile(
+            r'''
+            # Group for sentence endings
+            (?:
+                # Standard endings with optional quotes/brackets
+                (?<=[.!?])[\"\'\)\]\}»›」』\s]*
+
+                # Ellipsis
+                |(?:\.{2,}|…)
+
+                # Asian-style endings
+                |(?<=[。!?」』】\s])
+            )
+
+            # Must be followed by whitespace and capital letter or number
+            (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
+            ''',
+            re.VERBOSE
+        )
+
+        # Pattern for abbreviations
+        abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
+        self.ABBREV_PATTERN: Pattern = re.compile(
+            fr'\b(?:{abbrev_pattern})\.?',
+            re.IGNORECASE
+        )
+
+    def tokenize(self, text: str) -> List[str]:
+        """Split text into sentences while handling complex cases."""
+        if not text or not text.strip():
+            return []
+
+        # Initial split on potential sentence boundaries
+        sentences = self.SENTENCE_END.split(text)
+
+        # Clean and validate sentences
+        final_sentences = []
+        for sentence in sentences:
+            sentence = sentence.strip()
+            if sentence:
+                final_sentences.append(sentence)
+
+        return final_sentences
+
+
+class WordTokenizer:
+    """Simple but effective word tokenizer with support for contractions and special cases."""
+
+    def __init__(self):
+        self.contractions = {
+            "n't": "not", "'ll": "will", "'re": "are", "'s": "is",
+            "'m": "am", "'ve": "have", "'d": "would"
+        }
+
+        self.word_pattern = re.compile(r"""
+            (?:[A-Za-z]+(?:[''][A-Za-z]+)*)|  # Words with optional internal apostrophes
+            (?:\d+(?:,\d{3})*(?:\.\d+)?)|     # Numbers with commas and decimals
+            (?:[@#]?\w+)|                     # Hashtags and mentions
+            (?:[^\w\s])                       # Punctuation and symbols
+        """, re.VERBOSE)
+
+    def tokenize(self, text: str) -> List[str]:
+        """Split text into words while handling contractions and special cases."""
+        tokens = []
+        for match in self.word_pattern.finditer(text):
+            word = match.group()
+            # Handle contractions
+            for contraction, expansion in self.contractions.items():
+                if word.endswith(contraction):
+                    base = word[:-len(contraction)]
+                    if base:
+                        tokens.append(base)
+                    tokens.append(expansion)
+                    break
+            else:
+                tokens.append(word)
+        return tokens
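
Finally, a sketch covering both tokenizers, under the same import-path assumption; outputs are traced by hand from the code above. Note that ABBREV_PATTERN is compiled but never consulted in SentenceTokenizer.tokenize(), so an abbreviation period such as the one in 'Dr. Smith' can still trigger a sentence split despite the abbreviation tables.

    from webstoken.tokenizer import SentenceTokenizer, WordTokenizer

    print(SentenceTokenizer().tokenize("It works. Really well."))
    # ['It works.', 'Really well.']  -- zero-width split after '.', then whitespace stripped

    print(WordTokenizer().tokenize("Hello, world!"))
    # ['Hello', ',', 'world', '!']  -- punctuation emitted as separate tokens
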