webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/scout/core/text_utils.py
@@ -1,277 +1,277 @@ (all 277 lines removed and re-added unchanged)

from typing import List, Dict, Tuple, Set, Pattern
import re


class SentenceTokenizer:
    """Advanced sentence tokenizer with support for complex cases and proper formatting."""

    def __init__(self) -> None:
        # Common abbreviations by category
        self.TITLES: Set[str] = {
            'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
            'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col', 'maj', 'gen', 'capt', 'cmdr',
            'lt', 'sgt', 'cpl', 'pvt'
        }

        self.ACADEMIC: Set[str] = {
            'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
            'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
        }

        self.ORGANIZATIONS: Set[str] = {
            'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
            'intl', 'dept', 'est', 'dist', 'mfg', 'div'
        }

        self.MONTHS: Set[str] = {
            'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
        }

        self.UNITS: Set[str] = {
            'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
            'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
        }

        self.TECHNOLOGY: Set[str] = {
            'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
            'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
        }

        self.MISC: Set[str] = {
            'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
            'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
        }

        # Combine all abbreviations
        self.all_abbreviations: Set[str] = (
            self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
            self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
        )

        # Special patterns
        self.ELLIPSIS: str = r'\.{2,}|…'
        self.URL_PATTERN: str = (
            r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
        )
        self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
        self.NUMBER_PATTERN: str = (
            r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
        )

        # Quote and bracket pairs (curly-quote keys restored; the page
        # rendering had flattened them onto the straight-quote entries)
        self.QUOTE_PAIRS: Dict[str, str] = {
            '"': '"', "'": "'", '“': '”', "「": "」", "『": "』",
            "«": "»", "‹": "›", "‘": "’", "‚": "‘"
        }

        self.BRACKETS: Dict[str, str] = {
            '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
            '『': '』', '【': '】', '〖': '〗'
        }

        # Compile regex patterns
        self._compile_patterns()

    def _compile_patterns(self) -> None:
        """Compile regex patterns for better performance."""
        # Pattern for finding potential sentence boundaries
        self.SENTENCE_END: Pattern = re.compile(
            r'''
            # Group for sentence endings
            (?:
                # Standard endings with optional quotes/brackets
                (?<=[.!?])[\"\'\)\]\}»›」』\s]*

                # Ellipsis
                |(?:\.{2,}|…)

                # Asian-style endings
                |(?<=[。!?」』】\s])
            )

            # Must be followed by whitespace and capital letter or number
            (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
            ''',
            re.VERBOSE
        )

        # Pattern for abbreviations
        abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
        self.ABBREV_PATTERN: Pattern = re.compile(
            fr'\b(?:{abbrev_pattern})\.?',
            re.IGNORECASE
        )

    def _protect_special_cases(self, text: str) -> Tuple[str, Dict[str, str]]:
        """Protect URLs, emails, and other special cases from being split."""
        protected = text
        placeholders: Dict[str, str] = {}
        counter = 0

        # Protect URLs and emails
        for pattern in [self.URL_PATTERN, self.EMAIL_PATTERN]:
            for match in re.finditer(pattern, protected):
                placeholder = f'__PROTECTED_{counter}__'
                placeholders[placeholder] = match.group()
                protected = protected.replace(match.group(), placeholder)
                counter += 1

        # Protect quoted content
        stack = []
        protected_chars = list(protected)
        i = 0
        while i < len(protected_chars):
            char = protected_chars[i]
            if char in self.QUOTE_PAIRS:
                stack.append((char, i))
            elif stack and char == self.QUOTE_PAIRS[stack[-1][0]]:
                start_quote, start_idx = stack.pop()
                content = ''.join(protected_chars[start_idx:i + 1])
                placeholder = f'__PROTECTED_{counter}__'
                placeholders[placeholder] = content
                protected_chars[start_idx:i + 1] = list(placeholder)
                counter += 1
            i += 1

        return ''.join(protected_chars), placeholders

    def _restore_special_cases(self, text: str, placeholders: Dict[str, str]) -> str:
        """Restore protected content."""
        restored = text
        for placeholder, original in placeholders.items():
            restored = restored.replace(placeholder, original)
        return restored

    def _handle_abbreviations(self, text: str) -> str:
        """Handle abbreviations to prevent incorrect sentence splitting."""
        def replace_abbrev(match: re.Match) -> str:
            abbr = match.group().lower().rstrip('.')
            if abbr in self.all_abbreviations:
                return match.group().replace('.', '__DOT__')
            return match.group()

        return self.ABBREV_PATTERN.sub(replace_abbrev, text)

    def _normalize_whitespace(self, text: str) -> str:
        """Normalize whitespace while preserving paragraph breaks."""
        # Replace multiple newlines with special marker
        text = re.sub(r'\n\s*\n', ' __PARA__ ', text)
        # Normalize remaining whitespace
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def _restore_formatting(self, sentences: List[str]) -> List[str]:
        """Restore original formatting and clean up sentences."""
        restored = []
        for sentence in sentences:
            # Restore dots in abbreviations
            sentence = sentence.replace('__DOT__', '.')

            # Restore paragraph breaks
            sentence = sentence.replace('__PARA__', '\n\n')

            # Clean up whitespace
            sentence = re.sub(r'\s+', ' ', sentence).strip()

            # Capitalize first letter if it's lowercase and not an abbreviation
            words = sentence.split()
            if words and words[0].lower() not in self.all_abbreviations:
                sentence = sentence[0].upper() + sentence[1:]

            if sentence:
                restored.append(sentence)

        return restored

    def tokenize(self, text: str) -> List[str]:
        """
        Split text into sentences while handling complex cases.

        Args:
            text (str): Input text to split into sentences.

        Returns:
            List[str]: List of properly formatted sentences.
        """
        if not text or not text.strip():
            return []

        # Step 1: Protect special cases
        protected_text, placeholders = self._protect_special_cases(text)

        # Step 2: Normalize whitespace
        protected_text = self._normalize_whitespace(protected_text)

        # Step 3: Handle abbreviations
        protected_text = self._handle_abbreviations(protected_text)

        # Step 4: Split into potential sentences
        potential_sentences = self.SENTENCE_END.split(protected_text)

        # Step 5: Process and restore formatting
        sentences = self._restore_formatting(potential_sentences)

        # Step 6: Restore special cases
        sentences = [self._restore_special_cases(s, placeholders) for s in sentences]

        # Step 7: Post-process sentences
        final_sentences = []
        current_sentence = []

        for sentence in sentences:
            # Skip empty sentences
            if not sentence.strip():
                continue

            # Check if sentence might be continuation of previous
            if current_sentence and sentence[0].islower():
                current_sentence.append(sentence)
            else:
                if current_sentence:
                    final_sentences.append(' '.join(current_sentence))
                current_sentence = [sentence]

        # Add last sentence if exists
        if current_sentence:
            final_sentences.append(' '.join(current_sentence))

        return final_sentences


def split_sentences(text: str) -> List[str]:
    """
    Convenience function to split text into sentences using SentenceTokenizer.

    Args:
        text (str): Input text to split into sentences.

    Returns:
        List[str]: List of properly formatted sentences.
    """
    tokenizer = SentenceTokenizer()
    return tokenizer.tokenize(text)


if __name__ == "__main__":
    # Test text with various challenging cases
    test_text: str = """
    Dr. Smith (Ph.D., M.D.) visited Washington D.C. on Jan. 20, 2024! He met with Prof. Johnson at 3:30 p.m.
    They discussed A.I. and machine learning... "What about the U.S. market?" asked Dr. Smith.
    The meeting ended at 5 p.m. Later, they went to Mr. Wilson's house (located at 123 Main St.) for dinner.

    Visit our website at https://www.example.com or email us at test@example.com!
    The temperature was 72.5°F (22.5°C). The company's Q3 2023 revenue was $12.5M USD.

    「これは日本語の文章です。」This is a mixed-language text! How cool is that?

    Some technical specs: CPU: 3.5GHz, RAM: 16GB, Storage: 2TB SSD.
    Common abbreviations: etc., i.e., e.g., vs., cf., approx. 100 units.
    """

    # Process and print each sentence
    sentences: List[str] = split_sentences(test_text)
    print("Detected sentences:")
    print("-" * 80)
    for i, sentence in enumerate(sentences, 1):
        print(f"{i}. {sentence}")
    print("-" * 80)
webscout/scout/core/web_analyzer.py
@@ -1,52 +1,52 @@ (51 lines removed and re-added unchanged)

"""
Scout Web Analyzer Module
"""

from typing import Dict, Any
from ..element import Tag

class ScoutWebAnalyzer:
    """
    Advanced web content analysis utility.
    """
    @staticmethod
    def analyze_page_structure(scout_obj) -> Dict[str, Any]:
        """
        Analyze the structure of a web page.

        Args:
            scout_obj: Parsed Scout object

        Returns:
            Dict[str, Any]: Page structure analysis
        """
        analysis = {
            'tag_distribution': {},
            'class_distribution': {},
            'id_distribution': {},
            'depth_analysis': {}
        }

        # Tag distribution
        for tag in scout_obj.find_all():
            analysis['tag_distribution'][tag.name] = analysis['tag_distribution'].get(tag.name, 0) + 1

        # Class distribution
        for tag in scout_obj.find_all(attrs={'class': True}):
            for cls in tag.get('class', []):
                analysis['class_distribution'][cls] = analysis['class_distribution'].get(cls, 0) + 1

        # ID distribution
        for tag in scout_obj.find_all(attrs={'id': True}):
            analysis['id_distribution'][tag.get('id')] = analysis['id_distribution'].get(tag.get('id'), 0) + 1

        # Depth analysis
        def _analyze_depth(tag, current_depth=0):
            analysis['depth_analysis'][current_depth] = analysis['depth_analysis'].get(current_depth, 0) + 1
            for child in tag.contents:
                if isinstance(child, Tag):
                    _analyze_depth(child, current_depth + 1)

        _analyze_depth(scout_obj._soup)

        return analysis
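And a similar sketch for the analyzer. It assumes the Scout class (webscout/scout/core/scout.py, also changed in this release) parses an HTML string and exposes the find_all and _soup interface this module relies on; the import path and constructor signature are guesses, not confirmed by the diff.

# Usage sketch; Scout's import path and constructor are assumptions.
from webscout.scout import Scout
from webscout.scout.core.web_analyzer import ScoutWebAnalyzer

html = "<html><body><div id='main'><p class='intro'>Hello</p><p>World</p></div></body></html>"
page = Scout(html)  # hypothetical: parse the markup into a Scout tree

report = ScoutWebAnalyzer.analyze_page_structure(page)
print(report['tag_distribution'])    # e.g. {'html': 1, 'body': 1, 'div': 1, 'p': 2}
print(report['class_distribution'])  # e.g. {'intro': 1}
print(report['depth_analysis'])      # tag counts per nesting depth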