webscout 8.3.7__py3-none-any.whl → 2025.10.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +237 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +27 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +663 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
- webscout-2025.10.13.dist-info/RECORD +329 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
|
@@ -1,277 +1,277 @@
|
|
|
1
|
-
from typing import List, Dict, Tuple, Set, Pattern
|
|
2
|
-
import re
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
class SentenceTokenizer:
    """Advanced sentence tokenizer with support for complex cases and proper formatting.

    Pipeline (see :meth:`tokenize`): protect URLs/emails/quoted spans with
    placeholders, normalize whitespace, mask the dots in known abbreviations,
    split on a compiled boundary regex, then restore everything.
    """

    def __init__(self) -> None:
        # Common abbreviations by category.  All entries are lowercase; matching
        # is done case-insensitively via ABBREV_PATTERN.
        self.TITLES: Set[str] = {
            'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
            'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col','maj', 'gen', 'capt', 'cmdr',
            'lt', 'sgt', 'cpl', 'pvt'
        }

        self.ACADEMIC: Set[str] = {
            'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
            'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
        }

        self.ORGANIZATIONS: Set[str] = {
            'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
            'intl', 'dept', 'est', 'dist', 'mfg', 'div'
        }

        # NOTE(review): 'may' is absent — possibly deliberate since "may" is a
        # common English word, but confirm this was intended.
        self.MONTHS: Set[str] = {
            'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
        }

        self.UNITS: Set[str] = {
            'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
            'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
        }

        self.TECHNOLOGY: Set[str] = {
            'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
            'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
        }

        self.MISC: Set[str] = {
            'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
            'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
        }

        # Combine all abbreviations into one lookup set used by both the
        # dot-masking pass and the capitalization pass.
        self.all_abbreviations: Set[str] = (
            self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
            self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
        )

        # Special patterns (raw regex source strings, compiled on demand).
        self.ELLIPSIS: str = r'\.{2,}|…'
        self.URL_PATTERN: str = (
            r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
        )
        self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
        # NUMBER_PATTERN is defined but not referenced elsewhere in this class.
        self.NUMBER_PATTERN: str = (
            r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
        )

        # Quote pairs: opening quote -> expected closing quote.
        # NOTE(review): the straight-quote keys appear more than once — in a
        # dict literal later duplicates silently overwrite earlier ones. The
        # duplicates look like curly quotes lost to an encoding round-trip;
        # confirm the intended characters.
        self.QUOTE_PAIRS: Dict[str, str] = {
            '"': '"', "'": "'", '"': '"', "「": "」", "『": "』",
            "«": "»", "‹": "›", "'": "'", "‚": "'"
        }

        # Bracket pairs: opening bracket -> closing bracket.
        # NOTE(review): '「' appears twice (same mapping) — harmless but
        # probably an editing slip.
        self.BRACKETS: Dict[str, str] = {
            '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
            '『': '』', '【': '】', '〖': '〗', '「': '」'
        }

        # Compile regex patterns once per instance.
        self._compile_patterns()

    def _compile_patterns(self) -> None:
        """Compile regex patterns for better performance."""
        # Pattern for finding potential sentence boundaries.  Splitting on this
        # pattern yields candidate sentences (see tokenize step 4).
        self.SENTENCE_END: Pattern = re.compile(
            r'''
            # Group for sentence endings
            (?:
                # Standard endings with optional quotes/brackets
                (?<=[.!?])[\"\'\)\]\}»›」』\s]*

                # Ellipsis
                |(?:\.{2,}|…)

                # Asian-style endings
                |(?<=[。!?」』】\s])
            )

            # Must be followed by whitespace and capital letter or number
            (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
            ''',
            re.VERBOSE
        )

        # Pattern for abbreviations: any known abbreviation, optionally
        # followed by a dot (case-insensitive).
        abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
        self.ABBREV_PATTERN: Pattern = re.compile(
            fr'\b(?:{abbrev_pattern})\.?',
            re.IGNORECASE
        )

    def _protect_special_cases(self, text: str) -> Tuple[str, Dict[str, str]]:
        """Protect URLs, emails, and other special cases from being split.

        Returns the text with protected spans replaced by ``__PROTECTED_n__``
        placeholders, plus the placeholder -> original-content mapping used by
        :meth:`_restore_special_cases`.
        """
        protected = text
        placeholders: Dict[str, str] = {}
        counter = 0

        # Protect URLs and emails.
        # NOTE(review): matches are found against the pre-loop string while
        # `protected` is rewritten inside the loop, and str.replace substitutes
        # every occurrence of the matched text — duplicated URLs/emails share
        # one placeholder and later match offsets go stale. Works for typical
        # input; confirm acceptable.
        for pattern in [self.URL_PATTERN, self.EMAIL_PATTERN]:
            for match in re.finditer(pattern, protected):
                placeholder = f'__PROTECTED_{counter}__'
                placeholders[placeholder] = match.group()
                protected = protected.replace(match.group(), placeholder)
                counter += 1

        # Protect quoted content using a stack of open quotes.
        stack = []
        protected_chars = list(protected)
        i = 0
        while i < len(protected_chars):
            char = protected_chars[i]
            if char in self.QUOTE_PAIRS:
                stack.append((char, i))
            elif stack and char == self.QUOTE_PAIRS[stack[-1][0]]:
                start_quote, start_idx = stack.pop()
                content = ''.join(protected_chars[start_idx:i + 1])
                placeholder = f'__PROTECTED_{counter}__'
                placeholders[placeholder] = content
                # Splice the placeholder characters over the quoted span.
                # NOTE(review): the splice changes the list length but `i` is
                # not re-based, so the cursor may land mid-placeholder on the
                # next iteration — confirm this cannot mis-pair later quotes.
                protected_chars[start_idx:i + 1] = list(placeholder)
                counter += 1
            i += 1

        return ''.join(protected_chars), placeholders

    def _restore_special_cases(self, text: str, placeholders: Dict[str, str]) -> str:
        """Restore protected content."""
        restored = text
        for placeholder, original in placeholders.items():
            restored = restored.replace(placeholder, original)
        return restored

    def _handle_abbreviations(self, text: str) -> str:
        """Handle abbreviations to prevent incorrect sentence splitting.

        Replaces the '.' in known abbreviations with the ``__DOT__`` marker so
        the boundary regex cannot split there; `_restore_formatting` undoes it.
        """
        def replace_abbrev(match: re.Match) -> str:
            abbr = match.group().lower().rstrip('.')
            if abbr in self.all_abbreviations:
                return match.group().replace('.', '__DOT__')
            return match.group()

        return self.ABBREV_PATTERN.sub(replace_abbrev, text)

    def _normalize_whitespace(self, text: str) -> str:
        """Normalize whitespace while preserving paragraph breaks."""
        # Replace multiple newlines with special marker
        text = re.sub(r'\n\s*\n', ' __PARA__ ', text)
        # Normalize remaining whitespace
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def _restore_formatting(self, sentences: List[str]) -> List[str]:
        """Restore original formatting and clean up sentences.

        Undoes the ``__DOT__``/``__PARA__`` markers, collapses whitespace,
        capitalizes sentence-initial words (unless they are known
        abbreviations), and drops empty entries.
        """
        restored = []
        for sentence in sentences:
            # Restore dots in abbreviations
            sentence = sentence.replace('__DOT__', '.')

            # Restore paragraph breaks
            sentence = sentence.replace('__PARA__', '\n\n')

            # Clean up whitespace
            sentence = re.sub(r'\s+', ' ', sentence).strip()

            # Capitalize first letter if it's lowercase and not an abbreviation
            words = sentence.split()
            if words and words[0].lower() not in self.all_abbreviations:
                sentence = sentence[0].upper() + sentence[1:]

            if sentence:
                restored.append(sentence)

        return restored

    def tokenize(self, text: str) -> List[str]:
        """
        Split text into sentences while handling complex cases.

        Args:
            text (str): Input text to split into sentences.

        Returns:
            List[str]: List of properly formatted sentences.
        """
        if not text or not text.strip():
            return []

        # Step 1: Protect special cases
        protected_text, placeholders = self._protect_special_cases(text)

        # Step 2: Normalize whitespace
        protected_text = self._normalize_whitespace(protected_text)

        # Step 3: Handle abbreviations
        protected_text = self._handle_abbreviations(protected_text)

        # Step 4: Split into potential sentences
        potential_sentences = self.SENTENCE_END.split(protected_text)

        # Step 5: Process and restore formatting
        sentences = self._restore_formatting(potential_sentences)

        # Step 6: Restore special cases
        sentences = [self._restore_special_cases(s, placeholders) for s in sentences]

        # Step 7: Post-process — merge fragments that begin lowercase into the
        # preceding sentence (they are treated as continuations).
        final_sentences = []
        current_sentence = []

        for sentence in sentences:
            # Skip empty sentences
            if not sentence.strip():
                continue

            # Check if sentence might be continuation of previous
            if current_sentence and sentence[0].islower():
                current_sentence.append(sentence)
            else:
                if current_sentence:
                    final_sentences.append(' '.join(current_sentence))
                current_sentence = [sentence]

        # Add last sentence if exists
        if current_sentence:
            final_sentences.append(' '.join(current_sentence))

        return final_sentences
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
def split_sentences(text: str) -> List[str]:
    """Split *text* into sentences using a fresh :class:`SentenceTokenizer`.

    Args:
        text (str): Input text to split into sentences.

    Returns:
        List[str]: List of properly formatted sentences.
    """
    return SentenceTokenizer().tokenize(text)
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
if __name__ == "__main__":
    # Demo: run the tokenizer over text packed with tricky constructs —
    # titles, degrees, ellipses, URLs, emails, units, and mixed-language text.
    test_text: str = """
    Dr. Smith (Ph.D., M.D.) visited Washington D.C. on Jan. 20, 2024! He met with Prof. Johnson at 3:30 p.m.
    They discussed A.I. and machine learning... "What about the U.S. market?" asked Dr. Smith.
    The meeting ended at 5 p.m. Later, they went to Mr. Wilson's house (located at 123 Main St.) for dinner.

    Visit our website at https://www.example.com or email us at test@example.com!
    The temperature was 72.5°F (22.5°C). The company's Q3 2023 revenue was $12.5M USD.

    「これは日本語の文章です。」This is a mixed-language text! How cool is that?

    Some technical specs: CPU: 3.5GHz, RAM: 16GB, Storage: 2TB SSD.
    Common abbreviations: etc., i.e., e.g., vs., cf., approx. 100 units.
    """

    # Tokenize once, then print each detected sentence with its 1-based index.
    detected: List[str] = split_sentences(test_text)
    print("Detected sentences:")
    print("-" * 80)
    idx = 1
    for item in detected:
        print(f"{idx}. {item}")
        idx += 1
    print("-" * 80)
|
|
1
|
+
from typing import List, Dict, Tuple, Set, Pattern
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class SentenceTokenizer:
|
|
6
|
+
"""Advanced sentence tokenizer with support for complex cases and proper formatting."""
|
|
7
|
+
|
|
8
|
+
def __init__(self) -> None:
|
|
9
|
+
# Common abbreviations by category
|
|
10
|
+
self.TITLES: Set[str] = {
|
|
11
|
+
'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
|
|
12
|
+
'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col','maj', 'gen', 'capt', 'cmdr',
|
|
13
|
+
'lt', 'sgt', 'cpl', 'pvt'
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
self.ACADEMIC: Set[str] = {
|
|
17
|
+
'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
|
|
18
|
+
'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
self.ORGANIZATIONS: Set[str] = {
|
|
22
|
+
'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
|
|
23
|
+
'intl', 'dept', 'est', 'dist', 'mfg', 'div'
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
self.MONTHS: Set[str] = {
|
|
27
|
+
'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
self.UNITS: Set[str] = {
|
|
31
|
+
'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
|
|
32
|
+
'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
self.TECHNOLOGY: Set[str] = {
|
|
36
|
+
'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
|
|
37
|
+
'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
self.MISC: Set[str] = {
|
|
41
|
+
'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
|
|
42
|
+
'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
# Combine all abbreviations
|
|
46
|
+
self.all_abbreviations: Set[str] = (
|
|
47
|
+
self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
|
|
48
|
+
self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
# Special patterns
|
|
52
|
+
self.ELLIPSIS: str = r'\.{2,}|…'
|
|
53
|
+
self.URL_PATTERN: str = (
|
|
54
|
+
r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
|
|
55
|
+
)
|
|
56
|
+
self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
|
|
57
|
+
self.NUMBER_PATTERN: str = (
|
|
58
|
+
r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
# Quote and bracket pairs
|
|
62
|
+
self.QUOTE_PAIRS: Dict[str, str] = {
|
|
63
|
+
'"': '"', "'": "'", '"': '"', "「": "」", "『": "』",
|
|
64
|
+
"«": "»", "‹": "›", "'": "'", "‚": "'"
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
self.BRACKETS: Dict[str, str] = {
|
|
68
|
+
'(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
|
|
69
|
+
'『': '』', '【': '】', '〖': '〗', '「': '」'
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
# Compile regex patterns
|
|
73
|
+
self._compile_patterns()
|
|
74
|
+
|
|
75
|
+
def _compile_patterns(self) -> None:
|
|
76
|
+
"""Compile regex patterns for better performance."""
|
|
77
|
+
# Pattern for finding potential sentence boundaries
|
|
78
|
+
self.SENTENCE_END: Pattern = re.compile(
|
|
79
|
+
r'''
|
|
80
|
+
# Group for sentence endings
|
|
81
|
+
(?:
|
|
82
|
+
# Standard endings with optional quotes/brackets
|
|
83
|
+
(?<=[.!?])[\"\'\)\]\}»›」』\s]*
|
|
84
|
+
|
|
85
|
+
# Ellipsis
|
|
86
|
+
|(?:\.{2,}|…)
|
|
87
|
+
|
|
88
|
+
# Asian-style endings
|
|
89
|
+
|(?<=[。!?」』】\s])
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
# Must be followed by whitespace and capital letter or number
|
|
93
|
+
(?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
|
|
94
|
+
''',
|
|
95
|
+
re.VERBOSE
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# Pattern for abbreviations
|
|
99
|
+
abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
|
|
100
|
+
self.ABBREV_PATTERN: Pattern = re.compile(
|
|
101
|
+
fr'\b(?:{abbrev_pattern})\.?',
|
|
102
|
+
re.IGNORECASE
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
def _protect_special_cases(self, text: str) -> Tuple[str, Dict[str, str]]:
|
|
106
|
+
"""Protect URLs, emails, and other special cases from being split."""
|
|
107
|
+
protected = text
|
|
108
|
+
placeholders: Dict[str, str] = {}
|
|
109
|
+
counter = 0
|
|
110
|
+
|
|
111
|
+
# Protect URLs and emails
|
|
112
|
+
for pattern in [self.URL_PATTERN, self.EMAIL_PATTERN]:
|
|
113
|
+
for match in re.finditer(pattern, protected):
|
|
114
|
+
placeholder = f'__PROTECTED_{counter}__'
|
|
115
|
+
placeholders[placeholder] = match.group()
|
|
116
|
+
protected = protected.replace(match.group(), placeholder)
|
|
117
|
+
counter += 1
|
|
118
|
+
|
|
119
|
+
# Protect quoted content
|
|
120
|
+
stack = []
|
|
121
|
+
protected_chars = list(protected)
|
|
122
|
+
i = 0
|
|
123
|
+
while i < len(protected_chars):
|
|
124
|
+
char = protected_chars[i]
|
|
125
|
+
if char in self.QUOTE_PAIRS:
|
|
126
|
+
stack.append((char, i))
|
|
127
|
+
elif stack and char == self.QUOTE_PAIRS[stack[-1][0]]:
|
|
128
|
+
start_quote, start_idx = stack.pop()
|
|
129
|
+
content = ''.join(protected_chars[start_idx:i + 1])
|
|
130
|
+
placeholder = f'__PROTECTED_{counter}__'
|
|
131
|
+
placeholders[placeholder] = content
|
|
132
|
+
protected_chars[start_idx:i + 1] = list(placeholder)
|
|
133
|
+
counter += 1
|
|
134
|
+
i += 1
|
|
135
|
+
|
|
136
|
+
return ''.join(protected_chars), placeholders
|
|
137
|
+
|
|
138
|
+
def _restore_special_cases(self, text: str, placeholders: Dict[str, str]) -> str:
|
|
139
|
+
"""Restore protected content."""
|
|
140
|
+
restored = text
|
|
141
|
+
for placeholder, original in placeholders.items():
|
|
142
|
+
restored = restored.replace(placeholder, original)
|
|
143
|
+
return restored
|
|
144
|
+
|
|
145
|
+
def _handle_abbreviations(self, text: str) -> str:
|
|
146
|
+
"""Handle abbreviations to prevent incorrect sentence splitting."""
|
|
147
|
+
def replace_abbrev(match: re.Match) -> str:
|
|
148
|
+
abbr = match.group().lower().rstrip('.')
|
|
149
|
+
if abbr in self.all_abbreviations:
|
|
150
|
+
return match.group().replace('.', '__DOT__')
|
|
151
|
+
return match.group()
|
|
152
|
+
|
|
153
|
+
return self.ABBREV_PATTERN.sub(replace_abbrev, text)
|
|
154
|
+
|
|
155
|
+
def _normalize_whitespace(self, text: str) -> str:
|
|
156
|
+
"""Normalize whitespace while preserving paragraph breaks."""
|
|
157
|
+
# Replace multiple newlines with special marker
|
|
158
|
+
text = re.sub(r'\n\s*\n', ' __PARA__ ', text)
|
|
159
|
+
# Normalize remaining whitespace
|
|
160
|
+
text = re.sub(r'\s+', ' ', text)
|
|
161
|
+
return text.strip()
|
|
162
|
+
|
|
163
|
+
def _restore_formatting(self, sentences: List[str]) -> List[str]:
|
|
164
|
+
"""Restore original formatting and clean up sentences."""
|
|
165
|
+
restored = []
|
|
166
|
+
for sentence in sentences:
|
|
167
|
+
# Restore dots in abbreviations
|
|
168
|
+
sentence = sentence.replace('__DOT__', '.')
|
|
169
|
+
|
|
170
|
+
# Restore paragraph breaks
|
|
171
|
+
sentence = sentence.replace('__PARA__', '\n\n')
|
|
172
|
+
|
|
173
|
+
# Clean up whitespace
|
|
174
|
+
sentence = re.sub(r'\s+', ' ', sentence).strip()
|
|
175
|
+
|
|
176
|
+
# Capitalize first letter if it's lowercase and not an abbreviation
|
|
177
|
+
words = sentence.split()
|
|
178
|
+
if words and words[0].lower() not in self.all_abbreviations:
|
|
179
|
+
sentence = sentence[0].upper() + sentence[1:]
|
|
180
|
+
|
|
181
|
+
if sentence:
|
|
182
|
+
restored.append(sentence)
|
|
183
|
+
|
|
184
|
+
return restored
|
|
185
|
+
|
|
186
|
+
def tokenize(self, text: str) -> List[str]:
    """
    Split text into sentences while handling complex cases.

    Args:
        text (str): Input text to split into sentences.

    Returns:
        List[str]: List of properly formatted sentences.
    """
    if not text or not text.strip():
        return []

    # Shield URLs, emails and quoted spans from the splitter.
    working, placeholders = self._protect_special_cases(text)

    # Flatten whitespace, then mask abbreviation dots.
    working = self._normalize_whitespace(working)
    working = self._handle_abbreviations(working)

    # Split on sentence-ending punctuation and tidy each candidate.
    candidates = self._restore_formatting(self.SENTENCE_END.split(working))

    # Put the shielded content back.
    candidates = [self._restore_special_cases(c, placeholders) for c in candidates]

    # Merge fragments that begin with a lowercase letter into the
    # preceding sentence — they are most likely continuations.
    merged: List[str] = []
    buffer: List[str] = []
    for candidate in candidates:
        if not candidate.strip():
            continue
        if buffer and candidate[0].islower():
            buffer.append(candidate)
        else:
            if buffer:
                merged.append(' '.join(buffer))
            buffer = [candidate]

    if buffer:
        merged.append(' '.join(buffer))

    return merged
def split_sentences(text: str) -> List[str]:
    """
    Convenience function to split text into sentences using SentenceTokenizer.

    Args:
        text (str): Input text to split into sentences.

    Returns:
        List[str]: List of properly formatted sentences.
    """
    return SentenceTokenizer().tokenize(text)
if __name__ == "__main__":
    # Exercise the tokenizer on text containing assorted tricky constructs:
    # titles, acronyms, times, URLs, emails, mixed scripts and measurements.
    test_text: str = """
Dr. Smith (Ph.D., M.D.) visited Washington D.C. on Jan. 20, 2024! He met with Prof. Johnson at 3:30 p.m.
They discussed A.I. and machine learning... "What about the U.S. market?" asked Dr. Smith.
The meeting ended at 5 p.m. Later, they went to Mr. Wilson's house (located at 123 Main St.) for dinner.

Visit our website at https://www.example.com or email us at test@example.com!
The temperature was 72.5°F (22.5°C). The company's Q3 2023 revenue was $12.5M USD.

「これは日本語の文章です。」This is a mixed-language text! How cool is that?

Some technical specs: CPU: 3.5GHz, RAM: 16GB, Storage: 2TB SSD.
Common abbreviations: etc., i.e., e.g., vs., cf., approx. 100 units.
"""

    # Print each detected sentence with a 1-based index.
    sentences: List[str] = split_sentences(test_text)
    print("Detected sentences:")
    print("-" * 80)
    for index, sentence in enumerate(sentences, 1):
        print(f"{index}. {sentence}")
    print("-" * 80)
class ScoutWebAnalyzer:
    """
    Advanced web content analysis utility.
    """

    @staticmethod
    def analyze_page_structure(scout_obj) -> Dict[str, Any]:
        """
        Analyze the structure of a web page.

        Args:
            scout_obj: Parsed Scout object

        Returns:
            Dict[str, Any]: Page structure analysis
        """
        tag_counts: Dict[str, int] = {}
        class_counts: Dict[str, int] = {}
        id_counts: Dict[str, int] = {}
        depth_counts: Dict[int, int] = {}

        # Frequency of every tag name in the document.
        for element in scout_obj.find_all():
            tag_counts[element.name] = tag_counts.get(element.name, 0) + 1

        # Frequency of every CSS class across all tags.
        for element in scout_obj.find_all(attrs={'class': True}):
            for css_class in element.get('class', []):
                class_counts[css_class] = class_counts.get(css_class, 0) + 1

        # Frequency of every id attribute value.
        for element in scout_obj.find_all(attrs={'id': True}):
            element_id = element.get('id')
            id_counts[element_id] = id_counts.get(element_id, 0) + 1

        # Count how many tags sit at each nesting depth of the tree.
        def _walk(node, depth=0):
            depth_counts[depth] = depth_counts.get(depth, 0) + 1
            for child in node.contents:
                if isinstance(child, Tag):
                    _walk(child, depth + 1)

        _walk(scout_obj._soup)

        return {
            'tag_distribution': tag_counts,
            'class_distribution': class_counts,
            'id_distribution': id_counts,
            'depth_analysis': depth_counts,
        }
class ScoutWebAnalyzer:
    """
    Advanced web content analysis utility.
    """

    @staticmethod
    def analyze_page_structure(scout_obj) -> Dict[str, Any]:
        """
        Analyze the structure of a web page.

        Args:
            scout_obj: Parsed Scout object

        Returns:
            Dict[str, Any]: Mapping with 'tag_distribution',
            'class_distribution', 'id_distribution' and 'depth_analysis'
            frequency tables.
        """
        analysis: Dict[str, Any] = {
            'tag_distribution': {},
            'class_distribution': {},
            'id_distribution': {},
            'depth_analysis': {}
        }

        # Tag distribution
        for tag in scout_obj.find_all():
            analysis['tag_distribution'][tag.name] = analysis['tag_distribution'].get(tag.name, 0) + 1

        # Class distribution
        for tag in scout_obj.find_all(attrs={'class': True}):
            for cls in tag.get('class', []):
                analysis['class_distribution'][cls] = analysis['class_distribution'].get(cls, 0) + 1

        # ID distribution — hoist the attribute lookup so 'id' is fetched
        # once per tag instead of twice (key and .get() probe).
        for tag in scout_obj.find_all(attrs={'id': True}):
            tag_id = tag.get('id')
            analysis['id_distribution'][tag_id] = analysis['id_distribution'].get(tag_id, 0) + 1

        # Depth analysis: count tags per nesting level via a recursive walk
        # over the parse tree, starting at the root soup object.
        def _analyze_depth(tag, current_depth=0):
            analysis['depth_analysis'][current_depth] = analysis['depth_analysis'].get(current_depth, 0) + 1
            for child in tag.contents:
                if isinstance(child, Tag):
                    _analyze_depth(child, current_depth + 1)

        _analyze_depth(scout_obj._soup)

        return analysis