webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/TTS/utils.py
CHANGED
@@ -1,280 +1,280 @@
 """
 Text processing utilities for TTS providers.
 """
-
-import
+import re
+from typing import Dict, List, Optional, Pattern, Set, Tuple, Union, cast


 class SentenceTokenizer:
     """Advanced sentence tokenizer with support for complex cases and proper formatting."""

     def __init__(self) -> None:
         # Common abbreviations by category
         self.TITLES: Set[str] = {
             'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
             'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'rev', 'col','maj', 'gen', 'capt', 'cmdr',
             'lt', 'sgt', 'cpl', 'pvt'
         }

         self.ACADEMIC: Set[str] = {
             'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
             'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
         }

         self.ORGANIZATIONS: Set[str] = {
             'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
             'intl', 'dept', 'est', 'dist', 'mfg', 'div'
         }

         self.MONTHS: Set[str] = {
             'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
         }

         self.UNITS: Set[str] = {
             'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
             'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
         }

         self.TECHNOLOGY: Set[str] = {
             'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
             'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
         }

         self.MISC: Set[str] = {
             'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
             'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
         }

         # Combine all abbreviations
         self.all_abbreviations: Set[str] = (
             self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
             self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
         )

         # Special patterns
         self.ELLIPSIS: str = r'\.{2,}|…'
         self.URL_PATTERN: str = (
             r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
         )
         self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
         self.NUMBER_PATTERN: str = (
             r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
         )

         # Quote and bracket pairs
         self.QUOTE_PAIRS: Dict[str, str] = {
-            '"': '"', "'": "'",
-            "«": "»", "‹": "›", "
+            '"': '"', "'": "'", "「": "」", "『": "』",
+            "«": "»", "‹": "›", "‚": "'"
         }

         self.BRACKETS: Dict[str, str] = {
             '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
             '『': '』', '【': '】', '〖': '〗', '「': '」'
         }

         # Compile regex patterns
         self._compile_patterns()

     def _compile_patterns(self) -> None:
         """Compile regex patterns for better performance."""
         # Pattern for finding potential sentence boundaries
         self.SENTENCE_END: Pattern = re.compile(
             r'''
             # Group for sentence endings
             (?:
                 # Standard endings with optional quotes/brackets
                 (?<=[.!?])[\"\'\)\]\}»›」』\s]*

                 # Ellipsis
                 |(?:\.{2,}|…)

                 # Asian-style endings
                 |(?<=[。!?」』】\s])
             )

             # Must be followed by whitespace and capital letter or number
             (?=\s+(?:[A-Z0-9]|["'({[\[「『《‹〈][A-Z]))
             ''',
             re.VERBOSE
         )

         # Pattern for abbreviations
         abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
         self.ABBREV_PATTERN: Pattern = re.compile(
             fr'\b(?:{abbrev_pattern})\.?',
             re.IGNORECASE
         )

     def _protect_special_cases(self, text: str) -> Tuple[str, Dict[str, str]]:
         """Protect URLs, emails, and other special cases from being split."""
         protected = text
         placeholders: Dict[str, str] = {}
         counter = 0

         # Protect URLs and emails
         for pattern in [self.URL_PATTERN, self.EMAIL_PATTERN]:
             for match in re.finditer(pattern, protected):
                 placeholder = f'__PROTECTED_{counter}__'
                 placeholders[placeholder] = match.group()
                 protected = protected.replace(match.group(), placeholder)
                 counter += 1

         # Protect quoted content
         stack = []
         protected_chars = list(protected)
         i = 0
         while i < len(protected_chars):
             char = protected_chars[i]
             if char in self.QUOTE_PAIRS:
                 stack.append((char, i))
             elif stack and char == self.QUOTE_PAIRS[stack[-1][0]]:
                 start_quote, start_idx = stack.pop()
                 content = ''.join(protected_chars[start_idx:i + 1])
                 placeholder = f'__PROTECTED_{counter}__'
                 placeholders[placeholder] = content
                 protected_chars[start_idx:i + 1] = list(placeholder)
                 counter += 1
             i += 1

         return ''.join(protected_chars), placeholders

     def _restore_special_cases(self, text: str, placeholders: Dict[str, str]) -> str:
         """Restore protected content."""
         restored = text
         for placeholder, original in placeholders.items():
             restored = restored.replace(placeholder, original)
         return restored

     def _handle_abbreviations(self, text: str) -> str:
         """Handle abbreviations to prevent incorrect sentence splitting."""
         def replace_abbrev(match: re.Match) -> str:
             abbr = match.group().lower().rstrip('.')
             if abbr in self.all_abbreviations:
                 return match.group().replace('.', '__DOT__')
             return match.group()

         return self.ABBREV_PATTERN.sub(replace_abbrev, text)

     def _normalize_whitespace(self, text: str) -> str:
         """Normalize whitespace while preserving paragraph breaks."""
         # Replace multiple newlines with special marker
         text = re.sub(r'\n\s*\n', ' __PARA__ ', text)
         # Normalize remaining whitespace
         text = re.sub(r'\s+', ' ', text)
         return text.strip()

     def _restore_formatting(self, sentences: List[str]) -> List[str]:
         """Restore original formatting and clean up sentences."""
         restored = []
         for sentence in sentences:
             # Restore dots in abbreviations
             sentence = sentence.replace('__DOT__', '.')

             # Restore paragraph breaks
             sentence = sentence.replace('__PARA__', '\n\n')

             # Clean up whitespace
             sentence = re.sub(r'\s+', ' ', sentence).strip()

             # Capitalize first letter if it's lowercase and not an abbreviation
             words = sentence.split()
             if words and words[0].lower() not in self.all_abbreviations:
                 sentence = sentence[0].upper() + sentence[1:]

             if sentence:
                 restored.append(sentence)

         return restored

     def tokenize(self, text: str) -> List[str]:
         """
         Split text into sentences while handling complex cases.

         Args:
             text (str): Input text to split into sentences.

         Returns:
             List[str]: List of properly formatted sentences.
         """
         if not text or not text.strip():
             return []

         # Step 1: Protect special cases
         protected_text, placeholders = self._protect_special_cases(text)

         # Step 2: Normalize whitespace
         protected_text = self._normalize_whitespace(protected_text)

         # Step 3: Handle abbreviations
         protected_text = self._handle_abbreviations(protected_text)

         # Step 4: Split into potential sentences
         potential_sentences = self.SENTENCE_END.split(protected_text)

         # Step 5: Process and restore formatting
         sentences = self._restore_formatting(potential_sentences)

         # Step 6: Restore special cases
         sentences = [self._restore_special_cases(s, placeholders) for s in sentences]

         # Step 7: Post-process sentences
         final_sentences = []
         current_sentence = []

         for sentence in sentences:
             # Skip empty sentences
             if not sentence.strip():
                 continue

             # Check if sentence might be continuation of previous
             if current_sentence and sentence[0].islower():
                 current_sentence.append(sentence)
             else:
                 if current_sentence:
                     final_sentences.append(' '.join(current_sentence))
                 current_sentence = [sentence]

         # Add last sentence if exists
         if current_sentence:
             final_sentences.append(' '.join(current_sentence))

         return final_sentences


 def split_sentences(text: str) -> List[str]:
     """
     Convenience function to split text into sentences using SentenceTokenizer.

     Args:
         text (str): Input text to split into sentences.

     Returns:
         List[str]: List of properly formatted sentences.
     """
     tokenizer = SentenceTokenizer()
     return tokenizer.tokenize(text)


 if __name__ == "__main__":
     # Test text with various challenging cases
     test_text: str = """
     Dr. Smith (Ph.D., M.D.) visited Washington D.C. on Jan. 20, 2024! He met with Prof. Johnson at 3:30 p.m.
     They discussed A.I. and machine learning... "What about the U.S. market?" asked Dr. Smith.
     The meeting ended at 5 p.m. Later, they went to Mr. Wilson's house (located at 123 Main St.) for dinner.

     Visit our website at https://www.example.com or email us at test@example.com!
     The temperature was 72.5°F (22.5°C). The company's Q3 2023 revenue was $12.5M USD.

     「これは日本語の文章です。」This is a mixed-language text! How cool is that?

     Some technical specs: CPU: 3.5GHz, RAM: 16GB, Storage: 2TB SSD.
     Common abbreviations: etc., i.e., e.g., vs., cf., approx. 100 units.
     """

     # Process and print each sentence
     sentences: List[str] = split_sentences(test_text)
     print("Detected sentences:")
     print("-" * 80)
     for i, sentence in enumerate(sentences, 1):
         print(f"{i}. {sentence}")
     print("-" * 80)
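For orientation, a minimal usage sketch of the module shown in the diff above. The import path is assumed from the file's location in the wheel (webscout/Provider/TTS/utils.py), and the sample text is illustrative; only `SentenceTokenizer`, `tokenize`, and `split_sentences` are taken from the diffed file itself.

```python
# Minimal usage sketch; import path assumed from the file's location in the wheel.
from webscout.Provider.TTS.utils import SentenceTokenizer, split_sentences

sample = 'Dr. Smith arrived at 3 p.m. Visit https://www.example.com for details! It went well.'

# One-off helper: constructs a SentenceTokenizer internally and calls tokenize().
for i, sentence in enumerate(split_sentences(sample), 1):
    print(f"{i}. {sentence}")

# Reuse a single tokenizer across many texts to avoid recompiling its regex patterns,
# which are built once in __init__ via _compile_patterns().
tokenizer = SentenceTokenizer()
sentences = tokenizer.tokenize(sample)
```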