webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -143
- webscout/AIbase.py +247 -123
- webscout/AIutel.py +68 -132
- webscout/Bard.py +1072 -535
- webscout/Extra/GitToolkit/__init__.py +2 -2
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -0
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +189 -18
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -682
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +237 -304
- webscout/Provider/AISEARCH/README.md +106 -0
- webscout/Provider/AISEARCH/__init__.py +16 -10
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +130 -209
- webscout/Provider/AISEARCH/monica_search.py +200 -246
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -0
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +343 -173
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +60 -54
- webscout/Provider/GithubChat.py +385 -367
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -670
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -233
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -266
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -381
- webscout/Provider/Netwrck.py +273 -228
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -0
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -0
- webscout/Provider/OPENAI/__init__.py +148 -25
- webscout/Provider/OPENAI/ai4chat.py +348 -0
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/ayle.py +365 -0
- webscout/Provider/OPENAI/base.py +253 -46
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +514 -193
- webscout/Provider/OPENAI/chatsandbox.py +233 -0
- webscout/Provider/OPENAI/deepinfra.py +403 -272
- webscout/Provider/OPENAI/e2b.py +2370 -1350
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +186 -138
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -0
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +100 -104
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -327
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +110 -84
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -0
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -0
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +126 -115
- webscout/Provider/OPENAI/textpollinations.py +218 -133
- webscout/Provider/OPENAI/toolbaz.py +136 -166
- webscout/Provider/OPENAI/typefully.py +419 -0
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -211
- webscout/Provider/OPENAI/wisecat.py +103 -125
- webscout/Provider/OPENAI/writecream.py +185 -156
- webscout/Provider/OPENAI/x0gpt.py +227 -136
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -344
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -0
- webscout/Provider/TTI/__init__.py +37 -12
- webscout/Provider/TTI/base.py +147 -0
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -0
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -0
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -0
- webscout/Provider/TTS/README.md +186 -0
- webscout/Provider/TTS/__init__.py +43 -7
- webscout/Provider/TTS/base.py +523 -0
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -0
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -180
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +221 -121
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -199
- webscout/Provider/TypliAI.py +311 -0
- webscout/Provider/UNFINISHED/ChatHub.py +219 -0
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
- webscout/Provider/UNFINISHED/GizAI.py +300 -0
- webscout/Provider/UNFINISHED/Marcus.py +218 -0
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/UNFINISHED/XenAI.py +330 -0
- webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
- webscout/Provider/UNFINISHED/samurai.py +231 -0
- webscout/Provider/WiseCat.py +256 -196
- webscout/Provider/WrDoChat.py +390 -0
- webscout/Provider/__init__.py +115 -198
- webscout/Provider/ai4chat.py +181 -202
- webscout/Provider/akashgpt.py +330 -342
- webscout/Provider/cerebras.py +397 -242
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -234
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -266
- webscout/Provider/llama3mitril.py +230 -180
- webscout/Provider/llmchat.py +308 -213
- webscout/Provider/llmchatco.py +321 -311
- webscout/Provider/meta.py +996 -794
- webscout/Provider/oivscode.py +332 -0
- webscout/Provider/searchchat.py +316 -293
- webscout/Provider/sonus.py +264 -208
- webscout/Provider/toolbaz.py +359 -320
- webscout/Provider/turboseek.py +332 -219
- webscout/Provider/typefully.py +262 -280
- webscout/Provider/x0gpt.py +332 -256
- webscout/__init__.py +31 -38
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -293
- webscout/client.py +1497 -0
- webscout/conversation.py +140 -565
- webscout/exceptions.py +383 -339
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +32 -378
- webscout/prompt_manager.py +376 -274
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -0
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -140
- webscout/scout/core/scout.py +800 -568
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -460
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -809
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +574 -0
- webscout/swiftcli/core/context.py +98 -0
- webscout/swiftcli/core/group.py +268 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +243 -0
- webscout/swiftcli/decorators/options.py +247 -0
- webscout/swiftcli/decorators/output.py +392 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +134 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +58 -0
- webscout/swiftcli/utils/formatting.py +251 -0
- webscout/swiftcli/utils/parsing.py +368 -0
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -55
- webscout/zeroart/base.py +70 -60
- webscout/zeroart/effects.py +155 -99
- webscout/zeroart/fonts.py +1799 -816
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- webscout-2026.1.19.dist-info/entry_points.txt +4 -0
- webscout-2026.1.19.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/DWEBS.py +0 -477
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/yep.py +0 -376
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/core.py +0 -881
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1346
- webscout/webscout_search_async.py +0 -877
- webscout/yep_search.py +0 -297
- webscout-8.2.2.dist-info/METADATA +0 -734
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
webscout/LLM.py
DELETED
|
@@ -1,442 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
>>> from webscout.LLM import LLM, VLM
|
|
3
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
4
|
-
>>> response = llm.chat([{"role": "user", "content": "What's good?"}])
|
|
5
|
-
>>> print(response)
|
|
6
|
-
'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
|
|
7
|
-
|
|
8
|
-
>>> # For vision tasks
|
|
9
|
-
>>> vlm = VLM("cogvlm-grounding-generalist")
|
|
10
|
-
>>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
|
|
11
|
-
"""
|
|
12
|
-
|
|
13
|
-
import requests
|
|
14
|
-
import base64
|
|
15
|
-
import json
|
|
16
|
-
from typing import List, Dict, Union, Generator, Optional, Any
|
|
17
|
-
|
|
18
|
-
class LLMError(Exception):
|
|
19
|
-
"""Custom exception for LLM API errors 🚫
|
|
20
|
-
|
|
21
|
-
Examples:
|
|
22
|
-
>>> try:
|
|
23
|
-
... raise LLMError("API key not found!")
|
|
24
|
-
... except LLMError as e:
|
|
25
|
-
... print(f"Error: {e}")
|
|
26
|
-
Error: API key not found!
|
|
27
|
-
"""
|
|
28
|
-
pass
|
|
29
|
-
|
|
30
|
-
class LLM:
|
|
31
|
-
"""A class for chatting with DeepInfra's powerful language models! 🚀
|
|
32
|
-
|
|
33
|
-
This class lets you:
|
|
34
|
-
- Chat with state-of-the-art language models 💬
|
|
35
|
-
- Stream responses in real-time ⚡
|
|
36
|
-
- Control temperature and token limits 🎮
|
|
37
|
-
- Handle system messages and chat history 📝
|
|
38
|
-
|
|
39
|
-
Examples:
|
|
40
|
-
>>> from webscout.LLM import LLM
|
|
41
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
42
|
-
>>> response = llm.chat([
|
|
43
|
-
... {"role": "user", "content": "Write a short poem!"}
|
|
44
|
-
... ])
|
|
45
|
-
>>> print(response)
|
|
46
|
-
'Through starlit skies and morning dew,
|
|
47
|
-
Nature's beauty, forever new.
|
|
48
|
-
In every moment, magic gleams,
|
|
49
|
-
Life's poetry flows like gentle streams.'
|
|
50
|
-
"""
|
|
51
|
-
|
|
52
|
-
def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
|
|
53
|
-
"""
|
|
54
|
-
Initialize the LLM client.
|
|
55
|
-
|
|
56
|
-
Args:
|
|
57
|
-
model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
|
|
58
|
-
system_message: The system message to use for the conversation
|
|
59
|
-
|
|
60
|
-
Examples:
|
|
61
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
62
|
-
>>> print(llm.model)
|
|
63
|
-
'meta-llama/Meta-Llama-3-70B-Instruct'
|
|
64
|
-
"""
|
|
65
|
-
self.model = model
|
|
66
|
-
self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
|
|
67
|
-
self.conversation_history = [{"role": "system", "content": system_message}]
|
|
68
|
-
self.headers = {
|
|
69
|
-
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
|
|
70
|
-
'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
|
71
|
-
'Cache-Control': 'no-cache',
|
|
72
|
-
'Connection': 'keep-alive',
|
|
73
|
-
'Content-Type': 'application/json',
|
|
74
|
-
'Origin': 'https://deepinfra.com',
|
|
75
|
-
'Pragma': 'no-cache',
|
|
76
|
-
'Referer': 'https://deepinfra.com/',
|
|
77
|
-
'Sec-Fetch-Dest': 'empty',
|
|
78
|
-
'Sec-Fetch-Mode': 'cors',
|
|
79
|
-
'Sec-Fetch-Site': 'same-site',
|
|
80
|
-
'X-Deepinfra-Source': 'web-embed',
|
|
81
|
-
'accept': 'text/event-stream',
|
|
82
|
-
'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
|
|
83
|
-
'sec-ch-ua-mobile': '?0',
|
|
84
|
-
'sec-ch-ua-platform': '"macOS"'
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
def _prepare_payload(
|
|
88
|
-
self,
|
|
89
|
-
messages: List[Dict[str, str]],
|
|
90
|
-
stream: bool = False,
|
|
91
|
-
temperature: float = 0.7,
|
|
92
|
-
max_tokens: int = 8028,
|
|
93
|
-
stop: Optional[List[str]] = None,
|
|
94
|
-
) -> Dict[str, Any]:
|
|
95
|
-
"""Prepare the chat payload with all the right settings! 🎯
|
|
96
|
-
|
|
97
|
-
Args:
|
|
98
|
-
messages: Your chat messages (role & content)
|
|
99
|
-
stream: Want real-time responses? Set True! ⚡
|
|
100
|
-
temperature: Creativity level (0-1) 🎨
|
|
101
|
-
max_tokens: Max words to generate 📝
|
|
102
|
-
stop: Words to stop at (optional) 🛑
|
|
103
|
-
|
|
104
|
-
Returns:
|
|
105
|
-
Dict with all the API settings ready to go! 🚀
|
|
106
|
-
|
|
107
|
-
Examples:
|
|
108
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
109
|
-
>>> payload = llm._prepare_payload([
|
|
110
|
-
... {"role": "user", "content": "Hi!"}
|
|
111
|
-
... ])
|
|
112
|
-
>>> print(payload['model'])
|
|
113
|
-
'meta-llama/Meta-Llama-3-70B-Instruct'
|
|
114
|
-
"""
|
|
115
|
-
return {
|
|
116
|
-
'model': self.model,
|
|
117
|
-
'messages': messages,
|
|
118
|
-
'temperature': temperature,
|
|
119
|
-
'max_tokens': max_tokens,
|
|
120
|
-
'stop': stop or [],
|
|
121
|
-
'stream': stream
|
|
122
|
-
}
|
|
123
|
-
|
|
124
|
-
def chat(
|
|
125
|
-
self,
|
|
126
|
-
messages: List[Dict[str, str]],
|
|
127
|
-
stream: bool = False,
|
|
128
|
-
temperature: float = 0.7,
|
|
129
|
-
max_tokens: int = 8028,
|
|
130
|
-
stop: Optional[List[str]] = None,
|
|
131
|
-
) -> Union[str, Generator[str, None, None]]:
|
|
132
|
-
"""Start chatting with the AI! 💬
|
|
133
|
-
|
|
134
|
-
This method is your gateway to:
|
|
135
|
-
- Having awesome conversations 🗣️
|
|
136
|
-
- Getting creative responses 🎨
|
|
137
|
-
- Streaming real-time replies ⚡
|
|
138
|
-
- Controlling the output style 🎮
|
|
139
|
-
|
|
140
|
-
Args:
|
|
141
|
-
messages: Your chat messages (role & content)
|
|
142
|
-
stream: Want real-time responses? Set True!
|
|
143
|
-
temperature: Creativity level (0-1)
|
|
144
|
-
max_tokens: Max words to generate
|
|
145
|
-
stop: Words to stop at (optional)
|
|
146
|
-
|
|
147
|
-
Returns:
|
|
148
|
-
Either a complete response or streaming generator
|
|
149
|
-
|
|
150
|
-
Raises:
|
|
151
|
-
LLMError: If something goes wrong 🚫
|
|
152
|
-
|
|
153
|
-
Examples:
|
|
154
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
155
|
-
>>> # Regular chat
|
|
156
|
-
>>> response = llm.chat([
|
|
157
|
-
... {"role": "user", "content": "Tell me a joke!"}
|
|
158
|
-
... ])
|
|
159
|
-
>>> # Streaming chat
|
|
160
|
-
>>> for chunk in llm.chat([
|
|
161
|
-
... {"role": "user", "content": "Tell me a story!"}
|
|
162
|
-
... ], stream=True):
|
|
163
|
-
... print(chunk, end='')
|
|
164
|
-
"""
|
|
165
|
-
payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
|
|
166
|
-
|
|
167
|
-
try:
|
|
168
|
-
if stream:
|
|
169
|
-
return self._stream_response(payload)
|
|
170
|
-
else:
|
|
171
|
-
return self._send_request(payload)
|
|
172
|
-
except Exception as e:
|
|
173
|
-
raise LLMError(f"API request failed: {str(e)}")
|
|
174
|
-
|
|
175
|
-
def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
|
|
176
|
-
"""Stream the chat response in real-time! ⚡
|
|
177
|
-
|
|
178
|
-
Args:
|
|
179
|
-
payload: The prepared chat payload
|
|
180
|
-
|
|
181
|
-
Yields:
|
|
182
|
-
Streaming chunks of the response
|
|
183
|
-
|
|
184
|
-
Raises:
|
|
185
|
-
LLMError: If the stream request fails 🚫
|
|
186
|
-
|
|
187
|
-
Examples:
|
|
188
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
189
|
-
>>> for chunk in llm._stream_response(llm._prepare_payload([
|
|
190
|
-
... {"role": "user", "content": "Tell me a story!"}
|
|
191
|
-
... ])):
|
|
192
|
-
... print(chunk, end='')
|
|
193
|
-
"""
|
|
194
|
-
try:
|
|
195
|
-
with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
|
|
196
|
-
response.raise_for_status()
|
|
197
|
-
for line in response.iter_lines():
|
|
198
|
-
if line:
|
|
199
|
-
if line.strip() == b'data: [DONE]':
|
|
200
|
-
break
|
|
201
|
-
if line.startswith(b'data: '):
|
|
202
|
-
try:
|
|
203
|
-
chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
|
|
204
|
-
if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
|
|
205
|
-
yield content
|
|
206
|
-
except json.JSONDecodeError:
|
|
207
|
-
continue
|
|
208
|
-
except requests.RequestException as e:
|
|
209
|
-
raise LLMError(f"Stream request failed: {str(e)}")
|
|
210
|
-
|
|
211
|
-
def _send_request(self, payload: Dict[str, Any]) -> str:
|
|
212
|
-
"""Send a non-streaming chat request.
|
|
213
|
-
|
|
214
|
-
Args:
|
|
215
|
-
payload: The prepared chat payload
|
|
216
|
-
|
|
217
|
-
Returns:
|
|
218
|
-
The complete response
|
|
219
|
-
|
|
220
|
-
Raises:
|
|
221
|
-
LLMError: If the request fails 🚫
|
|
222
|
-
|
|
223
|
-
Examples:
|
|
224
|
-
>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
|
|
225
|
-
>>> response = llm._send_request(llm._prepare_payload([
|
|
226
|
-
... {"role": "user", "content": "Tell me a joke!"}
|
|
227
|
-
... ]))
|
|
228
|
-
>>> print(response)
|
|
229
|
-
"""
|
|
230
|
-
try:
|
|
231
|
-
response = requests.post(self.api_url, json=payload, headers=self.headers)
|
|
232
|
-
response.raise_for_status()
|
|
233
|
-
result = response.json()
|
|
234
|
-
return result['choices'][0]['message']['content']
|
|
235
|
-
except requests.RequestException as e:
|
|
236
|
-
raise LLMError(f"Request failed: {str(e)}")
|
|
237
|
-
except (KeyError, IndexError) as e:
|
|
238
|
-
raise LLMError(f"Invalid response format: {str(e)}")
|
|
239
|
-
except json.JSONDecodeError as e:
|
|
240
|
-
raise LLMError(f"Invalid JSON response: {str(e)}")
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
class VLM:
    """Your gateway to vision-language AI magic! 🖼️

    This class lets you:
    - Chat about images with AI 🎨
    - Get detailed image descriptions 📝
    - Answer questions about images 🤔
    - Stream responses in real-time ⚡

    Examples:
        >>> from webscout.LLM import VLM
        >>> vlm = VLM("cogvlm-grounding-generalist")
        >>> # Chat about an image
        >>> response = vlm.chat([{
        ...     "role": "user",
        ...     "content": [
        ...         {"type": "image", "image_url": "path/to/image.jpg"},
        ...         {"type": "text", "text": "What's in this image?"}
        ...     ]
        ... }])
        >>> print(response)
        'I see a beautiful sunset over mountains...'
    """

    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
        """Get ready for some vision-language magic! 🚀

        Args:
            model: Your chosen vision model (DeepInfra model name)
            system_message: Set the AI's personality (seeds the history)

        Examples:
            >>> vlm = VLM("cogvlm-grounding-generalist")
            >>> print(vlm.model)
            'cogvlm-grounding-generalist'
        """
        self.model = model
        # DeepInfra exposes an OpenAI-compatible chat-completions endpoint.
        self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
        # Conversation starts with the system prompt; NOTE(review): nothing in
        # this class appends to the history — callers manage messages themselves.
        self.conversation_history = [{"role": "system", "content": system_message}]
        # Browser-like headers; X-Deepinfra-Source mimics the official web embed.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Pragma': 'no-cache',
            'Referer': 'https://deepinfra.com/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'X-Deepinfra-Source': 'web-embed',
            'accept': 'text/event-stream',
            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"'
        }

    def chat(
        self,
        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
        stream: bool = False,
        temperature: float = 0.7,
        max_tokens: int = 8028,
    ) -> Union[str, Generator[str, None, None]]:
        """Chat about images with AI! 🖼️

        This method lets you:
        - Ask questions about images 🤔
        - Get detailed descriptions 📝
        - Stream responses in real-time ⚡
        - Control response creativity 🎨

        Args:
            messages: Your chat + image data (OpenAI-style message list)
            stream: Want real-time responses?
            temperature: Creativity level (0-1)
            max_tokens: Max tokens to generate

        Returns:
            Either a complete response string or a streaming generator.
            Note: with ``stream=True`` the generator is created lazily, so
            network errors surface while iterating, not from this call.

        Raises:
            LLMError: If something goes wrong 🚫

        Examples:
            >>> vlm = VLM("cogvlm-grounding-generalist")
            >>> # Regular chat with image
            >>> response = vlm.chat([{
            ...     "role": "user",
            ...     "content": [
            ...         {"type": "image", "image_url": "sunset.jpg"},
            ...         {"type": "text", "text": "Describe this scene"}
            ...     ]
            ... }])
            >>> # Streaming chat
            >>> for chunk in vlm.chat([...], stream=True):
            ...     print(chunk, end='')
        """
        payload = {
            "model": self.model,
            "messages": messages,
            "stream": stream,
            "temperature": temperature,
            "max_tokens": max_tokens
        }

        try:
            if stream:
                return self._stream_response(payload)
            else:
                return self._send_request(payload)
        except LLMError:
            # _send_request already raises a descriptive LLMError; re-raise
            # it as-is instead of wrapping it in a second LLMError.
            raise
        except Exception as e:
            raise LLMError(f"VLM API request failed: {str(e)}") from e

    def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
        """Stream the VLM chat response.

        Yields each text delta parsed from the server-sent ``data:`` lines,
        stopping at the ``data: [DONE]`` sentinel. Malformed JSON chunks are
        skipped rather than aborting the stream.

        Raises:
            LLMError: If the underlying HTTP request fails 🚫
        """
        try:
            with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
                response.raise_for_status()
                for line in response.iter_lines():
                    if line:
                        if line.strip() == b'data: [DONE]':
                            break
                        if line.startswith(b'data: '):
                            try:
                                chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
                                # Only yield when a non-empty content delta exists.
                                if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
                                    yield content
                            except json.JSONDecodeError:
                                continue
        except requests.RequestException as e:
            raise LLMError(f"VLM stream request failed: {str(e)}") from e

    def _send_request(self, payload: Dict[str, Any]) -> str:
        """Send a non-streaming VLM chat request.

        Returns the reply text at ``choices[0].message.content``.

        Raises:
            LLMError: On HTTP failure, invalid JSON, or an unexpected
                response shape 🚫
        """
        try:
            response = requests.post(self.api_url, json=payload, headers=self.headers)
            response.raise_for_status()
            result = response.json()
            return result['choices'][0]['message']['content']
        except requests.RequestException as e:
            raise LLMError(f"VLM request failed: {str(e)}") from e
        except (KeyError, IndexError) as e:
            raise LLMError(f"Invalid VLM response format: {str(e)}") from e
        except json.JSONDecodeError as e:
            raise LLMError(f"Invalid VLM JSON response: {str(e)}") from e
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
def encode_image_to_base64(image_path: str) -> str:
    """Turn your image into base64 magic! 🎨

    Args:
        image_path: Path to the image file to encode

    Returns:
        Your image as a base64 string ✨

    Raises:
        LLMError: If the image file can't be read 🚫 (the underlying
            OSError is wrapped and chained, not re-raised)

    Examples:
        >>> from webscout.LLM import encode_image_to_base64
        >>> image_data = encode_image_to_base64("cool_pic.jpg")
        >>> print(len(image_data))  # Check the encoded length
        12345
    """
    try:
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")
    except OSError as e:
        # IOError is an alias of OSError; catch the canonical name and wrap
        # the failure in the library's error type with a chained cause.
        raise LLMError(f"Failed to read image file: {str(e)}") from e
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
if __name__ == "__main__":
|
|
418
|
-
# Example usage
|
|
419
|
-
try:
|
|
420
|
-
# Initialize LLM with Llama 3 model
|
|
421
|
-
llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
|
|
422
|
-
|
|
423
|
-
# Example messages
|
|
424
|
-
messages = [
|
|
425
|
-
{"role": "user", "content": "Write a short poem about AI."}
|
|
426
|
-
]
|
|
427
|
-
|
|
428
|
-
# Example 1: Non-streaming response
|
|
429
|
-
print("\nNon-streaming response:")
|
|
430
|
-
response = llm.chat(messages, stream=False)
|
|
431
|
-
print(response)
|
|
432
|
-
|
|
433
|
-
# Example 2: Streaming response
|
|
434
|
-
print("\nStreaming response:")
|
|
435
|
-
for chunk in llm.chat(messages, stream=True):
|
|
436
|
-
print(chunk, end='', flush=True)
|
|
437
|
-
print("\n")
|
|
438
|
-
|
|
439
|
-
except LLMError as e:
|
|
440
|
-
print(f"Error: {str(e)}")
|
|
441
|
-
except KeyboardInterrupt:
|
|
442
|
-
print("\nOperation cancelled by user")
|
webscout/Litlogger/__init__.py
DELETED
|
@@ -1,67 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
LitLogger - A feature-rich, colorful logging library with intelligent level detection.
|
|
3
|
-
|
|
4
|
-
Features:
|
|
5
|
-
- Colorful console output
|
|
6
|
-
- Multiple output formats including JSON
|
|
7
|
-
- File logging with rotation
|
|
8
|
-
- Network logging (HTTP/HTTPS/TCP)
|
|
9
|
-
- Async logging support
|
|
10
|
-
- Intelligent log level detection
|
|
11
|
-
- Context managers
|
|
12
|
-
- Performance metrics
|
|
13
|
-
- Log aggregation
|
|
14
|
-
"""
|
|
15
|
-
|
|
16
|
-
from .core.logger import Logger
|
|
17
|
-
from .core.level import LogLevel
|
|
18
|
-
from .styles.colors import LogColors
|
|
19
|
-
from .styles.formats import LogFormat
|
|
20
|
-
from .styles.text import TextStyle
|
|
21
|
-
from .handlers.console import ConsoleHandler, ErrorConsoleHandler
|
|
22
|
-
from .handlers.file import FileHandler
|
|
23
|
-
from .handlers.network import NetworkHandler
|
|
24
|
-
from .utils.detectors import LevelDetector
|
|
25
|
-
from .utils.formatters import MessageFormatter
|
|
26
|
-
|
|
27
|
-
# Create a default logger instance
|
|
28
|
-
default_logger = Logger(
|
|
29
|
-
name="LitLogger",
|
|
30
|
-
handlers=[ConsoleHandler()]
|
|
31
|
-
)
|
|
32
|
-
|
|
33
|
-
# Expose common logging methods at package level
|
|
34
|
-
debug = default_logger.debug
|
|
35
|
-
info = default_logger.info
|
|
36
|
-
warning = default_logger.warning
|
|
37
|
-
error = default_logger.error
|
|
38
|
-
critical = default_logger.critical
|
|
39
|
-
|
|
40
|
-
__all__ = [
|
|
41
|
-
# Core
|
|
42
|
-
"Logger",
|
|
43
|
-
"LogLevel",
|
|
44
|
-
|
|
45
|
-
# Styles
|
|
46
|
-
"LogColors",
|
|
47
|
-
"LogFormat",
|
|
48
|
-
"TextStyle",
|
|
49
|
-
|
|
50
|
-
# Handlers
|
|
51
|
-
"ConsoleHandler",
|
|
52
|
-
"ErrorConsoleHandler",
|
|
53
|
-
"FileHandler",
|
|
54
|
-
"NetworkHandler",
|
|
55
|
-
|
|
56
|
-
# Utils
|
|
57
|
-
"LevelDetector",
|
|
58
|
-
"MessageFormatter",
|
|
59
|
-
|
|
60
|
-
# Package-level logging functions
|
|
61
|
-
"debug",
|
|
62
|
-
"info",
|
|
63
|
-
"warning",
|
|
64
|
-
"error",
|
|
65
|
-
"critical",
|
|
66
|
-
|
|
67
|
-
]
|
webscout/Litlogger/core/level.py
DELETED
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
from enum import Enum
|
|
2
|
-
|
|
3
|
-
class LogLevel(Enum):
    """Logging severity levels, numerically ordered from NOTSET to CRITICAL.

    The numeric values mirror the stdlib ``logging`` module's level numbers,
    and ``__lt__`` makes levels comparable by severity.
    """

    NOTSET = 0
    DEBUG = 10
    INFO = 20
    WARNING = 30
    ERROR = 40
    CRITICAL = 50

    @staticmethod
    def get_level(level_str: str) -> 'LogLevel':
        """Look up a level member by case-insensitive name.

        Args:
            level_str: Level name such as "debug" or "ERROR". Any falsy
                value (empty string, None) maps to ``LogLevel.NOTSET``.

        Returns:
            The matching LogLevel member.

        Raises:
            ValueError: If the name does not match any level.
        """
        if not level_str:
            return LogLevel.NOTSET
        try:
            return LogLevel[level_str.upper()]
        except KeyError:
            # Suppress the implicit KeyError context ("During handling of
            # the above exception...") — callers only need the clean
            # invalid-name message.
            raise ValueError(f"Invalid log level: {level_str}") from None

    def __lt__(self, other):
        # Order members by numeric severity; defer to the other operand
        # (NotImplemented) for unsupported types.
        if isinstance(other, LogLevel):
            return self.value < other.value
        return NotImplemented
|