webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/AllenAI.py
DELETED
@@ -1,440 +0,0 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import os
from uuid import uuid4
from typing import Any, Dict, Optional, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider, AsyncProvider
from webscout import exceptions
from webscout.litagent import LitAgent

class AllenAI(Provider):
    """
    A class to interact with the AllenAI (Ai2 Playground) API.
    """

    AVAILABLE_MODELS = [
        'olmo-2-0325-32b-instruct',
        'tulu3-405b'
    ]

    # Default model options from JS implementation
    DEFAULT_OPTIONS = {
        "max_tokens": 2048,
        "temperature": 0.7,
        "top_p": 1,
        "n": 1,
        "stop": None,
        "logprobs": None
    }

    # Host mapping for models - some models work best with specific hosts
    MODEL_HOST_MAP = {
        'olmo-2-0325-32b-instruct': 'modal',
        'tulu3-405b': 'inferd'
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "OLMo-2-1124-13B-Instruct",
        host: str = None
    ):
        """Initializes the AllenAI API client."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://playground.allenai.org"
        self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
        self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"

        # Updated headers (remove those handled by impersonate)
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'id-ID,id;q=0.9',
            'Origin': self.url,
            'Referer': f"{self.url}/",
            'Cache-Control': 'no-cache',
            'Pragma': 'no-cache',
            'Priority': 'u=1, i',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'cross-site',
            'Content-Type': 'application/json'
        }

        # Initialize curl_cffi Session
        self.session = Session()
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        self.model = model

        # Auto-detect host if not provided
        if not host:
            # Use the preferred host from the model-host map, or default to modal
            self.host = self.MODEL_HOST_MAP.get(model, 'modal')
        else:
            self.host = host

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        # Generate user ID if needed
        self.x_anonymous_user_id = None
        self.parent = None

        # Default options
        self.options = self.DEFAULT_OPTIONS.copy()
        self.options["max_tokens"] = max_tokens

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def whoami(self):
        """Gets or creates a user ID for authentication with Allen AI API"""
        temp_id = str(uuid4())
        request_headers = self.session.headers.copy() # Use session headers as base
        request_headers.update({"x-anonymous-user-id": temp_id})

        try:
            # Use curl_cffi session get with impersonate
            response = self.session.get(
                self.whoami_endpoint,
                headers=request_headers, # Pass updated headers
                timeout=self.timeout,
                impersonate="chrome110" # Use a common impersonation profile
            )
            response.raise_for_status() # Check for HTTP errors

            data = response.json()
            self.x_anonymous_user_id = data.get("client", temp_id)
            return data

        except CurlError as e: # Catch CurlError
            self.x_anonymous_user_id = temp_id
            return {"client": temp_id, "error": f"CurlError: {e}"}
        except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
            self.x_anonymous_user_id = temp_id
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            return {"client": temp_id, "error": f"{type(e).__name__}: {e} - {err_text}"}

    @staticmethod
    def _allenai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from AllenAI stream JSON objects."""
        if isinstance(chunk, dict):
            if chunk.get("message", "").startswith("msg_") and "content" in chunk:
                return chunk.get("content")
            elif "message" in chunk and chunk.get("content"): # Legacy handling
                return chunk.get("content")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False, # API supports streaming
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        host: str = None,
        private: bool = False,
        top_p: float = None,
        temperature: float = None,
        options: dict = None,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Ensure we have a user ID
        if not self.x_anonymous_user_id:
            self.whoami()
            # Check if whoami failed and we still don't have an ID
            if not self.x_anonymous_user_id:
                raise exceptions.AuthenticationError("Failed to obtain anonymous user ID.")

        # Prepare the API request headers for this specific request
        request_headers = self.session.headers.copy()
        request_headers.update({
            "x-anonymous-user-id": self.x_anonymous_user_id,
            "Content-Type": "application/json" # Ensure Content-Type is set
        })

        # Create options dictionary
        opts = self.options.copy()
        if temperature is not None:
            opts["temperature"] = temperature
        if top_p is not None:
            opts["top_p"] = top_p
        if options:
            opts.update(options)

        # Use the host param or the default host
        use_host = host or self.host

        # List of hosts to try - start with provided host, then try alternative hosts
        hosts_to_try = [use_host]
        if use_host == 'modal':
            hosts_to_try.append('inferd')
        else:
            hosts_to_try.append('modal')

        last_error = None

        # Try each host until one works
        for current_host in hosts_to_try:
            # Create the JSON payload as per the JS implementation
            payload = {
                "content": conversation_prompt,
                "private": private,
                "model": self.model,
                "host": current_host,
                "opts": opts
            }
            payload["host"] = current_host # Ensure host is updated in payload

            try:
                if stream:
                    # Pass request_headers to the stream method
                    return self._stream_request(payload, prompt, request_headers, raw)
                else:
                    # Pass request_headers to the non-stream method
                    return self._non_stream_request(payload, prompt, request_headers, raw)
            except (exceptions.FailedToGenerateResponseError, CurlError, Exception) as e:
                last_error = e
                # Log the error but continue to try other hosts
                print(f"Host '{current_host}' failed for model '{self.model}' ({type(e).__name__}), trying next host...")
                continue

        # If we've tried all hosts and none worked, raise the last error
        raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")

    def _stream_request(self, payload, prompt, request_headers, raw=False):
        """Handle streaming requests with the given payload and headers"""
        streaming_text = "" # Initialize outside try block
        current_parent = None # Initialize outside try block
        try:
            # Use curl_cffi session post with impersonate
            response = self.session.post(
                self.api_endpoint,
                headers=request_headers, # Use headers passed to this method
                json=payload,
                stream=True,
                timeout=self.timeout,
                impersonate="chrome110" # Use a common impersonation profile
            )
            response.raise_for_status() # Check for HTTP errors

            # Use sanitize_stream
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None), # Pass byte iterator
                intro_value=None, # No prefix
                to_json=True, # Stream sends JSON lines
                content_extractor=self._allenai_extractor, # Use the specific extractor
                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
            )

            for content_chunk in processed_stream:
                # content_chunk is the string extracted by _allenai_extractor
                if content_chunk and isinstance(content_chunk, str):
                    streaming_text += content_chunk
                    resp = dict(text=content_chunk)
                    yield resp if not raw else content_chunk

            # Try to extract parent ID from the *last* raw line (less reliable than before)
            # This part is tricky as sanitize_stream consumes the raw lines.
            # We might need to re-fetch or adjust if parent ID is critical per stream.
            # For now, we'll rely on the non-stream request to update parent ID more reliably.
            # Example placeholder logic (might not work reliably):
            try:
                last_line_data = json.loads(response.text.splitlines()[-1]) # Get last line if possible
                if last_line_data.get("id"):
                    current_parent = last_line_data.get("id")
                elif last_line_data.get("children"):
                    for child in last_line_data["children"]: # Use last_line_data here
                        if child.get("role") == "assistant":
                            current_parent = child.get("id")
                            break

                # Handle completion
                if last_line_data.get("final") or last_line_data.get("finish_reason") == "stop":
                    if current_parent:
                        self.parent = current_parent

                    # Update conversation history only if not empty
                    if streaming_text.strip():
                        self.conversation.update_chat_history(prompt, streaming_text)
                        self.last_response = {"text": streaming_text} # Update last response here
                    return # End the generator
            except Exception as e:
                # Log the error but continue with the rest of the function
                print(f"Error processing response data: {str(e)}")

            # If loop finishes without returning (e.g., no final message), update history
            if current_parent:
                self.parent = current_parent
            self.conversation.update_chat_history(prompt, streaming_text)
            self.last_response = {"text": streaming_text}

        except CurlError as e: # Catch CurlError
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
        except Exception as e: # Catch other potential exceptions (like HTTPError)
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e


    def _non_stream_request(self, payload, prompt, request_headers, raw=False):
        """Handle non-streaming requests with the given payload and headers"""
        try:
            # Use curl_cffi session post with impersonate
            response = self.session.post(
                self.api_endpoint,
                headers=request_headers, # Use headers passed to this method
                json=payload,
                stream=False, # Explicitly set stream to False
                timeout=self.timeout,
                impersonate="chrome110" # Use a common impersonation profile
            )
            response.raise_for_status() # Check for HTTP errors

            raw_response = response.text # Get raw text

            # Process the full text using sanitize_stream line by line
            processed_stream = sanitize_stream(
                data=raw_response.splitlines(), # Split into lines
                intro_value=None,
                to_json=True,
                content_extractor=self._allenai_extractor,
                yield_raw_on_error=False
            )
            # Aggregate the results
            parsed_response = "".join(list(processed_stream))

            # Update parent ID from the full response if possible (might need adjustment based on actual non-stream response structure)
            # This part is speculative as the non-stream structure isn't fully clear from the stream logic
            try:
                lines = raw_response.splitlines()
                if lines:
                    last_line_data = json.loads(lines[-1])
                    if last_line_data.get("id"):
                        self.parent = last_line_data.get("id")
                    elif last_line_data.get("children"):
                        for child in last_line_data["children"]:
                            if child.get("role") == "assistant":
                                self.parent = child.get("id")
                                break
            except (json.JSONDecodeError, IndexError):
                pass # Ignore errors parsing parent ID from non-stream

            self.conversation.update_chat_history(prompt, parsed_response)
            self.last_response = {"text": parsed_response}
            return self.last_response if not raw else parsed_response # Return dict or raw string

        except CurlError as e: # Catch CurlError
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
        except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e


    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        host: str = None,
        options: dict = None,
    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
        def for_stream_chat(): # Renamed inner function
            # ask() yields dicts or strings when streaming
            gen = self.ask(
                prompt,
                stream=True,
                raw=False, # Ensure ask yields dicts
                optimizer=optimizer,
                conversationally=conversationally,
                host=host,
                options=options
            )
            for response_dict in gen:
                yield self.get_message(response_dict) # get_message expects dict

        def for_non_stream_chat(): # Renamed inner function
            # ask() returns dict or str when not streaming
            response_data = self.ask(
                prompt,
                stream=False,
                raw=False, # Ensure ask returns dict
                optimizer=optimizer,
                conversationally=conversationally,
                host=host,
                options=options
            )
            return self.get_message(response_data) # get_message expects dict

        return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]



if __name__ == "__main__":
    # Ensure curl_cffi is installed
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in AllenAI.AVAILABLE_MODELS:
        try:
            # Auto-detect host
            test_ai = AllenAI(model=model, timeout=60)
            # Pass the host explicitly to display accurate error messages
            response = test_ai.chat("Say 'Hello' in one word")
            response_text = response

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
                print(f"{model:<50} {status:<10} {display_text} (host: {test_ai.host})")
            else:
                status = "✗"
                display_text = "Empty or invalid response"
                print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")