webscout-8.2.9-py3-none-any.whl → webscout-2026.1.19-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/UNFINISHED/test_lmarena.py
DELETED

@@ -1,119 +0,0 @@
```python
import cloudscraper

def main():
    print("Testing cloudscraper access to LMArena...")
    try:
        scraper = cloudscraper.create_scraper(browser={
            'browser': 'chrome',
            'platform': 'windows',
            'desktop': True
        })

        # Test basic access
        response = scraper.get("https://lmarena.ai")
        print(f"Status code: {response.status_code}")
        print(f"Response length: {len(response.text)}")
        print("Cloudscraper test successful!")

        # Generate a session hash
        import uuid
        session_hash = str(uuid.uuid4()).replace("-", "")
        print(f"Session hash: {session_hash}")

        # Create payloads
        model_id = "gpt-4o"
        prompt = "Hello, what is your name?"

        first_payload = {
            "data": [
                None,
                model_id,
                {"text": prompt, "files": []},
                {
                    "text_models": [model_id],
                    "all_text_models": [model_id],
                    "vision_models": [],
                    "all_vision_models": [],
                    "image_gen_models": [],
                    "all_image_gen_models": [],
                    "search_models": [],
                    "all_search_models": [],
                    "models": [model_id],
                    "all_models": [model_id],
                    "arena_type": "text-arena"
                }
            ],
            "event_data": None,
            "fn_index": 117,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        second_payload = {
            "data": [],
            "event_data": None,
            "fn_index": 118,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        third_payload = {
            "data": [None, 0.7, 1, 2048],
            "event_data": None,
            "fn_index": 119,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        # Set up headers
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

        # Make requests
        print("Sending first request...")
        response = scraper.post(
            "https://lmarena.ai/queue/join?",
            json=first_payload,
            headers=headers
        )
        print(f"First response status: {response.status_code}")

        print("Sending second request...")
        response = scraper.post(
            "https://lmarena.ai/queue/join?",
            json=second_payload,
            headers=headers
        )
        print(f"Second response status: {response.status_code}")

        print("Sending third request...")
        response = scraper.post(
            "https://lmarena.ai/queue/join?",
            json=third_payload,
            headers=headers
        )
        print(f"Third response status: {response.status_code}")

        # Stream the response
        stream_url = f"https://lmarena.ai/queue/data?session_hash={session_hash}"
        print(f"Streaming from: {stream_url}")

        with scraper.get(stream_url, headers={"Accept": "text/event-stream"}, stream=True) as response:
            print(f"Stream response status: {response.status_code}")
            text_position = 0
            response_text = ""

            for line in response.iter_lines(decode_unicode=True):
                if line:
                    print(line)

    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
```
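The flow above is the generic Gradio queue handshake: three POSTs to `/queue/join` register the prompt, the UI event, and the sampling parameters (`fn_index` 117, 118, 119), after which results arrive as server-sent events from `/queue/data`. The deleted script only printed the raw event lines; below is a minimal sketch of how such lines could be parsed, assuming the standard Gradio `data: {json}` framing with `msg` values like `process_completed`. These field names come from the generic Gradio queue protocol, not from the deleted script itself.

```python
import json
from typing import Any, Optional

def parse_queue_event(line: str) -> Optional[Any]:
    """Parse one SSE line from a Gradio /queue/data stream.

    Returns the job output once a 'process_completed' event arrives;
    returns None for keep-alives and intermediate events. The 'msg'
    and 'output' keys follow the generic Gradio queue protocol and
    are an assumption about what this endpoint emitted at the time.
    """
    if not line.startswith("data: "):
        return None
    event = json.loads(line[len("data: "):])
    if event.get("msg") == "process_completed":
        return event.get("output")
    return None
```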
webscout/Provider/Venice.py
DELETED

@@ -1,258 +0,0 @@
```python
from curl_cffi import CurlError
from curl_cffi.requests import Session  # Import Session
import json
from typing import Generator, Dict, Any, List, Optional, Union
from uuid import uuid4
import random

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class Venice(Provider):
    """
    A class to interact with the Venice AI API.
    """

    AVAILABLE_MODELS = [
        "mistral-31-24b",
        "dolphin-3.0-mistral-24b",
        "llama-3.2-3b-akash",
        "qwen2dot5-coder-32b",
        "deepseek-coder-v2-lite",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2000,
        timeout: int = 30,
        temperature: float = 0.8,  # Keep temperature, user might want to adjust
        top_p: float = 0.9,  # Keep top_p
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "mistral-31-24b",
        # System prompt is empty in the example, but keep it configurable
        system_prompt: str = ""
    ):
        """Initialize Venice AI client"""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Update API endpoint
        self.api_endpoint = "https://outerface.venice.ai/api/inference/chat"
        # Initialize curl_cffi Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.temperature = temperature
        self.top_p = top_p
        self.timeout = timeout
        self.model = model
        self.system_prompt = system_prompt
        self.last_response = {}

        # Update Headers based on successful request
        self.headers = {
            "User-Agent": LitAgent().random(),  # Keep using LitAgent
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",  # Keep existing
            "content-type": "application/json",
            "origin": "https://venice.ai",
            "referer": "https://venice.ai/",  # Update referer
            # Update sec-ch-ua to match example
            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            # Update sec-fetch-site to match example
            "sec-fetch-site": "same-site",
            # Add missing headers from example
            "priority": "u=1, i",
            "sec-gpc": "1",
            "x-venice-version": "interface@20250424.065523+50bac27"  # Add version header
        }

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _venice_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from Venice stream JSON objects."""
        if isinstance(chunk, dict) and chunk.get("kind") == "content":
            return chunk.get("content")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Update Payload construction based on successful request
        payload = {
            "requestId": str(uuid4())[:7],  # Keep generating request ID
            "modelId": self.model,
            "prompt": [{"content": conversation_prompt, "role": "user"}],
            "systemPrompt": self.system_prompt,  # Use configured system prompt
            "conversationType": "text",
            "temperature": self.temperature,  # Use configured temperature
            "webEnabled": True,  # Keep webEnabled
            "topP": self.top_p,  # Use configured topP
            "includeVeniceSystemPrompt": True,  # Set to True as per example
            "isCharacter": False,  # Keep as False
            # Add missing fields from example payload
            "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)),  # Generate anon user ID
            "isDefault": True,
            "textToSpeech": {"voiceId": "af_sky", "speed": 1},
            "clientProcessingTime": random.randint(10, 50)  # Randomize slightly
        }

        def for_stream():
            try:
                # Use curl_cffi session post
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="edge101"  # Match impersonation closer to headers
                )
                # Check response status after the call
                if response.status_code != 200:
                    # Include response text in error
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {response.text}"
                    )

                streaming_text = ""
                # Use sanitize_stream with the custom extractor
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,  # No simple prefix
                    to_json=True,  # Each line is JSON
                    content_extractor=self._venice_extractor,  # Use the specific extractor
                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _venice_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield content_chunk if raw else dict(text=content_chunk)

                # Update history and last response after stream finishes
                self.conversation.update_chat_history(prompt, streaming_text)
                self.last_response = {"text": streaming_text}

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            # Catch requests.exceptions.RequestException if needed, but CurlError is primary for curl_cffi
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            full_text = ""
            # Iterate through the generator provided by for_stream
            for chunk_data in for_stream():
                # Check if chunk_data is a dict (not raw) and has 'text'
                if isinstance(chunk_data, dict) and "text" in chunk_data:
                    full_text += chunk_data["text"]
                # If raw=True, chunk_data is the string content itself
                elif isinstance(chunk_data, str):
                    full_text += chunk_data
            # Update last_response after aggregation
            self.last_response = {"text": full_text}
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)
        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    # Ensure curl_cffi is installed
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(Venice.AVAILABLE_MODELS)

    for model in Venice.AVAILABLE_MODELS:
        try:
            test_ai = Venice(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            for chunk in response:
                response_text += chunk
                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
```
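Like every class under `webscout/Provider`, the removed `Venice` implemented the shared `Provider` interface (`ask`, `chat`, `get_message`). A minimal usage sketch, derived from the `__main__` test block above and assuming the pre-removal import path:

```python
# Sketch only: this provider was removed in 2026.1.19, so the import
# path below exists only in webscout 8.2.9 and earlier.
from webscout.Provider.Venice import Venice

ai = Venice(model="mistral-31-24b", timeout=60)
# chat(..., stream=True) yields plain-text chunks extracted by _venice_extractor
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)
```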
webscout/Provider/VercelAI.py
DELETED

@@ -1,253 +0,0 @@
```python
import re
import time
from curl_cffi import requests
import json
from typing import Union, Any, Dict, Generator, Optional
import uuid

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent


class VercelAI(Provider):
    """
    A class to interact with the Vercel AI API.
    """

    AVAILABLE_MODELS = [
        "chat-model",
        "chat-model-reasoning"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "chat-model",
        system_prompt: str = "You are a helpful AI assistant."
    ):
        """Initializes the Vercel AI API client."""

        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.vercel.ai/api/chat"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.litagent = LitAgent()
        self.headers = self.litagent.generate_fingerprint()
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        # Add Vercel AI specific headers
        self.session.headers.update({
            "authority": "chat.vercel.ai",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.vercel.ai",
            "priority": "u=1, i",
            "referer": "https://chat.vercel.ai/",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "x-kpsdk-c": "1-Cl4OUDwFNA",
            "x-kpsdk-cd": json.dumps({
                "workTime": int(time.time() * 1000),
                "id": str(uuid.uuid4()),
                "answers": [5, 5],
                "duration": 26.9,
                "d": 1981,
                "st": int(time.time() * 1000) - 1000,
                "rst": int(time.time() * 1000) - 500
            }),
            "x-kpsdk-ct": str(uuid.uuid4()),
            "x-kpsdk-r": "1-B1NfB2A",
            "x-kpsdk-v": "j-1.0.0"
        })

        # Add cookies
        self.session.cookies.update({
            "KP_UIDz": str(uuid.uuid4()),
            "KP_UIDz-ssn": str(uuid.uuid4())
        })

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _vercelai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the VercelAI stream format '0:"..."'."""
        if isinstance(chunk, str):
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)  # Look for 0:"...", possibly followed by comma or end of string
            if match:
                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI"""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "id": "guest",
            "messages": [
                {
                    "id": str(uuid.uuid4()),
                    "createdAt": "2025-03-29T09:13:16.992Z",
                    "role": "user",
                    "content": conversation_prompt,
                    "parts": [{"type": "text", "text": conversation_prompt}]
                }
            ],
            "selectedChatModelId": self.model
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
            )
            if not response.ok:
                error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                raise exceptions.FailedToGenerateResponseError(error_msg)

            streaming_text = ""
            # Use sanitize_stream with the custom extractor
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),  # Pass byte iterator
                intro_value=None,  # No simple prefix
                to_json=False,  # Content is not JSON
                content_extractor=self._vercelai_extractor  # Use the specific extractor
            )

            for content_chunk in processed_stream:
                if content_chunk and isinstance(content_chunk, str):
                    streaming_text += content_chunk
                    yield content_chunk if raw else dict(text=content_chunk)

            self.last_response.update(dict(text=streaming_text))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`"""
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Formatting is handled by the extractor now
        text = response.get("text", "")
        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n')  # Keep newline replacement if needed

if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(VercelAI.AVAILABLE_MODELS)

    for model in VercelAI.AVAILABLE_MODELS:
        try:
            test_ai = VercelAI(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            for chunk in response:
                response_text += chunk
                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
```