webscout 8.2.9-py3-none-any.whl → 2026.1.19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/Apriel.py (new file)

@@ -0,0 +1,313 @@
+"""
+A class to interact with the Apriel Gradio chat API (servicenow-ai-apriel-chat.hf.space).
+
+This provider integrates the Apriel chat model into the Webscout framework.
+"""
+import time
+from typing import Any, Dict, Generator, Optional, Union, cast
+
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+
+from webscout import exceptions
+from webscout.AIbase import Provider, Response
+from webscout.AIutel import AwesomePrompts, Conversation, Optimizers
+from webscout.litagent import LitAgent
+from webscout.sanitize import sanitize_stream
+
+
+class Apriel(Provider):
+    """
+    A class to interact with the Apriel Gradio chat API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+
+    Examples:
+        >>> from webscout.Provider.apriel import Apriel
+        >>> ai = Apriel()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'The weather today is sunny with a high of 75°F.'
+    """
+    required_auth = False
+    AVAILABLE_MODELS = ["UNKNOWN"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: Optional[str] = None,
+        system_prompt: str = "You are a helpful assistant.",
+        model: str = "UNKNOWN"
+    ):
+        """
+        Initializes the Apriel API with given parameters.
+
+        Args:
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+            system_prompt (str): The system prompt to define the assistant's role.
+        """
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://servicenow-ai-apriel-chat.hf.space"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": self.agent.random(),
+            "Accept": "text/event-stream",
+            "Cache-Control": "no-cache",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if act:
+            self.conversation.intro = AwesomePrompts().get_act(cast(Union[str, int], act), default=self.conversation.intro, case_insensitive=True
+            ) or self.conversation.intro
+        elif intro:
+            self.conversation.intro = intro
+
+    def _get_session_hash(self) -> str:
+        """Generate or get a session hash for the Gradio API."""
+        try:
+            url = f"{self.api_endpoint}/gradio_api/heartbeat"
+            response = self.session.get(url, timeout=self.timeout)
+            response.raise_for_status()
+            return str(int(time.time()))
+        except Exception:
+            return str(int(time.time()))
+
+    def _join_queue(self, session_hash: str, message: str, fn_index: int = 1, trigger_id: int = 16) -> Optional[str]:
+        """Send the user message to /gradio_api/queue/join and return event_id if available."""
+        url = f"{self.api_endpoint}/gradio_api/queue/join"
+        payload = {
+            "data": [[], {"text": message, "files": []}, None],
+            "event_data": None,
+            "fn_index": fn_index,
+            "trigger_id": trigger_id,
+            "session_hash": session_hash,
+        }
+        response = self.session.post(url, json=payload, timeout=self.timeout)
+        response.raise_for_status()
+        try:
+            return response.json().get("event_id")
+        except Exception:
+            return None
+
+    def _run_predict(self, session_hash: str, fn_index: int = 3, trigger_id: int = 16) -> None:
+        """Call /gradio_api/run/predict to start processing the queued request."""
+        url = f"{self.api_endpoint}/gradio_api/run/predict"
+        payload = {
+            "data": [],
+            "event_data": None,
+            "fn_index": fn_index,
+            "trigger_id": trigger_id,
+            "session_hash": session_hash
+        }
+        response = self.session.post(url, json=payload, timeout=self.timeout)
+        response.raise_for_status()
+
+    @staticmethod
+    def _apriel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Apriel Gradio stream JSON objects."""
+        if isinstance(chunk, dict):
+            msg = chunk.get("msg")
+            if msg == "process_generating":
+                output = chunk.get("output", {})
+                data = output.get("data")
+                if data and isinstance(data, list) and len(data) > 0:
+                    ops = data[0]
+                    for op in ops:
+                        if isinstance(op, list) and len(op) > 2 and op[0] == "append":
+                            return op[2]
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+        **kwargs: Any,
+    ) -> Response:
+        """
+        Sends a prompt to the Apriel Gradio API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        session_hash = self._get_session_hash()
+        self._join_queue(session_hash, conversation_prompt)
+        self._run_predict(session_hash)
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                url = f"{self.api_endpoint}/gradio_api/queue/data?session_hash={session_hash}"
+                response = self.session.get(
+                    url,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    content_extractor=self._apriel_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        if raw:
+                            yield content_chunk
+                        else:
+                            streaming_text += content_chunk
+                            resp = dict(text=content_chunk)
+                            yield resp
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response if not raw else self.last_response.get("text", "")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+        **kwargs: Any,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the Apriel API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+            **kwargs: Additional parameters including raw.
+
+        Returns:
+            str: The API response.
+        """
+        raw = kwargs.get("raw", False)
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return cast(str, result)
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Response) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (Response): The API response.
+
+        Returns:
+            str: The message content.
+        """
+        if not isinstance(response, dict):
+            return str(response)
+        return cast(Dict[str, Any], response).get("text", "")
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Apriel(timeout=60)
+    response = ai.chat("write a poem about AI", stream=True, raw=False)
+    if hasattr(response, "__iter__") and not isinstance(response, (str, bytes)):
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    else:
+        print(response)
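The core of this new provider is `_apriel_extractor`, which pulls incremental text out of the Gradio queue's `process_generating` events received from `/gradio_api/queue/data`. The sketch below isolates that logic so it can be exercised without hitting the Space; the sample event is invented for illustration and only carries the fields the extractor actually reads, so real payloads may contain more.

```python
# Minimal offline sketch, not part of the package: mirrors the branching of
# Apriel._apriel_extractor from the diff above and feeds it a synthetic event.
from typing import Any, Dict, Optional, Union


def apriel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Return the appended text fragment from a Gradio queue event, if any."""
    if isinstance(chunk, dict):
        if chunk.get("msg") == "process_generating":
            data = chunk.get("output", {}).get("data")
            if data and isinstance(data, list) and len(data) > 0:
                for op in data[0]:
                    # An "append" op carries the newly generated text at index 2.
                    if isinstance(op, list) and len(op) > 2 and op[0] == "append":
                        return op[2]
    return None


# Hypothetical event shaped like what the extractor expects (fields are assumptions).
event = {
    "msg": "process_generating",
    "output": {"data": [[["append", [0, "content"], " stars"]]]},
}
print(apriel_extractor(event))  # -> " stars"
```

In the provider itself these fragments are produced by `sanitize_stream` over the SSE response and yielded either as raw strings or wrapped as `{"text": ...}` dictionaries, which `chat()` unwraps via `get_message()`.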
webscout/Provider/Ayle.py (new file)

@@ -0,0 +1,323 @@
+import json
+import uuid
+from typing import Any, Dict, Generator, Optional, Union, cast
+
+from curl_cffi import CurlError
+from curl_cffi.requests import Response as CurlResponse  # Import Response
+from curl_cffi.requests import Session
+
+from webscout import exceptions
+from webscout.AIbase import Provider, Response
+from webscout.AIutel import (  # Import sanitize_stream
+    AwesomePrompts,
+    Conversation,
+    Optimizers,
+    sanitize_stream,
+)
+from webscout.litagent import LitAgent
+
+# Model configurations
+MODEL_CONFIGS: Dict[str, Dict[str, Any]] = {
+    "ayle": {
+        "endpoint": "https://ayle.chat/api/chat",
+        "models": [
+            # Google Generative AI
+            "gemini-2.5-flash",
+            # Groq
+            "openai/gpt-oss-20b",
+            "openai/gpt-oss-120b",
+            "llama-3.1-8b-instant",
+            "llama-3.3-70b-versatile",
+            # OpenRouter
+            "mistralai/devstral-2512:free",
+            "z-ai/glm-4.5-air:free",
+            # Inception AI
+            "mercury",
+            "mercury-coder",
+            # Perplexity
+            "sonar",
+            "sonar-pro",
+        ],
+    },
+}
+
+
+class Ayle(Provider):
+    """
+    A class to interact with multiple AI APIs through the Ayle Chat interface.
+    """
+
+    required_auth = False
+    AVAILABLE_MODELS = [
+        # Google Generative AI
+        "gemini-2.5-flash",
+        # Groq
+        "openai/gpt-oss-20b",
+        "openai/gpt-oss-120b",
+        "llama-3.1-8b-instant",
+        "llama-3.3-70b-versatile",
+        # OpenRouter
+        "mistralai/devstral-2512:free",
+        "z-ai/glm-4.5-air:free",
+        # Inception AI
+        "mercury",
+        "mercury-coder",
+        # Perplexity
+        "sonar",
+        "sonar-pro",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: Optional[str] = None,
+        model: str = "gemini-2.5-flash",
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        temperature: float = 0.5,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1,
+    ):
+        """Initializes the Ayle client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = Session()  # Use curl_cffi Session
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://ayle.chat/",
+            "referer": "https://ayle.chat/",
+            "user-agent": self.agent.random(),
+        }
+
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(cast(Any, proxies))  # Assign proxies directly
+        self.session.cookies.update({"session": uuid.uuid4().hex})
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if act:
+            self.conversation.intro = (
+                AwesomePrompts().get_act(
+                    cast(Union[str, int], act),
+                    default=self.conversation.intro,
+                    case_insensitive=True,
+                )
+                or self.conversation.intro
+            )
+        elif intro:
+            self.conversation.intro = intro
+
+        self.provider = self._get_provider_from_model(self.model)
+        self.model_name = self.model
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        available_models = []
+        for provider, config in MODEL_CONFIGS.items():
+            for model_name in config["models"]:
+                available_models.append(f"{provider}/{model_name}")
+
+        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+        raise ValueError(error_msg)
+
+    @staticmethod
+    def _ayle_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Ayle stream."""
+        if isinstance(chunk, str):
+            if chunk.startswith('0:"'):
+                try:
+                    return json.loads(chunk[2:])
+                except Exception:
+                    return None
+        elif isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def _make_request(
+        self, payload: Dict[str, Any]
+    ) -> CurlResponse:  # Change type hint to Response
+        """Make the API request with proper error handling."""
+        try:
+            response = self.session.post(
+                self._get_endpoint(),
+                headers=self.headers,
+                json=payload,
+                timeout=self.timeout,
+                stream=True,  # Enable streaming for the request
+                impersonate="chrome120",  # Add impersonate
+            )
+            response.raise_for_status()
+            return response
+        except (
+            CurlError,
+            exceptions.FailedToGenerateResponseError,
+            Exception,
+        ) as e:  # Catch CurlError and others
+            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
+
+    def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
+        """Build the appropriate payload based on the provider."""
+        return {"messages": [{"role": "user", "content": conversation_prompt}], "model": self.model}
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+        **kwargs: Any,
+    ) -> Response:
+        """Sends a prompt to the API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+        payload = self._build_payload(conversation_prompt)
+        response = self._make_request(payload)
+        processed_stream = sanitize_stream(
+            data=response.iter_content(chunk_size=None),
+            intro_value=None,
+            to_json=False,
+            content_extractor=self._ayle_extractor,
+            yield_raw_on_error=False,
+            raw=raw,
+        )
+
+        if stream:
+            return self._ask_stream(prompt, processed_stream, raw)
+        else:
+            return self._ask_non_stream(prompt, processed_stream, raw)
+
+    def _ask_stream(self, prompt: str, processed_stream: Generator, raw: bool) -> Generator:
+        streaming_text = ""
+        for content_chunk in processed_stream:
+            if content_chunk and isinstance(content_chunk, str):
+                content_chunk = content_chunk.replace("\\\\", "\\").replace('\\"', '"')
+            if raw:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield content_chunk
+            else:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk)
+        self.last_response = {"text": streaming_text}
+        self.conversation.update_chat_history(prompt, streaming_text)
+
+    def _ask_non_stream(
+        self, prompt: str, processed_stream: Generator, raw: bool
+    ) -> Union[Dict[str, Any], str]:
+        full_response = ""
+        for content_chunk in processed_stream:
+            if content_chunk and isinstance(content_chunk, str):
+                content_chunk = content_chunk.replace("\\\\", "\\").replace('\\"', '"')
+            if raw:
+                if content_chunk and isinstance(content_chunk, str):
+                    full_response += content_chunk
+            else:
+                if content_chunk and isinstance(content_chunk, str):
+                    full_response += content_chunk
+        self.last_response = {"text": full_response}
+        self.conversation.update_chat_history(prompt, full_response)
+        return self.last_response if not raw else full_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+        **kwargs: Any,
+    ) -> Union[str, Generator[str, None, None]]:
+        raw = kwargs.get("raw", False)
+
+        def for_stream() -> Generator[str, None, None]:
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream() -> str:
+            result = self.ask(
+                prompt,
+                stream=False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return result if isinstance(result, str) else str(result)
+            return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Response) -> str:
+        if isinstance(response, dict):
+            text = cast(Dict[str, Any], response).get("text", "")
+        else:
+            text = str(response)
+        return text.replace("\\\\", "\\").replace('\\"', '"')
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = Ayle(model="gemini-2.5-flash")
+    response = ai.chat("tell me a joke", stream=True, raw=False)
+    if hasattr(response, "__iter__") and not isinstance(response, (str, bytes)):
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    else:
+        print(response)
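Judging from the `chunk.startswith('0:"')` check in `_ayle_extractor`, the ayle.chat endpoint appears to stream Vercel-AI-SDK-style data lines in which each text delta is prefixed with `0:` and JSON-encoded; that format is inferred from the extractor rather than documented anywhere in the diff. A minimal offline sketch of the parsing, with made-up sample lines:

```python
# Standalone sketch, not part of the package: same branching as Ayle._ayle_extractor
# above, run against synthetic stream lines so it needs no network access.
import json
from typing import Any, Dict, Optional, Union


def ayle_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Return the text delta carried by a stream line, or None for other lines."""
    if isinstance(chunk, str):
        if chunk.startswith('0:"'):
            try:
                return json.loads(chunk[2:])  # '0:"Hi"' -> "Hi"
            except Exception:
                return None
    elif isinstance(chunk, dict):
        # OpenAI-style chunks are also handled, mirroring the provider's fallback path.
        return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
    return None


# Made-up sample lines; only the 0:"..." deltas contribute text, the rest are skipped.
lines = ['0:"Why did the "', '0:"scarecrow win an award?"', 'e:{"finishReason":"stop"}']
print("".join(t for t in (ayle_extractor(line) for line in lines) if t))
```

Any entry in `AVAILABLE_MODELS` can be passed to the constructor (for example `Ayle(model="sonar")`); an unrecognized model name raises `ValueError` before any request is sent.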