webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +524 -251
- webscout/AIbase.py +247 -319
- webscout/AIutel.py +68 -703
- webscout/Bard.py +1072 -1026
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
- webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
- webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
- webscout/Extra/GitToolkit/gitapi/search.py +162 -0
- webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
- webscout/Extra/GitToolkit/gitapi/user.py +128 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
- webscout/Extra/YTToolkit/README.md +443 -375
- webscout/Extra/YTToolkit/YTdownloader.py +953 -957
- webscout/Extra/YTToolkit/__init__.py +3 -3
- webscout/Extra/YTToolkit/transcriber.py +595 -476
- webscout/Extra/YTToolkit/ytapi/README.md +230 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
- webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
- webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
- webscout/Extra/YTToolkit/ytapi/https.py +89 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
- webscout/Extra/YTToolkit/ytapi/query.py +143 -40
- webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
- webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
- webscout/Extra/YTToolkit/ytapi/video.py +403 -232
- webscout/Extra/__init__.py +2 -3
- webscout/Extra/gguf.py +1298 -684
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +28 -28
- webscout/Extra/tempmail/async_utils.py +143 -141
- webscout/Extra/tempmail/base.py +172 -161
- webscout/Extra/tempmail/cli.py +191 -187
- webscout/Extra/tempmail/emailnator.py +88 -84
- webscout/Extra/tempmail/mail_tm.py +378 -361
- webscout/Extra/tempmail/temp_mail_io.py +304 -292
- webscout/Extra/weather.py +196 -194
- webscout/Extra/weather_ascii.py +17 -15
- webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
- webscout/Provider/AISEARCH/Perplexity.py +292 -333
- webscout/Provider/AISEARCH/README.md +106 -279
- webscout/Provider/AISEARCH/__init__.py +16 -9
- webscout/Provider/AISEARCH/brave_search.py +298 -0
- webscout/Provider/AISEARCH/iask_search.py +357 -410
- webscout/Provider/AISEARCH/monica_search.py +200 -220
- webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
- webscout/Provider/Algion.py +413 -0
- webscout/Provider/Andi.py +74 -69
- webscout/Provider/Apriel.py +313 -0
- webscout/Provider/Ayle.py +323 -0
- webscout/Provider/ChatSandbox.py +329 -342
- webscout/Provider/ClaudeOnline.py +365 -0
- webscout/Provider/Cohere.py +232 -208
- webscout/Provider/DeepAI.py +367 -0
- webscout/Provider/Deepinfra.py +467 -340
- webscout/Provider/EssentialAI.py +217 -0
- webscout/Provider/ExaAI.py +274 -261
- webscout/Provider/Gemini.py +175 -169
- webscout/Provider/GithubChat.py +385 -369
- webscout/Provider/Gradient.py +286 -0
- webscout/Provider/Groq.py +556 -801
- webscout/Provider/HadadXYZ.py +323 -0
- webscout/Provider/HeckAI.py +392 -375
- webscout/Provider/HuggingFace.py +387 -0
- webscout/Provider/IBM.py +340 -0
- webscout/Provider/Jadve.py +317 -291
- webscout/Provider/K2Think.py +306 -0
- webscout/Provider/Koboldai.py +221 -384
- webscout/Provider/Netwrck.py +273 -270
- webscout/Provider/Nvidia.py +310 -0
- webscout/Provider/OPENAI/DeepAI.py +489 -0
- webscout/Provider/OPENAI/K2Think.py +423 -0
- webscout/Provider/OPENAI/PI.py +463 -0
- webscout/Provider/OPENAI/README.md +890 -952
- webscout/Provider/OPENAI/TogetherAI.py +405 -0
- webscout/Provider/OPENAI/TwoAI.py +255 -357
- webscout/Provider/OPENAI/__init__.py +148 -40
- webscout/Provider/OPENAI/ai4chat.py +348 -293
- webscout/Provider/OPENAI/akashgpt.py +436 -0
- webscout/Provider/OPENAI/algion.py +303 -0
- webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
- webscout/Provider/OPENAI/base.py +253 -249
- webscout/Provider/OPENAI/cerebras.py +296 -0
- webscout/Provider/OPENAI/chatgpt.py +870 -556
- webscout/Provider/OPENAI/chatsandbox.py +233 -173
- webscout/Provider/OPENAI/deepinfra.py +403 -322
- webscout/Provider/OPENAI/e2b.py +2370 -1414
- webscout/Provider/OPENAI/elmo.py +278 -0
- webscout/Provider/OPENAI/exaai.py +452 -417
- webscout/Provider/OPENAI/freeassist.py +446 -0
- webscout/Provider/OPENAI/gradient.py +448 -0
- webscout/Provider/OPENAI/groq.py +380 -364
- webscout/Provider/OPENAI/hadadxyz.py +292 -0
- webscout/Provider/OPENAI/heckai.py +333 -308
- webscout/Provider/OPENAI/huggingface.py +321 -0
- webscout/Provider/OPENAI/ibm.py +425 -0
- webscout/Provider/OPENAI/llmchat.py +253 -0
- webscout/Provider/OPENAI/llmchatco.py +378 -335
- webscout/Provider/OPENAI/meta.py +541 -0
- webscout/Provider/OPENAI/netwrck.py +374 -357
- webscout/Provider/OPENAI/nvidia.py +317 -0
- webscout/Provider/OPENAI/oivscode.py +348 -287
- webscout/Provider/OPENAI/openrouter.py +328 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/sambanova.py +397 -0
- webscout/Provider/OPENAI/sonus.py +305 -304
- webscout/Provider/OPENAI/textpollinations.py +370 -339
- webscout/Provider/OPENAI/toolbaz.py +375 -413
- webscout/Provider/OPENAI/typefully.py +419 -355
- webscout/Provider/OPENAI/typliai.py +279 -0
- webscout/Provider/OPENAI/utils.py +314 -318
- webscout/Provider/OPENAI/wisecat.py +359 -387
- webscout/Provider/OPENAI/writecream.py +185 -163
- webscout/Provider/OPENAI/x0gpt.py +462 -365
- webscout/Provider/OPENAI/zenmux.py +380 -0
- webscout/Provider/OpenRouter.py +386 -0
- webscout/Provider/Openai.py +337 -496
- webscout/Provider/PI.py +443 -429
- webscout/Provider/QwenLM.py +346 -254
- webscout/Provider/STT/__init__.py +28 -0
- webscout/Provider/STT/base.py +303 -0
- webscout/Provider/STT/elevenlabs.py +264 -0
- webscout/Provider/Sambanova.py +317 -0
- webscout/Provider/TTI/README.md +69 -82
- webscout/Provider/TTI/__init__.py +37 -7
- webscout/Provider/TTI/base.py +147 -64
- webscout/Provider/TTI/claudeonline.py +393 -0
- webscout/Provider/TTI/magicstudio.py +292 -201
- webscout/Provider/TTI/miragic.py +180 -0
- webscout/Provider/TTI/pollinations.py +331 -221
- webscout/Provider/TTI/together.py +334 -0
- webscout/Provider/TTI/utils.py +14 -11
- webscout/Provider/TTS/README.md +186 -192
- webscout/Provider/TTS/__init__.py +43 -10
- webscout/Provider/TTS/base.py +523 -159
- webscout/Provider/TTS/deepgram.py +286 -156
- webscout/Provider/TTS/elevenlabs.py +189 -111
- webscout/Provider/TTS/freetts.py +218 -0
- webscout/Provider/TTS/murfai.py +288 -113
- webscout/Provider/TTS/openai_fm.py +364 -129
- webscout/Provider/TTS/parler.py +203 -111
- webscout/Provider/TTS/qwen.py +334 -0
- webscout/Provider/TTS/sherpa.py +286 -0
- webscout/Provider/TTS/speechma.py +693 -580
- webscout/Provider/TTS/streamElements.py +275 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TextPollinationsAI.py +331 -308
- webscout/Provider/TogetherAI.py +450 -0
- webscout/Provider/TwoAI.py +309 -475
- webscout/Provider/TypliAI.py +311 -305
- webscout/Provider/UNFINISHED/ChatHub.py +219 -209
- webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
- webscout/Provider/UNFINISHED/Qodo.py +481 -0
- webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
- webscout/Provider/UNFINISHED/Youchat.py +347 -330
- webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
- webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
- webscout/Provider/UNFINISHED/liner.py +342 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
- webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
- webscout/Provider/WiseCat.py +256 -233
- webscout/Provider/WrDoChat.py +390 -370
- webscout/Provider/__init__.py +115 -174
- webscout/Provider/ai4chat.py +181 -174
- webscout/Provider/akashgpt.py +330 -335
- webscout/Provider/cerebras.py +397 -290
- webscout/Provider/cleeai.py +236 -213
- webscout/Provider/elmo.py +291 -283
- webscout/Provider/geminiapi.py +343 -208
- webscout/Provider/julius.py +245 -223
- webscout/Provider/learnfastai.py +333 -325
- webscout/Provider/llama3mitril.py +230 -215
- webscout/Provider/llmchat.py +308 -258
- webscout/Provider/llmchatco.py +321 -306
- webscout/Provider/meta.py +996 -801
- webscout/Provider/oivscode.py +332 -309
- webscout/Provider/searchchat.py +316 -292
- webscout/Provider/sonus.py +264 -258
- webscout/Provider/toolbaz.py +359 -353
- webscout/Provider/turboseek.py +332 -266
- webscout/Provider/typefully.py +262 -202
- webscout/Provider/x0gpt.py +332 -299
- webscout/__init__.py +31 -39
- webscout/__main__.py +5 -5
- webscout/cli.py +585 -524
- webscout/client.py +1497 -70
- webscout/conversation.py +140 -436
- webscout/exceptions.py +383 -362
- webscout/litagent/__init__.py +29 -29
- webscout/litagent/agent.py +492 -455
- webscout/litagent/constants.py +60 -60
- webscout/models.py +505 -181
- webscout/optimizers.py +74 -420
- webscout/prompt_manager.py +376 -288
- webscout/sanitize.py +1514 -0
- webscout/scout/README.md +452 -404
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +7 -7
- webscout/scout/core/crawler.py +330 -210
- webscout/scout/core/scout.py +800 -607
- webscout/scout/core/search_result.py +51 -96
- webscout/scout/core/text_analyzer.py +64 -63
- webscout/scout/core/text_utils.py +412 -277
- webscout/scout/core/web_analyzer.py +54 -52
- webscout/scout/element.py +872 -478
- webscout/scout/parsers/__init__.py +70 -69
- webscout/scout/parsers/html5lib_parser.py +182 -172
- webscout/scout/parsers/html_parser.py +238 -236
- webscout/scout/parsers/lxml_parser.py +203 -178
- webscout/scout/utils.py +38 -37
- webscout/search/__init__.py +47 -0
- webscout/search/base.py +201 -0
- webscout/search/bing_main.py +45 -0
- webscout/search/brave_main.py +92 -0
- webscout/search/duckduckgo_main.py +57 -0
- webscout/search/engines/__init__.py +127 -0
- webscout/search/engines/bing/__init__.py +15 -0
- webscout/search/engines/bing/base.py +35 -0
- webscout/search/engines/bing/images.py +114 -0
- webscout/search/engines/bing/news.py +96 -0
- webscout/search/engines/bing/suggestions.py +36 -0
- webscout/search/engines/bing/text.py +109 -0
- webscout/search/engines/brave/__init__.py +19 -0
- webscout/search/engines/brave/base.py +47 -0
- webscout/search/engines/brave/images.py +213 -0
- webscout/search/engines/brave/news.py +353 -0
- webscout/search/engines/brave/suggestions.py +318 -0
- webscout/search/engines/brave/text.py +167 -0
- webscout/search/engines/brave/videos.py +364 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +80 -0
- webscout/search/engines/duckduckgo/base.py +189 -0
- webscout/search/engines/duckduckgo/images.py +100 -0
- webscout/search/engines/duckduckgo/maps.py +183 -0
- webscout/search/engines/duckduckgo/news.py +70 -0
- webscout/search/engines/duckduckgo/suggestions.py +22 -0
- webscout/search/engines/duckduckgo/text.py +221 -0
- webscout/search/engines/duckduckgo/translate.py +48 -0
- webscout/search/engines/duckduckgo/videos.py +80 -0
- webscout/search/engines/duckduckgo/weather.py +84 -0
- webscout/search/engines/mojeek.py +61 -0
- webscout/search/engines/wikipedia.py +77 -0
- webscout/search/engines/yahoo/__init__.py +41 -0
- webscout/search/engines/yahoo/answers.py +19 -0
- webscout/search/engines/yahoo/base.py +34 -0
- webscout/search/engines/yahoo/images.py +323 -0
- webscout/search/engines/yahoo/maps.py +19 -0
- webscout/search/engines/yahoo/news.py +258 -0
- webscout/search/engines/yahoo/suggestions.py +140 -0
- webscout/search/engines/yahoo/text.py +273 -0
- webscout/search/engines/yahoo/translate.py +19 -0
- webscout/search/engines/yahoo/videos.py +302 -0
- webscout/search/engines/yahoo/weather.py +220 -0
- webscout/search/engines/yandex.py +67 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +34 -0
- webscout/search/engines/yep/images.py +101 -0
- webscout/search/engines/yep/suggestions.py +38 -0
- webscout/search/engines/yep/text.py +99 -0
- webscout/search/http_client.py +172 -0
- webscout/search/results.py +141 -0
- webscout/search/yahoo_main.py +57 -0
- webscout/search/yep_main.py +48 -0
- webscout/server/__init__.py +48 -0
- webscout/server/config.py +78 -0
- webscout/server/exceptions.py +69 -0
- webscout/server/providers.py +286 -0
- webscout/server/request_models.py +131 -0
- webscout/server/request_processing.py +404 -0
- webscout/server/routes.py +642 -0
- webscout/server/server.py +351 -0
- webscout/server/ui_templates.py +1171 -0
- webscout/swiftcli/__init__.py +79 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +574 -297
- webscout/swiftcli/core/context.py +98 -104
- webscout/swiftcli/core/group.py +268 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +243 -221
- webscout/swiftcli/decorators/options.py +247 -220
- webscout/swiftcli/decorators/output.py +392 -252
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +134 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +58 -59
- webscout/swiftcli/utils/formatting.py +251 -252
- webscout/swiftcli/utils/parsing.py +368 -267
- webscout/update_checker.py +280 -136
- webscout/utils.py +28 -14
- webscout/version.py +2 -1
- webscout/version.py.bak +3 -0
- webscout/zeroart/__init__.py +218 -135
- webscout/zeroart/base.py +70 -66
- webscout/zeroart/effects.py +155 -101
- webscout/zeroart/fonts.py +1799 -1239
- webscout-2026.1.19.dist-info/METADATA +638 -0
- webscout-2026.1.19.dist-info/RECORD +312 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
- webscout/DWEBS.py +0 -520
- webscout/Extra/Act.md +0 -309
- webscout/Extra/GitToolkit/gitapi/README.md +0 -110
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.md +0 -430
- webscout/Extra/weather.md +0 -281
- webscout/Litlogger/README.md +0 -10
- webscout/Litlogger/__init__.py +0 -15
- webscout/Litlogger/formats.py +0 -4
- webscout/Litlogger/handlers.py +0 -103
- webscout/Litlogger/levels.py +0 -13
- webscout/Litlogger/logger.py +0 -92
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AISEARCH/felo_search.py +0 -202
- webscout/Provider/AISEARCH/genspark_search.py +0 -324
- webscout/Provider/AISEARCH/hika_search.py +0 -186
- webscout/Provider/AISEARCH/scira_search.py +0 -298
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -791
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Cloudflare.py +0 -324
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Flowith.py +0 -217
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
- webscout/Provider/OPENAI/Cloudflare.py +0 -378
- webscout/Provider/OPENAI/FreeGemini.py +0 -283
- webscout/Provider/OPENAI/NEMOTRON.py +0 -232
- webscout/Provider/OPENAI/Qwen3.py +0 -283
- webscout/Provider/OPENAI/api.py +0 -969
- webscout/Provider/OPENAI/c4ai.py +0 -373
- webscout/Provider/OPENAI/chatgptclone.py +0 -494
- webscout/Provider/OPENAI/copilot.py +0 -242
- webscout/Provider/OPENAI/flowith.py +0 -162
- webscout/Provider/OPENAI/freeaichat.py +0 -359
- webscout/Provider/OPENAI/mcpcore.py +0 -389
- webscout/Provider/OPENAI/multichat.py +0 -376
- webscout/Provider/OPENAI/opkfc.py +0 -496
- webscout/Provider/OPENAI/scirachat.py +0 -477
- webscout/Provider/OPENAI/standardinput.py +0 -433
- webscout/Provider/OPENAI/typegpt.py +0 -364
- webscout/Provider/OPENAI/uncovrAI.py +0 -463
- webscout/Provider/OPENAI/venice.py +0 -431
- webscout/Provider/OPENAI/yep.py +0 -382
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/aiarta.py +0 -365
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/fastflux.py +0 -200
- webscout/Provider/TTI/piclumen.py +0 -203
- webscout/Provider/TTI/pixelmuse.py +0 -225
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/TeachAnything.py +0 -229
- webscout/Provider/UNFINISHED/puterjs.py +0 -635
- webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/Writecream.py +0 -246
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/koala.py +0 -170
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -299
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/yep.py +0 -389
- webscout/litagent/Readme.md +0 -276
- webscout/litprinter/__init__.py +0 -59
- webscout/swiftcli/Readme.md +0 -323
- webscout/tempid.py +0 -128
- webscout/webscout_search.py +0 -1184
- webscout/webscout_search_async.py +0 -654
- webscout/yep_search.py +0 -347
- webscout/zeroart/README.md +0 -89
- webscout-8.2.9.dist-info/METADATA +0 -1033
- webscout-8.2.9.dist-info/RECORD +0 -289
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/TwoAI.py
CHANGED
|
@@ -1,475 +1,309 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
import
|
|
4
|
-
|
|
5
|
-
import
|
|
6
|
-
from
|
|
7
|
-
|
|
8
|
-
import
|
|
9
|
-
|
|
10
|
-
from webscout.AIutel import Optimizers
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
"model": self.model,
|
|
311
|
-
"temperature": self.temperature,
|
|
312
|
-
"max_tokens": self.max_tokens_to_sample,
|
|
313
|
-
"stream": stream,
|
|
314
|
-
"extra_body": {
|
|
315
|
-
"online_search": online_search,
|
|
316
|
-
}
|
|
317
|
-
}
|
|
318
|
-
|
|
319
|
-
def for_stream():
|
|
320
|
-
streaming_text = "" # Initialize outside try block
|
|
321
|
-
try:
|
|
322
|
-
response = self.session.post(
|
|
323
|
-
self.url,
|
|
324
|
-
json=payload,
|
|
325
|
-
stream=True,
|
|
326
|
-
timeout=self.timeout,
|
|
327
|
-
impersonate="chrome110"
|
|
328
|
-
)
|
|
329
|
-
|
|
330
|
-
if response.status_code != 200:
|
|
331
|
-
error_detail = response.text
|
|
332
|
-
try:
|
|
333
|
-
error_json = response.json()
|
|
334
|
-
error_detail = error_json.get("error", {}).get("message", error_detail)
|
|
335
|
-
except json.JSONDecodeError:
|
|
336
|
-
pass
|
|
337
|
-
raise exceptions.FailedToGenerateResponseError(
|
|
338
|
-
f"Request failed with status code {response.status_code} - {error_detail}"
|
|
339
|
-
)
|
|
340
|
-
|
|
341
|
-
# Use sanitize_stream for SSE processing
|
|
342
|
-
processed_stream = sanitize_stream(
|
|
343
|
-
data=response.iter_content(chunk_size=None), # Pass byte iterator
|
|
344
|
-
intro_value="data:",
|
|
345
|
-
to_json=True, # Stream sends JSON
|
|
346
|
-
skip_markers=["[DONE]"],
|
|
347
|
-
content_extractor=self._twoai_extractor, # Use the specific extractor
|
|
348
|
-
yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
|
|
349
|
-
)
|
|
350
|
-
|
|
351
|
-
for content_chunk in processed_stream:
|
|
352
|
-
# content_chunk is the string extracted by _twoai_extractor
|
|
353
|
-
if content_chunk and isinstance(content_chunk, str):
|
|
354
|
-
streaming_text += content_chunk
|
|
355
|
-
resp = dict(text=content_chunk)
|
|
356
|
-
yield resp if not raw else content_chunk
|
|
357
|
-
|
|
358
|
-
# If stream completes successfully, update history
|
|
359
|
-
self.last_response = {"text": streaming_text}
|
|
360
|
-
self.conversation.update_chat_history(prompt, streaming_text)
|
|
361
|
-
|
|
362
|
-
except CurlError as e:
|
|
363
|
-
raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
|
|
364
|
-
except exceptions.FailedToGenerateResponseError:
|
|
365
|
-
raise # Re-raise specific exception
|
|
366
|
-
except Exception as e:
|
|
367
|
-
raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
|
|
368
|
-
finally:
|
|
369
|
-
# Ensure history is updated even if stream ends abruptly but text was received
|
|
370
|
-
if streaming_text and not self.last_response: # Check if last_response wasn't set in the try block
|
|
371
|
-
self.last_response = {"text": streaming_text}
|
|
372
|
-
self.conversation.update_chat_history(prompt, streaming_text)
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
def for_non_stream():
|
|
376
|
-
# Non-stream still uses the stream internally and aggregates
|
|
377
|
-
streaming_text = ""
|
|
378
|
-
# We need to consume the generator from for_stream()
|
|
379
|
-
gen = for_stream()
|
|
380
|
-
try:
|
|
381
|
-
for chunk_data in gen:
|
|
382
|
-
if isinstance(chunk_data, dict) and "text" in chunk_data:
|
|
383
|
-
streaming_text += chunk_data["text"]
|
|
384
|
-
elif isinstance(chunk_data, str): # Handle raw=True case
|
|
385
|
-
streaming_text += chunk_data
|
|
386
|
-
except exceptions.FailedToGenerateResponseError:
|
|
387
|
-
# If the underlying stream fails, re-raise the error
|
|
388
|
-
raise
|
|
389
|
-
# self.last_response and history are updated within for_stream's try/finally
|
|
390
|
-
return self.last_response # Return the final aggregated dict
|
|
391
|
-
|
|
392
|
-
effective_stream = stream if stream is not None else True
|
|
393
|
-
return for_stream() if effective_stream else for_non_stream()
|
|
394
|
-
|
|
395
|
-
def chat(
|
|
396
|
-
self,
|
|
397
|
-
prompt: str,
|
|
398
|
-
stream: bool = True,
|
|
399
|
-
optimizer: str = None,
|
|
400
|
-
conversationally: bool = False,
|
|
401
|
-
online_search: bool = True,
|
|
402
|
-
image_path: str = None,
|
|
403
|
-
) -> str:
|
|
404
|
-
effective_stream = stream if stream is not None else True
|
|
405
|
-
|
|
406
|
-
def for_stream_chat():
|
|
407
|
-
# ask() yields dicts when raw=False (default for chat)
|
|
408
|
-
gen = self.ask(
|
|
409
|
-
prompt,
|
|
410
|
-
stream=True,
|
|
411
|
-
raw=False, # Ensure ask yields dicts
|
|
412
|
-
optimizer=optimizer,
|
|
413
|
-
conversationally=conversationally,
|
|
414
|
-
online_search=online_search,
|
|
415
|
-
image_path=image_path,
|
|
416
|
-
)
|
|
417
|
-
for response_dict in gen:
|
|
418
|
-
yield self.get_message(response_dict) # get_message expects dict
|
|
419
|
-
|
|
420
|
-
def for_non_stream_chat():
|
|
421
|
-
# ask() returns a dict when stream=False
|
|
422
|
-
response_dict = self.ask(
|
|
423
|
-
prompt,
|
|
424
|
-
stream=False, # Ensure ask returns dict
|
|
425
|
-
raw=False,
|
|
426
|
-
optimizer=optimizer,
|
|
427
|
-
conversationally=conversationally,
|
|
428
|
-
online_search=online_search,
|
|
429
|
-
image_path=image_path,
|
|
430
|
-
)
|
|
431
|
-
return self.get_message(response_dict) # get_message expects dict
|
|
432
|
-
|
|
433
|
-
return for_stream_chat() if effective_stream else for_non_stream_chat()
|
|
434
|
-
|
|
435
|
-
def get_message(self, response: dict) -> str:
|
|
436
|
-
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
437
|
-
return response.get("text", "") # Use .get for safety
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
if __name__ == "__main__":
|
|
441
|
-
print("-" * 80)
|
|
442
|
-
print(f"{'Model':<50} {'Status':<10} {'Response'}")
|
|
443
|
-
print("-" * 80)
|
|
444
|
-
|
|
445
|
-
for model in TwoAI.AVAILABLE_MODELS:
|
|
446
|
-
try:
|
|
447
|
-
test_ai = TwoAI(model=model, timeout=60)
|
|
448
|
-
# Test stream first
|
|
449
|
-
response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
|
|
450
|
-
response_text = ""
|
|
451
|
-
print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
|
|
452
|
-
for chunk in response_stream:
|
|
453
|
-
response_text += chunk
|
|
454
|
-
# Optional: print chunks as they arrive for visual feedback
|
|
455
|
-
# print(chunk, end="", flush=True)
|
|
456
|
-
|
|
457
|
-
if response_text and len(response_text.strip()) > 0:
|
|
458
|
-
status = "✓"
|
|
459
|
-
# Clean and truncate response
|
|
460
|
-
clean_text = response_text.strip() # Already decoded in get_message
|
|
461
|
-
display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
|
|
462
|
-
else:
|
|
463
|
-
status = "✗ (Stream)"
|
|
464
|
-
display_text = "Empty or invalid stream response"
|
|
465
|
-
print(f"\r{model:<50} {status:<10} {display_text}")
|
|
466
|
-
|
|
467
|
-
# Optional: Add non-stream test if needed, but stream test covers basic functionality
|
|
468
|
-
# print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
|
|
469
|
-
# response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
|
|
470
|
-
# if not response_non_stream or len(response_non_stream.strip()) == 0:
|
|
471
|
-
# print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
except Exception as e:
|
|
475
|
-
print(f"\r{model:<50} {'✗':<10} {str(e)}")
|
|
1
|
+
import base64
|
|
2
|
+
import json
|
|
3
|
+
from typing import Any, Dict, Generator, Optional, Union, cast
|
|
4
|
+
|
|
5
|
+
from curl_cffi import CurlError
|
|
6
|
+
from curl_cffi.requests import Session
|
|
7
|
+
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from webscout.AIbase import Provider, Response
|
|
10
|
+
from webscout.AIutel import AwesomePrompts, Conversation, Optimizers, sanitize_stream
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TwoAI(Provider):
    """
    A class to interact with the Two AI API (v2) with LitAgent user-agent.
    SUTRA is a family of large multi-lingual language models (LMLMs) developed by TWO AI.
    SUTRA's dual-transformer extends the power of both MoE and Dense AI language model architectures,
    delivering cost-efficient multilingual capabilities for over 50+ languages.

    API keys must be provided directly by the user.
    """

    required_auth = True
    AVAILABLE_MODELS = [
        "sutra-v2",  # Multilingual AI model for instruction execution and conversational intelligence
        "sutra-r0",  # Advanced reasoning model for complex problem-solving and deep contextual understanding
    ]

    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 1024,
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: Optional[str] = None,
        model: str = "sutra-v2",  # Default model
        temperature: float = 0.6,
        system_message: str = "You are a helpful assistant."
    ):
        """
        Initializes the TwoAI API client.

        Args:
            api_key: TwoAI API key (required).
            is_conversation: Whether to maintain conversation history.
            max_tokens: Maximum number of tokens to generate.
            timeout: Request timeout in seconds.
            intro: Introduction text for the conversation.
            filepath: Path to save conversation history.
            update_file: Whether to update the conversation history file.
            proxies: Proxy configuration for requests (``None`` disables proxying).
            history_offset: Maximum history length in characters.
            act: Persona for the conversation.
            model: Model to use. Must be one of AVAILABLE_MODELS.
            temperature: Temperature for generation (0.0 to 1.0).
            system_message: System message to use for the conversation.

        Raises:
            ValueError: If ``model`` is not one of ``AVAILABLE_MODELS``.
            exceptions.AuthenticationError: If ``api_key`` is empty.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        if not api_key:
            raise exceptions.AuthenticationError("TwoAI API key is required.")

        self.url = "https://chatsutra-server.account-2b0.workers.dev/v2/chat/completions"  # Correct API endpoint
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0',
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'Origin': 'https://chat.two.ai',
            'Referer': 'https://chatsutra-server.account-2b0.workers.dev/',
            'Sec-Ch-Ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'cross-site',
            'Sec-Gpc': '1',
            'Dnt': '1',
            'X-Session-Token': api_key  # Using session token instead of Bearer auth
        }

        # Initialize curl_cffi Session
        self.session = Session()
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies.update(proxies)

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.system_message = system_message
        self.api_key = api_key

        # NOTE: must be a materialized tuple, not a generator expression.
        # A generator would be exhausted by the first `in` membership test in
        # ask(), making every later optimizer lookup fail spuriously.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        if act:
            self.conversation.intro = AwesomePrompts().get_act(
                cast(Union[str, int], act), default=self.conversation.intro, case_insensitive=True
            ) or self.conversation.intro
        elif intro:
            self.conversation.intro = intro

    @staticmethod
    def _twoai_extractor(chunk_json: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from TwoAI v2 stream JSON objects.

        Returns the delta content string of the first choice, or ``None`` when
        the chunk has no usable text (malformed chunk, empty choices, or a
        non-string/absent ``content`` field).
        """
        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
            return None

        delta = chunk_json["choices"][0].get("delta")
        if not isinstance(delta, dict):
            return None

        content = delta.get("content")
        return content if isinstance(content, str) else None

    def encode_image(self, image_path: str) -> str:
        """
        Encode an image file to base64 string.

        Args:
            image_path: Path to the image file

        Returns:
            Base64 encoded string of the image

        Raises:
            OSError: If the file cannot be opened or read.
        """
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        **kwargs: Any,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send ``prompt`` to the TwoAI API.

        Args:
            prompt: User prompt text.
            stream: If True, return a generator of response chunks; if False,
                consume the stream internally and return the aggregated dict.
                (The API itself always responds via SSE streaming.)
            raw: If True, yield raw text chunks instead of ``{"text": ...}`` dicts.
            optimizer: Name of an ``Optimizers`` method to apply to the prompt.
            conversationally: Apply the optimizer to the full conversation prompt.
            **kwargs: ``online_search`` (bool, default True) and ``image_path``
                (str, optional path of an image to attach) are recognized.

        Returns:
            A generator of chunks when ``stream`` is True, otherwise the final
            ``{"text": ...}`` response dict.

        Raises:
            ValueError: If ``optimizer`` is not a known optimizer.
            exceptions.FailedToGenerateResponseError: On transport or API errors.
        """
        online_search = kwargs.get("online_search", True)
        image_path = kwargs.get("image_path")
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare messages with image if provided
        if image_path:
            # Create a message with image content
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{self.encode_image(image_path)}"
                }
            }
            user_message = {
                "role": "user",
                "content": [
                    {"type": "text", "text": conversation_prompt},
                    image_content
                ]
            }
        else:
            # Text-only message
            user_message = {"role": "user", "content": conversation_prompt}

        # Prepare the payload
        payload = {
            "messages": [
                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                user_message
            ],
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens_to_sample,
            "extra_body": {
                "online_search": online_search,
            }
        }

        def for_stream():
            streaming_text = ""  # Accumulated outside try so `finally` can see partial text
            history_saved = False  # Tracks whether the try block already committed history
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                )

                if response.status_code != 200:
                    error_detail = response.text
                    try:
                        error_json = response.json()
                        error_detail = error_json.get("error", {}).get("message", error_detail)
                    except json.JSONDecodeError:
                        pass
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {error_detail}"
                    )

                # Use sanitize_stream to process the SSE stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._twoai_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = {"text": content_chunk}
                        yield resp if not raw else content_chunk

                # If stream completes successfully, update history
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)
                history_saved = True

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except exceptions.FailedToGenerateResponseError:
                raise  # Re-raise specific exception
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
            finally:
                # Ensure partial output is recorded even if the stream ends abruptly.
                # A local flag is used rather than testing self.last_response, which
                # stays truthy from earlier calls and would mask the abort.
                if streaming_text and not history_saved:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            # Non-stream still uses the stream internally and aggregates
            streaming_text = ""
            # We need to consume the generator from for_stream()
            gen = for_stream()
            try:
                for chunk_data in gen:
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        streaming_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):  # Handle raw=True case
                        streaming_text += chunk_data
            except exceptions.FailedToGenerateResponseError:
                # If the underlying stream fails, re-raise the error
                raise
            # self.last_response and history are updated within for_stream's try/finally
            return self.last_response  # Return the final aggregated dict

        # The API always streams (SSE); honor the caller's `stream` flag by
        # either handing back the generator or aggregating it into a dict.
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        **kwargs: Any,
    ) -> Generator[str, None, None]:
        """
        Chat with the model and yield plain text chunks.

        Args:
            prompt: User prompt text.
            stream: Accepted for interface parity; output is always a generator.
            optimizer: Name of an ``Optimizers`` method to apply to the prompt.
            conversationally: Apply the optimizer to the full conversation prompt.
            **kwargs: Forwarded to :meth:`ask` (``raw``, ``online_search``, ``image_path``).

        Yields:
            Response text chunks as they arrive.
        """
        raw = kwargs.get("raw", False)
        def stream_generator():
            gen = self.ask(
                prompt,
                stream=True,  # API always uses streaming
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
                **kwargs
            )
            for response in gen:
                if raw:
                    yield cast(str, response)
                else:
                    yield self.get_message(cast(Response, response))
        return stream_generator()

    def get_message(self, response: Response) -> str:
        """Extract the message text from a response object.

        Dict responses yield their ``"text"`` field (empty string when absent);
        anything else is coerced with ``str``.
        """
        if not isinstance(response, dict):
            return str(response)
        response_dict = cast(Dict[str, Any], response)
        return response_dict.get("text", "")
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
if __name__ == "__main__":
    import os

    from rich import print

    # SECURITY: never commit credentials to source. The previous revision
    # embedded a session JWT here; read the key from the environment instead.
    api_key = os.environ.get("TWOAI_API_KEY")
    if not api_key:
        raise SystemExit("Set the TWOAI_API_KEY environment variable to run this demo.")

    ai = TwoAI(api_key=api_key, model="sutra-v2", temperature=0.7)
    response = ai.chat("Write a poem about AI in the style of Shakespeare.")
    for chunk in response:
        print(chunk, end="", flush=True)