webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged by the registry's automated analysis. Consult the registry's advisory page for this release for further details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
import uuid
|
|
2
|
+
import cloudscraper
|
|
3
|
+
import json
|
|
4
|
+
from typing import Union, Any, Dict, Optional, Generator
|
|
5
|
+
|
|
6
|
+
from webscout.AIutel import Optimizers
|
|
7
|
+
from webscout.AIutel import Conversation
|
|
8
|
+
from webscout.AIutel import AwesomePrompts
|
|
9
|
+
from webscout.AIbase import Provider
|
|
10
|
+
from webscout import exceptions
|
|
11
|
+
from webscout.litagent import LitAgent
|
|
12
|
+
|
|
13
|
+
class Talkai(Provider):
    """
    Provider for the Talkai.info chat API.

    Talkai answers over a server-sent-event (SSE) endpoint; ``ask`` reads the
    ``data:`` lines of that stream and yields them as text chunks.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4o-mini",  # Default model
    ):
        """Initializes the Talkai.info API client.

        Args:
            is_conversation (bool): Maintain rolling chat history. Defaults to True.
            max_tokens (int): Token budget handed to the Conversation helper.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional intro/system text for the conversation.
            filepath (str): Optional path for persisting chat history.
            update_file (bool): Append new turns to ``filepath`` when True.
            proxies (dict): Optional proxy mapping for the HTTP session
                (``None`` avoids the shared-mutable-default pitfall).
            history_offset (int): History truncation offset.
            act (str): Optional AwesomePrompts persona key used as intro.
            model (str): Model identifier sent to the API.
        """
        self.session = cloudscraper.create_scraper()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://talkai.info/chat/send/"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.headers = {
            'Accept': 'application/json, text/event-stream',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'Origin': 'https://talkai.info',
            'Referer': 'https://talkai.info/chat/',
            'User-Agent': LitAgent().random(),
            'Cookie': '_csrf-front=e19e203a958c74e439261f6860535403324c9ab2ede76449e6407e54e1f366afa%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22QbnGY7XS5q9i3JnDvi6KRzrOk0D6XFnk%22%3B%7D; _ga=GA1.1.1383924142.1734246140; _ym_uid=1723397035198647017; _ym_d=1734246141; _ym_isad=1; _ym_visorc=b; talkai-front=ngbj23of1t0ujg2raoa3l57vqe; _ga_FB7V9WMN30=GS1.1.1734246139.1.1734246143.0.0.0'
        }
        # BUG FIX: materialize as a tuple. The original generator expression
        # was exhausted by the first `optimizer in ...` membership test, so
        # every later optimizer lookup silently failed.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies or {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Chat with Talkai

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            dict: Response dictionary (or a generator of dicts when streaming).

        Raises:
            exceptions.FailedToGenerateResponseError: Unknown optimizer or
                request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "type": "chat",
            "messagesHistory": [
                {
                    "id": str(uuid.uuid4()),
                    "from": "you",
                    "content": conversation_prompt
                }
            ],
            "settings": {
                "model": self.model
            }
        }

        def for_stream():
            try:
                with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                    response.raise_for_status()

                    full_response = ""
                    for line in response.iter_lines():
                        if not line:
                            continue
                        decoded_line = line.decode('utf-8')
                        if 'event: trylimit' in decoded_line:
                            break  # Server signals the free-tier limit; stop reading.
                        if decoded_line.startswith('data:'):
                            # SSE field parsing: drop the field name and at most
                            # ONE leading space (the original hard-coded [6:],
                            # which ate a payload character on "data:x" lines).
                            data = decoded_line[5:]
                            if data.startswith(' '):
                                data = data[1:]
                            full_response += data
                            yield data if raw else dict(text=data)

                    self.last_response.update(dict(text=full_response))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )

            except exceptions.FailedToGenerateResponseError:
                raise
            except Exception as e:
                # BUG FIX: the original wrote `except cloudscraper.exceptions`,
                # which names a *module*, not an exception class — Python would
                # raise TypeError at catch time instead of reporting the
                # request failure.
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            # Drain the stream and return the concatenated text.
            full_response = ""
            for line in for_stream():
                full_response += line['text'] if not raw else line
            return dict(text=full_response)

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response `str`

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            str: Response generated (or a generator of str when streaming).
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message only from response.

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted, with escaped newlines unescaped.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # The second `.replace('\\n\\n', ...)` in the original was a no-op:
        # the first replace already converted every escaped newline.
        return response["text"].replace('\\n', '\n')
|
|
190
|
+
if __name__ == "__main__":
    # Manual smoke test: stream a short completion straight to stdout.
    bot = Talkai()
    for piece in bot.chat("write me about AI", stream=True):
        print(piece, end="", flush=True)
|
|
@@ -0,0 +1,353 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from curl_cffi.requests import Session
|
|
3
|
+
from curl_cffi import CurlError
|
|
4
|
+
import uuid
|
|
5
|
+
import base64
|
|
6
|
+
import json
|
|
7
|
+
import random
|
|
8
|
+
import string
|
|
9
|
+
import time
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from typing import Any, Dict, Optional, Generator, Union, List
|
|
12
|
+
|
|
13
|
+
from webscout import exceptions
|
|
14
|
+
from webscout.AIutel import Optimizers
|
|
15
|
+
from webscout.AIutel import Conversation
|
|
16
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
17
|
+
from webscout.AIbase import Provider
|
|
18
|
+
|
|
19
|
+
class Toolbaz(Provider):
|
|
20
|
+
"""
|
|
21
|
+
A class to interact with the Toolbaz API. Supports streaming responses.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
AVAILABLE_MODELS = [
|
|
25
|
+
"gemini-2.5-flash",
|
|
26
|
+
"gemini-2.0-flash-thinking",
|
|
27
|
+
"gemini-2.0-flash",
|
|
28
|
+
"gemini-1.5-flash",
|
|
29
|
+
"o3-mini",
|
|
30
|
+
"gpt-4o-latest",
|
|
31
|
+
"gpt-4o",
|
|
32
|
+
"deepseek-r1",
|
|
33
|
+
"Llama-4-Maverick",
|
|
34
|
+
"Llama-4-Scout",
|
|
35
|
+
"Llama-3.3-70B",
|
|
36
|
+
"Qwen2.5-72B",
|
|
37
|
+
"Qwen2-72B",
|
|
38
|
+
"grok-2-1212",
|
|
39
|
+
"grok-3-beta",
|
|
40
|
+
"toolbaz_v3.5_pro",
|
|
41
|
+
"toolbaz_v3",
|
|
42
|
+
"mixtral_8x22b",
|
|
43
|
+
"L3-70B-Euryale-v2.1",
|
|
44
|
+
"midnight-rose",
|
|
45
|
+
"unity",
|
|
46
|
+
"unfiltered_x"
|
|
47
|
+
]
|
|
48
|
+
|
|
49
|
+
def __init__(
|
|
50
|
+
self,
|
|
51
|
+
is_conversation: bool = True,
|
|
52
|
+
max_tokens: int = 600, # Note: max_tokens is not directly used by the API
|
|
53
|
+
timeout: int = 30,
|
|
54
|
+
intro: str = None,
|
|
55
|
+
filepath: str = None,
|
|
56
|
+
update_file: bool = True,
|
|
57
|
+
proxies: dict = {},
|
|
58
|
+
history_offset: int = 10250,
|
|
59
|
+
act: str = None,
|
|
60
|
+
model: str = "gemini-2.0-flash",
|
|
61
|
+
system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not directly used by the API
|
|
62
|
+
):
|
|
63
|
+
"""
|
|
64
|
+
Initializes the Toolbaz API with given parameters.
|
|
65
|
+
"""
|
|
66
|
+
if model not in self.AVAILABLE_MODELS:
|
|
67
|
+
raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
|
|
68
|
+
|
|
69
|
+
# Initialize curl_cffi Session
|
|
70
|
+
self.session = Session()
|
|
71
|
+
self.is_conversation = is_conversation
|
|
72
|
+
self.max_tokens_to_sample = max_tokens
|
|
73
|
+
self.timeout = timeout
|
|
74
|
+
self.last_response = {}
|
|
75
|
+
self.system_prompt = system_prompt
|
|
76
|
+
self.model = model
|
|
77
|
+
self.proxies = proxies # Store proxies for later use in requests
|
|
78
|
+
|
|
79
|
+
# Set up headers for the curl_cffi session
|
|
80
|
+
self.session.headers.update({
|
|
81
|
+
"user-agent": "Mozilla/5.0 (Linux; Android 10)", # Keep specific user-agent
|
|
82
|
+
"accept": "*/*",
|
|
83
|
+
"accept-language": "en-US",
|
|
84
|
+
"cache-control": "no-cache",
|
|
85
|
+
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
|
|
86
|
+
"origin": "https://toolbaz.com",
|
|
87
|
+
"pragma": "no-cache",
|
|
88
|
+
"referer": "https://toolbaz.com/",
|
|
89
|
+
"sec-fetch-mode": "cors"
|
|
90
|
+
# Add sec-ch-ua headers if needed for impersonation consistency
|
|
91
|
+
})
|
|
92
|
+
# Assign proxies directly to the session
|
|
93
|
+
self.session.proxies = proxies
|
|
94
|
+
|
|
95
|
+
# Initialize conversation history
|
|
96
|
+
self.__available_optimizers = (
|
|
97
|
+
method
|
|
98
|
+
for method in dir(Optimizers)
|
|
99
|
+
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
Conversation.intro = (
|
|
103
|
+
AwesomePrompts().get_act(
|
|
104
|
+
act, raise_not_found=True, default=None, case_insensitive=True
|
|
105
|
+
)
|
|
106
|
+
if act
|
|
107
|
+
else intro or Conversation.intro
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
self.conversation = Conversation(
|
|
111
|
+
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
112
|
+
)
|
|
113
|
+
self.conversation.history_offset = history_offset
|
|
114
|
+
|
|
115
|
+
@staticmethod
|
|
116
|
+
def _toolbaz_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
|
|
117
|
+
"""Removes [model:...] tags from a string chunk."""
|
|
118
|
+
if isinstance(chunk, str):
|
|
119
|
+
return re.sub(r"\[model:.*?\]", "", chunk)
|
|
120
|
+
return None
|
|
121
|
+
|
|
122
|
+
def random_string(self, length):
|
|
123
|
+
return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
|
|
124
|
+
|
|
125
|
+
def generate_token(self):
|
|
126
|
+
payload = {
|
|
127
|
+
"bR6wF": {
|
|
128
|
+
"nV5kP": "Mozilla/5.0 (Linux; Android 10)",
|
|
129
|
+
"lQ9jX": "en-US",
|
|
130
|
+
"sD2zR": "431x958",
|
|
131
|
+
"tY4hL": time.tzname[0] if time.tzname else "UTC",
|
|
132
|
+
"pL8mC": "Linux armv81",
|
|
133
|
+
"cQ3vD": datetime.now().year,
|
|
134
|
+
"hK7jN": datetime.now().hour
|
|
135
|
+
},
|
|
136
|
+
"uT4bX": {
|
|
137
|
+
"mM9wZ": [],
|
|
138
|
+
"kP8jY": []
|
|
139
|
+
},
|
|
140
|
+
"tuTcS": int(time.time()),
|
|
141
|
+
"tDfxy": None,
|
|
142
|
+
"RtyJt": str(uuid.uuid4())
|
|
143
|
+
}
|
|
144
|
+
return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()
|
|
145
|
+
|
|
146
|
+
    def get_auth(self):
        """Obtain a one-shot auth token and session id from data.toolbaz.com.

        Posts a random 36-char session id plus the fingerprint token from
        ``generate_token`` to ``token.php``.

        Returns:
            dict: ``{"token": str, "session_id": str}`` on success.

        Raises:
            exceptions.FailedToGenerateResponseError: On network errors,
                HTTP errors, malformed JSON, or a response whose ``success``
                flag is falsy.
        """
        try:
            session_id = self.random_string(36)
            token = self.generate_token()
            data = {
                "session_id": session_id,
                "token": token
            }
            # Use curl_cffi session post WITHOUT impersonate for token request
            resp = self.session.post(
                "https://data.toolbaz.com/token.php",
                data=data
                # Removed impersonate="chrome110" for this specific request
            )
            resp.raise_for_status()  # Check for HTTP errors
            result = resp.json()
            if result.get("success"):
                return {"token": result["token"], "session_id": session_id}
            # Raise error if success is not true.
            # NOTE(review): this raise is inside the try, so it is caught and
            # re-wrapped by the generic `except Exception` below — likely
            # unintended; confirm before relying on the exact message.
            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: API response indicates failure. Response: {result}")
        except CurlError as e:  # Catch CurlError specifically
            # Raise a specific error indicating CurlError during auth
            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to network error (CurlError): {e}") from e
        except json.JSONDecodeError as e:
            # Raise error for JSON decoding issues
            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: Could not decode JSON response. Error: {e}. Response text: {getattr(resp, 'text', 'N/A')}") from e
        except Exception as e:  # Catch other potential errors (like HTTPError from raise_for_status)
            # Raise a specific error indicating a general failure during auth
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to an unexpected error ({type(e).__name__}): {e} - {err_text}") from e
|
|
176
|
+
|
|
177
|
+
    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,  # Kept for compatibility; only affects the *stream* chunk shape (str vs dict)
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Sends a prompt to the Toolbaz API and returns the response.

        Args:
            prompt: The user prompt to send.
            stream: When True, return a generator yielding chunks as they arrive;
                when False, return a single dict with the full text.
            raw: In stream mode, yield bare strings instead of ``{"text": ...}``
                dicts. Ignored in non-stream mode (a dict is always returned).
            optimizer: Optional name of a prompt optimizer from ``Optimizers``.
            conversationally: When True, apply the optimizer to the full
                conversation prompt rather than the bare ``prompt``.

        Returns:
            Dict[str, Any] with a "text" key (non-stream), or a Generator of
            chunks (stream).

        Raises:
            exceptions.FailedToGenerateResponseError: On unknown optimizer,
                authentication failure, network (CurlError) or any other error.
        """
        if optimizer and optimizer not in self.__available_optimizers:
            raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {self.__available_optimizers}")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )

        # get_auth now raises exceptions on failure
        auth = self.get_auth()
        # No need to check if auth is None, as an exception would have been raised

        data = {
            "text": conversation_prompt,
            "capcha": auth["token"],  # NOTE: "capcha" is the API's own (misspelled) field name
            "model": self.model,
            "session_id": auth["session_id"]
        }

        def for_stream():
            # Generator: POSTs with streaming enabled and yields chunks as they arrive.
            try:
                # Use curl_cffi session post with impersonate for the main request
                resp = self.session.post(
                    "https://data.toolbaz.com/writing.php",
                    data=data,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"  # Keep impersonate here
                )
                resp.raise_for_status()

                streaming_text = ""

                # Use sanitize_stream with the custom extractor.
                # It will decode bytes and yield processed string chunks.
                processed_stream = sanitize_stream(
                    data=resp.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,  # No simple prefix to strip
                    to_json=False,  # Content is plain text, not JSON lines
                    content_extractor=self._toolbaz_extractor,  # Strips [model: ...] tags
                    yield_raw_on_error=True  # Yield even if extractor somehow fails (though unlikely for regex)
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string with tags removed
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield {"text": content_chunk} if not raw else content_chunk

                # Record full text and extend chat history only after the stream completes.
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:  # Catch CurlError (transport-level failure)
                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
            except Exception as e:  # Catch other exceptions (e.g. HTTPError from raise_for_status)
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error during stream: {str(e)}") from e

        def for_non_stream():
            # Single-shot request: returns the full response dict at once.
            try:
                # Use curl_cffi session post with impersonate for the main request
                resp = self.session.post(
                    "https://data.toolbaz.com/writing.php",
                    data=data,
                    timeout=self.timeout,
                    impersonate="chrome110"  # Keep impersonate here
                )
                resp.raise_for_status()

                # Use response.text which is already decoded
                text = resp.text
                # Remove [model: ...] tags
                text = re.sub(r"\[model:.*?\]", "", text)

                self.last_response = {"text": text}
                self.conversation.update_chat_history(prompt, text)

                return self.last_response

            except CurlError as e:  # Catch CurlError (transport-level failure)
                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
            except Exception as e:  # Catch other exceptions (e.g. HTTPError from raise_for_status)
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e

        return for_stream() if stream else for_non_stream()
def chat(
|
|
273
|
+
self,
|
|
274
|
+
prompt: str,
|
|
275
|
+
stream: bool = False,
|
|
276
|
+
optimizer: Optional[str] = None,
|
|
277
|
+
conversationally: bool = False,
|
|
278
|
+
) -> Union[str, Generator[str, None, None]]:
|
|
279
|
+
"""Generates a response from the Toolbaz API."""
|
|
280
|
+
def for_stream_chat():
|
|
281
|
+
# ask() yields dicts when raw=False
|
|
282
|
+
for response_dict in self.ask(
|
|
283
|
+
prompt,
|
|
284
|
+
stream=True,
|
|
285
|
+
raw=False, # Ensure ask yields dicts
|
|
286
|
+
optimizer=optimizer,
|
|
287
|
+
conversationally=conversationally
|
|
288
|
+
):
|
|
289
|
+
yield self.get_message(response_dict)
|
|
290
|
+
|
|
291
|
+
def for_non_stream_chat():
|
|
292
|
+
# ask() returns a dict when stream=False
|
|
293
|
+
response_dict = self.ask(
|
|
294
|
+
prompt,
|
|
295
|
+
stream=False,
|
|
296
|
+
optimizer=optimizer,
|
|
297
|
+
conversationally=conversationally,
|
|
298
|
+
)
|
|
299
|
+
return self.get_message(response_dict)
|
|
300
|
+
|
|
301
|
+
return for_stream_chat() if stream else for_non_stream_chat()
|
|
302
|
+
|
|
303
|
+
def get_message(self, response: Dict[str, Any]) -> str:
|
|
304
|
+
"""Extract the message from the response.
|
|
305
|
+
|
|
306
|
+
Args:
|
|
307
|
+
response: Response dictionary
|
|
308
|
+
|
|
309
|
+
Returns:
|
|
310
|
+
str: Message extracted
|
|
311
|
+
"""
|
|
312
|
+
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
313
|
+
return response.get("text", "")
|
|
314
|
+
|
|
315
|
+
# Example usage
if __name__ == "__main__":
    # Ensure curl_cffi is installed
    from rich import print  # Use rich print if available

    separator = "-" * 80
    print(separator)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(separator)

    # Smoke-test every advertised model via the streaming chat path.
    for model_name in Toolbaz.AVAILABLE_MODELS:
        try:
            client = Toolbaz(model=model_name, timeout=60)
            # Exercise the streaming interface and accumulate the chunks.
            pieces = []
            for piece in client.chat("Say 'Hello' in one word", stream=True):
                pieces.append(piece)
            collected = "".join(pieces)

            cleaned = collected.strip()
            if cleaned:
                status = "✓"
                # Truncate long replies for a tidy one-line report.
                display_text = cleaned[:50] + "..." if len(cleaned) > 50 else cleaned
            else:
                status = "✗ (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model_name:<50} {status:<10} {display_text}")

        except Exception as e:
            # Print full error for debugging
            print(f"\r{model_name:<50} {'✗':<10} Error: {str(e)}")