webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,311 +1,311 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
from typing import Union, Any, Dict, Generator, Optional, List

import requests
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class TextPollinationsAI(Provider):
    """
    A class to interact with the Pollinations AI API.
    """

    required_auth = False
    AVAILABLE_MODELS = [
        "deepseek-reasoning",
        "gemini",
        "mistral",
        "nova-fast",
        "openai",
        "openai-audio",
        "openai-fast",
        "openai-reasoning",
        "qwen-coder",
        "roblox-rp",
        "bidara",
        "evil",
        "midijourney",
        "mirexa",
        "rtist",
        "unity",
    ]
    _models_url = "https://text.pollinations.ai/models"

    def __init__(self,
                 is_conversation: bool = True,
                 max_tokens: int = 8096, # Note: max_tokens is not directly used by this API endpoint
                 timeout: int = 30,
                 intro: str = None,
                 filepath: str = None,
                 update_file: bool = True,
                 proxies: dict = {},
                 history_offset: int = 10250,
                 act: str = None,
                 model: str = "openai-large",
                 system_prompt: str = "You are a helpful AI assistant.",
                 ):
        """Initializes the TextPollinationsAI API client."""
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://text.pollinations.ai/openai"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt

        # Validate against the hardcoded list
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'User-Agent': Lit().random(),
            'Content-Type': 'application/json',
            # Add sec-ch-ua headers if needed for impersonation consistency
        }

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies # Assign proxies directly

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset


    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Dict[str, Any]] = None,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI"""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
            "stream": stream,
        }

        # Add function calling parameters if provided
        if tools:
            payload["tools"] = tools
        if tool_choice:
            payload["tool_choice"] = tool_choice

        def for_stream():
            try:
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )
                streaming_text = ""
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta') if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False,
                    raw=raw
                )
                for delta in processed_stream:
                    if isinstance(delta, bytes):
                        delta = delta.decode('utf-8', errors='ignore')
                    if delta is None:
                        continue
                    if raw:
                        # Only yield content or tool_calls as string
                        if isinstance(delta, dict):
                            if 'content' in delta and delta['content'] is not None:
                                content = delta['content']
                                streaming_text += content
                                yield content
                            elif 'tool_calls' in delta:
                                tool_calls = delta['tool_calls']
                                yield json.dumps(tool_calls)
                        elif isinstance(delta, str):
                            streaming_text += delta
                            yield delta
                    else:
                        if isinstance(delta, dict):
                            if 'content' in delta and delta['content'] is not None:
                                content = delta['content']
                                streaming_text += content
                                yield dict(text=content)
                            elif 'tool_calls' in delta:
                                tool_calls = delta['tool_calls']
                                yield dict(tool_calls=tool_calls)
                self.last_response.update(dict(text=streaming_text))
                if streaming_text:
                    self.conversation.update_chat_history(
                        prompt, streaming_text
                    )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
        def for_non_stream():
            final_content = ""
            tool_calls_aggregated = None
            try:
                for chunk_data in for_stream():
                    if raw:
                        if isinstance(chunk_data, str):
                            final_content += chunk_data
                        elif isinstance(chunk_data, bytes):
                            final_content += chunk_data.decode('utf-8', errors='ignore')
                        elif isinstance(chunk_data, list):
                            if tool_calls_aggregated is None:
                                tool_calls_aggregated = []
                            tool_calls_aggregated.extend(chunk_data)
                    else:
                        if isinstance(chunk_data, dict):
                            if "text" in chunk_data:
                                final_content += chunk_data["text"]
                            elif "tool_calls" in chunk_data:
                                if tool_calls_aggregated is None:
                                    tool_calls_aggregated = []
                                tool_calls_aggregated.extend(chunk_data["tool_calls"])
                        elif isinstance(chunk_data, str):
                            final_content += chunk_data
            except Exception as e:
                if not final_content and not tool_calls_aggregated:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
            result = {}
            if final_content:
                result["text"] = final_content
            if tool_calls_aggregated:
                result["tool_calls"] = tool_calls_aggregated
            self.last_response = result
            return self.last_response if not raw else (final_content if final_content else json.dumps(tool_calls_aggregated) if tool_calls_aggregated else "")
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Dict[str, Any]] = None,
        raw: bool = False, # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response as a string"""
        def for_stream():
            for response in self.ask(
                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally,
                tools=tools, tool_choice=tool_choice
            ):
                if raw:
                    yield response
                else:
                    yield self.get_message(response)
        def for_non_stream():
            result = self.ask(
                prompt,
                False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
                tools=tools,
                tool_choice=tool_choice,
            )
            if raw:
                return result if isinstance(result, str) else (result.get("text", "") if isinstance(result, dict) else str(result))
            else:
                return self.get_message(result)
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        if "text" in response:
            return response["text"]
        elif "tool_calls" in response:
            # For tool calls, return a string representation
            return json.dumps(response["tool_calls"])
        return "" # Return empty string if neither text nor tool_calls found

if __name__ == "__main__":
    # Ensure curl_cffi is installed
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(TextPollinationsAI.AVAILABLE_MODELS)

    for model in TextPollinationsAI.AVAILABLE_MODELS:
        try:
            test_ai = TextPollinationsAI(model=model, timeout=60)
            # Test stream first
            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
            for chunk in response_stream:
                response_text += chunk

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Clean and truncate response
                clean_text = response_text.strip()
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗ (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model:<50} {status:<10} {display_text}")

            # Optional: Add non-stream test if needed
            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
            # if not response_non_stream or len(response_non_stream.strip()) == 0:
            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")

        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")