webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged by the registry.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/TextPollinationsAI.py
@@ -1,308 +1,308 @@

The viewer rendered this file as deleted and re-added in full (matching the +308 −308 entry above); the removed and re-added sides are identical in this rendering, so the file content is reconstructed once below.

```python
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
from typing import Union, Any, Dict, Generator, Optional, List

import requests
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class TextPollinationsAI(Provider):
    """
    A class to interact with the Pollinations AI API.
    """

    AVAILABLE_MODELS = [
        "openai",
        "openai-fast",
        "openai-large",
        "openai-roblox",
        "qwen-coder",
        "llama",
        "llamascout",
        "mistral",
        "unity",
        "mirexa",
        "midijourney",
        "rtist",
        "searchgpt",
        "evil",
        "deepseek-reasoning",
        "phi",
        "hormoz",
        "hypnosis-tracy",
        "deepseek",
        "sur",
        "bidara",
        "openai-audio",
    ]
    _models_url = "https://text.pollinations.ai/models"

    def __init__(self,
                 is_conversation: bool = True,
                 max_tokens: int = 8096,  # Note: max_tokens is not directly used by this API endpoint
                 timeout: int = 30,
                 intro: str = None,
                 filepath: str = None,
                 update_file: bool = True,
                 proxies: dict = {},
                 history_offset: int = 10250,
                 act: str = None,
                 model: str = "openai-large",
                 system_prompt: str = "You are a helpful AI assistant.",
                 ):
        """Initializes the TextPollinationsAI API client."""
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://text.pollinations.ai/openai"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt

        # Validate against the hardcoded list
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'User-Agent': Lit().random(),
            'Content-Type': 'application/json',
            # Add sec-ch-ua headers if needed for impersonation consistency
        }

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies  # Assign proxies directly

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Dict[str, Any]] = None,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI"""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
            "stream": stream,
        }

        # Add function calling parameters if provided
        if tools:
            payload["tools"] = tools
        if tool_choice:
            payload["tool_choice"] = tool_choice

        def for_stream():
            try:  # Add try block for CurlError
                # Use curl_cffi session post with impersonate
                response = self.session.post(
                    self.api_endpoint,
                    # headers are set on the session
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120"  # Add impersonate
                )

                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                streaming_text = ""
                # Use sanitize_stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    skip_markers=["[DONE]"],
                    # Extractor handles both content and tool_calls
                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta') if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
                )

                for delta in processed_stream:
                    # delta is the extracted 'delta' object or None
                    if delta and isinstance(delta, dict):
                        if 'content' in delta and delta['content'] is not None:
                            content = delta['content']
                            streaming_text += content
                            yield content if raw else dict(text=content)
                        elif 'tool_calls' in delta:
                            tool_calls = delta['tool_calls']
                            yield tool_calls if raw else dict(tool_calls=tool_calls)

                # Update history and last response after stream finishes
                self.last_response.update(dict(text=streaming_text))  # Store aggregated text
                if streaming_text:  # Only update history if text was received
                    self.conversation.update_chat_history(
                        prompt, streaming_text  # Use the fully aggregated text
                    )
            except CurlError as e:  # Catch CurlError
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:  # Catch other potential exceptions
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e

        def for_non_stream():
            # Aggregate the stream using the updated for_stream logic
            final_content = ""
            tool_calls_aggregated = None  # To store potential tool calls
            try:  # Add try block for potential errors during aggregation
                for chunk_data in for_stream():
                    if isinstance(chunk_data, dict):
                        if "text" in chunk_data:
                            final_content += chunk_data["text"]
                        elif "tool_calls" in chunk_data:
                            # Aggregate tool calls (simple aggregation, might need refinement)
                            if tool_calls_aggregated is None:
                                tool_calls_aggregated = []
                            tool_calls_aggregated.extend(chunk_data["tool_calls"])
                    elif isinstance(chunk_data, str):  # Handle raw stream case
                        final_content += chunk_data
                    # Handle raw tool calls list if raw=True
                    elif isinstance(chunk_data, list) and raw:
                        if tool_calls_aggregated is None:
                            tool_calls_aggregated = []
                        tool_calls_aggregated.extend(chunk_data)
            except Exception as e:
                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                if not final_content and not tool_calls_aggregated:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

            # last_response and history are updated within for_stream (for text)
            # Return a dict containing text and/or tool_calls
            result = {}
            if final_content:
                result["text"] = final_content
            if tool_calls_aggregated:
                result["tool_calls"] = tool_calls_aggregated
            self.last_response = result  # Update last_response with aggregated result
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response as a string"""
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally,
                tools=tools, tool_choice=tool_choice
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                    tools=tools,
                    tool_choice=tool_choice,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        if "text" in response:
            return response["text"]
        elif "tool_calls" in response:
            # For tool calls, return a string representation
            return json.dumps(response["tool_calls"])
        return ""  # Return empty string if neither text nor tool_calls found

if __name__ == "__main__":
    # Ensure curl_cffi is installed
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(TextPollinationsAI.AVAILABLE_MODELS)

    for model in TextPollinationsAI.AVAILABLE_MODELS:
        try:
            test_ai = TextPollinationsAI(model=model, timeout=60)
            # Test stream first
            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
            for chunk in response_stream:
                response_text += chunk

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Clean and truncate response
                clean_text = response_text.strip()
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗ (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model:<50} {status:<10} {display_text}")

            # Optional: Add non-stream test if needed
            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
            # if not response_non_stream or len(response_non_stream.strip()) == 0:
            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")

        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
```
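For orientation, a minimal usage sketch of the provider above, assuming webscout 8.3 is installed and text.pollinations.ai is reachable. The class, its `chat`/`ask` methods, and the `tools`/`tool_choice` parameters come from the reconstructed file; the `get_weather` tool schema is a hypothetical illustration, not part of webscout.

```python
# Minimal usage sketch for the TextPollinationsAI provider shown above.
# Assumes webscout 8.3 is installed; import path follows the file location in the diff.
from webscout.Provider.TextPollinationsAI import TextPollinationsAI

ai = TextPollinationsAI(model="openai-large", timeout=60)

# Non-streaming: chat() returns the aggregated response text as a string.
print(ai.chat("Name three uses of Python's walrus operator."))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Count from 1 to 5.", stream=True):
    print(chunk, end="", flush=True)
print()

# Function calling: ask() forwards OpenAI-style `tools` in the payload; when the
# model emits tool calls, they are returned under the "tool_calls" key.
# The schema below is a hypothetical example for illustration only.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool name
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
result = ai.ask("What's the weather in Paris?", tools=tools)
print(result.get("tool_calls") or result.get("text"))
```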