webscout 8.2.4__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +16 -7
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/__init__.py +7 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +23 -20
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/cli.py +256 -0
- webscout/conversation.py +0 -2
- webscout/exceptions.py +3 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/PI.py
CHANGED
@@ -5,7 +5,7 @@ import json
 import re
 import threading
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from typing import Dict, Union, Any, Optional
@@ -122,6 +122,13 @@ class PiAI(Provider):
         if self.is_conversation:
             self.start_conversation()

+    @staticmethod
+    def _pi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from PiAI stream JSON objects."""
+        if isinstance(chunk, dict) and 'text' in chunk and chunk['text'] is not None:
+            return chunk.get("text")
+        return None
+
     def start_conversation(self) -> str:
         """
         Initializes a new conversation and returns the conversation ID.
@@ -245,17 +252,22 @@ class PiAI(Provider):
                 if line_bytes:
                     line = line_bytes.decode('utf-8')
                     full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
+
                     if line.startswith("data: "):
+                        json_line_str = line[6:] # Get the JSON part as string
                         try:
-
-
-
+                            # Process this single JSON line string with sanitize_stream
+                            processed_gen = sanitize_stream(
+                                data=json_line_str,
+                                to_json=True,
+                                content_extractor=self._pi_extractor
+                            )
+                            chunk_text = next(processed_gen, None) # Get the single extracted text item
+                            if chunk_text and isinstance(chunk_text, str):
                                 streaming_text += chunk_text
-
-
-
-                                continue
-
+                            yield {"text": streaming_text} # Always yield dict with aggregated text
+                        except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Skip if sanitize_stream fails or yields nothing
             # Extract SIDs after processing the stream
             sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
             second_sid = sids[1] if len(sids) >= 2 else None
@@ -284,13 +296,10 @@ class PiAI(Provider):
         else:
             # For non-stream, collect all responses and return the final one
             final_text = ""
-            #
+            # process_stream always yields dicts now
             for res in process_stream():
                 if isinstance(res, dict) and "text" in res:
                     final_text = res["text"] # Keep updating with the latest aggregated text
-                # Handle raw JSON object case if raw=True was passed
-                elif raw and isinstance(res, dict) and 'text' in res and res['text'] is not None:
-                    final_text += res['text'] # Append chunks if raw

             # last_response and history are updated within process_stream
             # Return the final aggregated response dict or raw text
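Note: the PiAI change replaces ad-hoc JSON parsing with the shared sanitize_stream helper plus a static extractor. The sketch below illustrates that pattern on a single "data: {...}" SSE line. demo_sanitize_stream is a hypothetical stand-in written for this example, not the webscout implementation; only the keyword arguments (data, to_json, content_extractor) are taken from the diff.

import json
from typing import Any, Callable, Dict, Iterator, Optional, Union

def demo_sanitize_stream(data: str, to_json: bool = True,
                         content_extractor: Optional[Callable[[Any], Optional[str]]] = None) -> Iterator[str]:
    """Hypothetical stand-in: parse one payload and yield the extracted text, if any."""
    obj = json.loads(data) if to_json else data
    text = content_extractor(obj) if content_extractor else obj
    if isinstance(text, str):
        yield text

def pi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Same shape as PiAI._pi_extractor above: pull 'text' out of a JSON chunk.
    if isinstance(chunk, dict) and chunk.get("text") is not None:
        return chunk["text"]
    return None

line = 'data: {"text": "Hello from Pi!", "sid": "abc123"}'  # invented sample SSE line
if line.startswith("data: "):
    piece = next(demo_sanitize_stream(line[6:], to_json=True, content_extractor=pi_extractor), None)
    print(piece)  # -> Hello from Pi!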
webscout/Provider/StandardInput.py
CHANGED
@@ -1,13 +1,10 @@
-from
-import requests
-import json
+from curl_cffi.requests import Session
 import uuid
 import re
-from
-from typing import Any, Dict, Optional, Union, Generator
+from typing import Any, Dict, Optional, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -98,9 +95,9 @@ class StandardInputAI(Provider):
             "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
         }

-        self.session =
+        self.session = Session() # Use curl_cffi Session
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies # Assign proxies directly

         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -153,6 +150,17 @@ class StandardInputAI(Provider):

         return self.fingerprint

+    @staticmethod
+    def _standardinput_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the StandardInput stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -183,45 +191,48 @@ class StandardInputAI(Provider):
         }

         try:
-
+            # Use curl_cffi post with impersonate
+            response = self.session.post(
+                self.url,
+                cookies=self.cookies,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
+            )
+
             if response.status_code != 200:
-                # Try to get response content for better error messages
                 try:
                     error_content = response.text
                 except:
                     error_content = "<could not read response content>"

                 if response.status_code in [403, 429]:
-                    print(f"Received status code {response.status_code}, refreshing identity...")
                     self.refresh_identity()
-                    response = self.session.post(
+                    response = self.session.post(
+                        self.url, cookies=self.cookies, json=payload, stream=True,
+                        timeout=self.timeout, impersonate="chrome120"
+                    )
                     if not response.ok:
                         raise exceptions.FailedToGenerateResponseError(
                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
                         )
-                    print("Identity refreshed successfully.")
                 else:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Request failed with status code {response.status_code}. Response: {error_content}"
                     )

             full_response = ""
-
-
-
-
-
-
-
-
-
-
-                    match = re.search(r'0:"(.*?)"', line_str)
-                    if match:
-                        content = match.group(1)
-                        full_response += content
-                    continue
-            except: pass
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._standardinput_extractor # Use the specific extractor
+            )
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    full_response += content_chunk

             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
@@ -243,7 +254,8 @@ class StandardInputAI(Provider):

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
+        # Extractor handles formatting
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')

 if __name__ == "__main__":
     print("-" * 100)
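Note: StandardInput's responses arrive in a '0:"..."' wire format that the new _standardinput_extractor parses. The snippet below exercises the same regex and unescaping steps from the diff on an invented sample line, outside of webscout, so the behaviour can be checked in isolation.

import re
from typing import Optional

def standardinput_extractor(chunk: str) -> Optional[str]:
    # Mirrors the regex and unescaping used by _standardinput_extractor in the diff above.
    match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
    if not match:
        return None
    content = match.group(1).encode().decode("unicode_escape")
    return content.replace("\\\\", "\\").replace('\\"', '"')

sample = '0:"Caf\\u00e9 says: \\"hi\\""'  # invented sample line in the 0:"..." format
print(standardinput_extractor(sample))   # -> Café says: "hi"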
webscout/Provider/TeachAnything.py
CHANGED
@@ -4,7 +4,7 @@ from typing import Union, Any, Dict
 from webscout.AIbase import Provider # Import Provider base class
 from webscout import exceptions # Import custom exceptions
 from webscout.conversation import Conversation
-from webscout.
+from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
 from webscout.prompt_manager import AwesomePrompts
 from webscout.litagent import LitAgent

@@ -131,13 +131,22 @@ class TeachAnything(Provider):
             )
             response.raise_for_status() # Check for HTTP errors

-
-
-            #
-
-
-
+            resp_text_raw = response.text # Get raw response text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            # This keeps the pattern consistent, though it won't do much here
+            processed_stream = sanitize_stream(
+                data=resp_text_raw,
+                intro_value=None, # No prefix
+                to_json=False # It's plain text
             )
+
+            # Extract the single result from the generator
+            resp_text = "".join(list(processed_stream)) # Aggregate potential chunks (should be one)
+
+            self.last_response = {"text": resp_text}
+            self.conversation.update_chat_history(prompt, resp_text)
+
             # Return dict or raw string based on raw flag
             return resp_text if raw else self.last_response

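Note: TeachAnything returns one plain-text body, so per the diff's own comment the sanitize_stream call here is mostly for consistency. A minimal stand-in showing the equivalent aggregation, assuming the helper simply yields the text through when to_json=False and no extractor is given (that behaviour is an assumption, not taken from webscout's source):

from typing import Iterator

def passthrough_stream(data: str) -> Iterator[str]:
    # Hypothetical stand-in for sanitize_stream(data=..., intro_value=None, to_json=False):
    # nothing to strip, parse, or extract, so the text comes back unchanged.
    yield data

resp_text_raw = "The mitochondria is the powerhouse of the cell."  # invented response body
resp_text = "".join(passthrough_stream(resp_text_raw))
assert resp_text == resp_text_raw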
webscout/Provider/TextPollinationsAI.py
CHANGED
@@ -2,7 +2,9 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional, List
-
+
+import requests
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -13,32 +15,30 @@ class TextPollinationsAI(Provider):
     """

     AVAILABLE_MODELS = [
-        "openai",
-        "openai-large",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "deepseek-reasoning",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "sur", # Sur AI Assistant (Mistral) (Scaleway) - vision capable
-        "openai-audio", # OpenAI GPT-4o-audio-preview (Azure) - vision and audio capable
+        "openai",
+        "openai-large",
+        "qwen-coder",
+        "llama",
+        "llamascout",
+        "mistral",
+        "unity",
+        "midijourney",
+        "rtist",
+        "searchgpt",
+        "evil",
+        "deepseek-reasoning",
+        "deepseek-reasoning-large",
+        "phi",
+        "llama-vision",
+        "hormoz",
+        "hypnosis-tracy",
+        "deepseek",
+        "sur",
+        "openai-audio",
     ]
+    _models_url = "https://text.pollinations.ai/models"

-    def __init__(
-        self,
+    def __init__(self,
         is_conversation: bool = True,
         max_tokens: int = 8096, # Note: max_tokens is not directly used by this API endpoint
         timeout: int = 30,
@@ -52,10 +52,6 @@ class TextPollinationsAI(Provider):
         system_prompt: str = "You are a helpful AI assistant.",
     ):
         """Initializes the TextPollinationsAI API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        # Initialize curl_cffi Session
         self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -66,6 +62,10 @@ class TextPollinationsAI(Provider):
         self.model = model
         self.system_prompt = system_prompt

+        # Validate against the hardcoded list
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
@@ -96,6 +96,7 @@ class TextPollinationsAI(Provider):
         )
         self.conversation.history_offset = history_offset

+
     def ask(
         self,
         prompt: str,
@@ -148,40 +149,34 @@ class TextPollinationsAI(Provider):
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )

-
-                #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            yield tool_calls if raw else dict(tool_calls=tool_calls)
-                    except json.JSONDecodeError:
-                        continue
-                except UnicodeDecodeError:
-                    continue
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    # Extractor handles both content and tool_calls
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                )
+
+                for delta in processed_stream:
+                    # delta is the extracted 'delta' object or None
+                    if delta and isinstance(delta, dict):
+                        if 'content' in delta and delta['content'] is not None:
+                            content = delta['content']
+                            streaming_text += content
+                            yield content if raw else dict(text=content)
+                        elif 'tool_calls' in delta:
+                            tool_calls = delta['tool_calls']
+                            yield tool_calls if raw else dict(tool_calls=tool_calls)

                 # Update history and last response after stream finishes
-
-
-                if full_response: # Only update history if text was received
+                self.last_response.update(dict(text=streaming_text)) # Store aggregated text
+                if streaming_text: # Only update history if text was received
                     self.conversation.update_chat_history(
-                        prompt,
+                        prompt, streaming_text # Use the fully aggregated text
                     )
             except CurlError as e: # Catch CurlError
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
@@ -193,22 +188,27 @@ class TextPollinationsAI(Provider):
             # Aggregate the stream using the updated for_stream logic
             final_content = ""
             tool_calls_aggregated = None # To store potential tool calls
-            for
-
-            if
-
-
-
-                tool_calls_aggregated
-
-
-
-
-
-                tool_calls_aggregated
-
+            try: # Add try block for potential errors during aggregation
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict):
+                        if "text" in chunk_data:
+                            final_content += chunk_data["text"]
+                        elif "tool_calls" in chunk_data:
+                            # Aggregate tool calls (simple aggregation, might need refinement)
+                            if tool_calls_aggregated is None:
+                                tool_calls_aggregated = []
+                            tool_calls_aggregated.extend(chunk_data["tool_calls"])
+                    elif isinstance(chunk_data, str): # Handle raw stream case
+                        final_content += chunk_data
+                    # Handle raw tool calls list if raw=True
+                    elif isinstance(chunk_data, list) and raw:
+                        if tool_calls_aggregated is None:
+                            tool_calls_aggregated = []
+                        tool_calls_aggregated.extend(chunk_data)
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not final_content and not tool_calls_aggregated:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e


             # last_response and history are updated within for_stream (for text)
@@ -263,6 +263,7 @@ class TextPollinationsAI(Provider):
         elif "tool_calls" in response:
             # For tool calls, return a string representation
             return json.dumps(response["tool_calls"])
+        return "" # Return empty string if neither text nor tool_calls found

 if __name__ == "__main__":
     # Ensure curl_cffi is installed
@@ -274,6 +275,7 @@ if __name__ == "__main__":
     working = 0
     total = len(TextPollinationsAI.AVAILABLE_MODELS)

+
     for model in TextPollinationsAI.AVAILABLE_MODELS:
         try:
             test_ai = TextPollinationsAI(model=model, timeout=60)