webscout-8.2.4-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry has flagged this version of webscout.
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +16 -7
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/__init__.py +7 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +23 -20
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/cli.py +256 -0
- webscout/conversation.py +0 -2
- webscout/exceptions.py +3 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/toolbaz.py
CHANGED
@@ -13,7 +13,7 @@ from typing import Any, Dict, Optional, Generator, Union, List
 from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 
 class Toolbaz(Provider):
@@ -26,6 +26,7 @@ class Toolbaz(Provider):
         "gemini-2.0-flash-thinking",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
+        "o3-mini",
         "gpt-4o-latest",
         "gpt-4o",
         "deepseek-r1",
@@ -111,6 +112,13 @@ class Toolbaz(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _toolbaz_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Removes [model:...] tags from a string chunk."""
+        if isinstance(chunk, str):
+            return re.sub(r"\[model:.*?\]", "", chunk)
+        return None
+
     def random_string(self, length):
         return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
 
@@ -207,53 +215,23 @@ class Toolbaz(Provider):
             )
             resp.raise_for_status()
 
-            buffer = ""
-            tag_start = "[model:"
             streaming_text = ""
 
-            # … (body of the old tag-buffering loop not preserved in the source diff view)
-            # Add remaining text after the last complete tag
-            processed_buffer += buffer[last_processed_index:]
-
-            # Now, check for incomplete tag at the end
-            last_tag_start_index = processed_buffer.rfind(tag_start)
-
-            if last_tag_start_index != -1:
-                # Text before the potential incomplete tag
-                text_to_yield = processed_buffer[:last_tag_start_index]
-                # Keep the potential incomplete tag start for the next iteration
-                buffer = processed_buffer[last_tag_start_index:]
-            else:
-                # No potential incomplete tag found, yield everything processed
-                text_to_yield = processed_buffer
-                buffer = "" # Clear buffer as everything is processed
-
-            if text_to_yield:
-                streaming_text += text_to_yield
-                # Yield dict or raw string
-                yield {"text": text_to_yield} if not raw else text_to_yield
-
-            # Process any remaining text in the buffer after the loop finishes
-            # Remove any potential tags (complete or incomplete)
-            final_text = re.sub(r"\[model:.*?\]", "", buffer)
-            if final_text:
-                streaming_text += final_text
-                yield {"text": final_text} if not raw else final_text
+            # Use sanitize_stream with the custom extractor
+            # It will decode bytes and yield processed string chunks
+            processed_stream = sanitize_stream(
+                data=resp.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is text
+                content_extractor=self._toolbaz_extractor, # Use the tag remover
+                yield_raw_on_error=True # Yield even if extractor somehow fails (though unlikely for regex)
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string with tags removed
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield {"text": content_chunk} if not raw else content_chunk
 
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
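
Every provider file touched in this release follows the refactor shown above: a bespoke decode-and-parse loop is deleted and replaced by a call to sanitize_stream plus a small static content_extractor. The webscout.AIutel implementation of sanitize_stream is not shown in this diff, so the following is a minimal sketch of the behaviour the call sites appear to rely on. The helper name sanitize_stream_sketch and the sample data are hypothetical, inferred only from the keyword arguments used in these diffs; it is not the library's actual code.

import json
import re
from typing import Any, Callable, Dict, Iterable, Iterator, Optional, Union

# Hypothetical stand-in for webscout.AIutel.sanitize_stream, inferred from the
# keyword arguments used in the diffs above (not the library's real code).
def sanitize_stream_sketch(
    data: Iterable[Union[bytes, str]],
    intro_value: Optional[str] = None,      # prefix to strip, e.g. "data:"
    to_json: bool = False,                  # parse each chunk as JSON before extracting
    skip_markers: Iterable[str] = (),       # chunks to drop entirely, e.g. ["[DONE]"]
    content_extractor: Optional[
        Callable[[Union[str, Dict[str, Any]]], Optional[str]]
    ] = None,
    yield_raw_on_error: bool = False,       # fall back to the raw chunk if parsing fails
) -> Iterator[str]:
    for raw in data:
        # Decode byte chunks; pass strings through unchanged.
        chunk = raw.decode("utf-8", errors="ignore") if isinstance(raw, bytes) else raw
        if intro_value and chunk.startswith(intro_value):
            chunk = chunk[len(intro_value):].strip()
        if not chunk or chunk.strip() in skip_markers:
            continue
        payload: Union[str, Dict[str, Any]] = chunk
        if to_json:
            try:
                payload = json.loads(chunk)
            except json.JSONDecodeError:
                if yield_raw_on_error:
                    yield chunk
                continue
        extracted = content_extractor(payload) if content_extractor else payload
        if isinstance(extracted, str) and extracted:
            yield extracted

# The Toolbaz extractor from the diff: strip "[model:...]" tags from text chunks.
def toolbaz_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    if isinstance(chunk, str):
        return re.sub(r"\[model:.*?\]", "", chunk)
    return None

fake_stream = [b"[model:gpt-4o]Hello, ", b"world!"]
print("".join(sanitize_stream_sketch(fake_stream, content_extractor=toolbaz_extractor)))
# -> Hello, world!

The design win is that each provider now only declares what its wire format looks like (the extractor), while decoding, prefix stripping, and error handling live in one shared helper.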
webscout/Provider/turboseek.py
CHANGED
@@ -4,10 +4,10 @@ import json
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Union, Any, AsyncGenerator, Dict
+from typing import Optional, Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent
 
 class TurboSeek(Provider):
@@ -88,6 +88,13 @@ class TurboSeek(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from TurboSeek stream JSON objects."""
+        if isinstance(chunk, dict) and "text" in chunk:
+            return chunk.get("text") # json.loads already handles unicode escapes
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -142,24 +149,24 @@ class TurboSeek(Provider):
             raise exceptions.FailedToGenerateResponseError(
                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
             )
+
             streaming_text = ""
-            # … (body of the old line-parsing loop not preserved in the source diff view)
-                pass # Ignore lines that are not valid JSON or cannot be decoded
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value="data:",
+                to_json=True, # Stream sends JSON
+                content_extractor=self._turboseek_extractor, # Use the specific extractor
+                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _turboseek_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    self.last_response.update(dict(text=streaming_text)) # Update last_response incrementally
+                    yield dict(text=content_chunk) if not raw else content_chunk # Yield dict or raw string
+
             # Update conversation history after stream finishes
             if streaming_text: # Only update if content was received
                 self.conversation.update_chat_history(
@@ -174,21 +181,15 @@ class TurboSeek(Provider):
         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
             full_text = ""
-            # … (start of the old aggregation loop not preserved in the source diff view)
-                    data = json.loads(line)
-                    if "text" in data:
-                        content = data["text"].encode().decode('unicode_escape')
-                        full_text += content
-                except (json.decoder.JSONDecodeError, UnicodeDecodeError):
-                    pass
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str): # Handle case where raw=True was passed
+                        full_text += chunk_data
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
             # last_response and history are updated within for_stream
             # Ensure last_response reflects the complete aggregated text
             self.last_response = {"text": full_text}
@@ -241,7 +242,7 @@ class TurboSeek(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        # … (old comment not preserved in the source diff view)
+        # Unicode escapes are handled by json.loads within sanitize_stream
         return response.get("text", "")
 
 if __name__ == '__main__':
@@ -250,13 +251,9 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = TurboSeek(timeout=60)
         print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("
-        full_stream_response = ""
+        response_stream = ai.chat("yooooooooooo", stream=True)
         for chunk in response_stream:
             print(chunk, end="", flush=True)
-            full_stream_response += chunk
-        print("\n[bold green]Stream Test Complete.[/bold green]\n")
-
         # Optional: Test non-stream
         # print("[bold blue]Testing Non-Stream:[/bold blue]")
         # response_non_stream = ai.chat("What is the capital of France?", stream=False)
@@ -267,4 +264,3 @@ if __name__ == '__main__':
         print(f"\n[bold red]API Error:[/bold red] {e}")
     except Exception as e:
         print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
-
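
TurboSeek is representative of the JSON-over-SSE providers in this release: the extractor assumes each stream line looks like data: {"text": "..."}, and yield_raw_on_error=False silently drops anything that does not parse. Below is a self-contained demonstration of that extraction logic on synthetic lines; the payload shape is inferred from the extractor, not confirmed against the live API.

import json
from typing import Any, Dict, Optional, Union

def turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Pull the "text" field out of an already-parsed JSON chunk."""
    if isinstance(chunk, dict) and "text" in chunk:
        return chunk.get("text")  # json.loads has already decoded \uXXXX escapes
    return None

# Synthetic SSE-style lines mimicking the assumed wire format.
lines = [b'data: {"text": "Bonjour"}', b'data: not-json', b'data: {"done": true}']
streaming_text = ""
for raw in lines:
    line = raw.decode()
    if not line.startswith("data:"):
        continue
    try:
        payload = json.loads(line[len("data:"):])
    except json.JSONDecodeError:
        continue  # mirrors yield_raw_on_error=False: skip unparseable lines
    piece = turboseek_extractor(payload)
    if piece:
        streaming_text += piece
print(streaming_text)  # -> Bonjour

Note the diff also rewrote for_non_stream to aggregate for_stream's output instead of re-parsing the wire format a second time, which removes the duplicated unicode-escape handling.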
webscout/Provider/typefully.py
CHANGED
@@ -1,10 +1,10 @@
-from typing import Union, Any, Dict
+from typing import Optional, Union, Any, Dict
 import re
 from uuid import uuid4
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -114,6 +114,17 @@ class TypefullyAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Typefully stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -174,23 +185,22 @@ class TypefullyAI(Provider):
             raise exceptions.FailedToGenerateResponseError(
                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
             )
-
-            # … (body of the old line-parsing loop not preserved in the source diff view)
-                        break
+            streaming_text = ""
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._typefully_extractor, # Use the specific extractor
+                end_marker="e:", # Stop processing if "e:" line is encountered (adjust if needed)
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield content_chunk if raw else dict(text=content_chunk)
             # Update history and last response after stream finishes
-            self.last_response.update(dict(text=
+            self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
@@ -271,13 +281,12 @@ class TypefullyAI(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         # Handle potential unicode escapes in the final text
+        # Formatting is now handled by the extractor
         text = response.get("text", "")
         try:
-            # Attempt to decode escapes, return original if fails
-            # Already decoded in ask method, just handle formatting
             formatted_text = text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
             return formatted_text
-        except Exception: # Catch potential errors during
+        except Exception: # Catch potential errors during newline replacement
            return text # Return original text if formatting fails
 
@@ -319,4 +328,3 @@ if __name__ == "__main__":
 
     except Exception as e:
         print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
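
The Typefully endpoint apparently streams typed frames in the Vercel AI SDK style: text arrives in 0:"..." segments and an e: frame closes the stream, which is why the diff passes end_marker="e:". A hedged demo of the same regex-and-unescape logic on synthetic frames follows; the frame contents are invented for illustration.

import re
from typing import Optional

def typefully_extractor(line: str) -> Optional[str]:
    """Pull the quoted payload out of a 0:"..." text frame."""
    match = re.search(r'0:"(.*?)"(?=,|$)', line)
    if match:
        # Decode \uXXXX escapes, then un-escape quotes and backslashes.
        content = match.group(1).encode().decode("unicode_escape")
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

frames = ['0:"Caf\\u00e9 "', '0:"au lait"', 'e:{"finishReason":"stop"}']
out = ""
for frame in frames:
    if frame.startswith("e:"):  # mirrors end_marker="e:" in the diff above
        break
    piece = typefully_extractor(frame)
    if piece:
        out += piece
print(out)  # -> Café au lait

One caveat worth knowing about this idiom: str.encode() followed by decode('unicode_escape') handles ASCII text plus \uXXXX escapes, but it can mangle non-ASCII characters that arrive already decoded, so it only works if the wire format really escapes everything.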
webscout/Provider/typegpt.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Union, Any, Dict, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -137,37 +137,30 @@ class TypeGPT(Provider):
                 f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
             ) from ce
 
-            # … (most of the old SSE-parsing loop not preserved in the source diff view)
-                        message_load += new_content
-                        # Yield only the new content
-                        yield dict(text=new_content) if not raw else new_content
-                        # Update last_response incrementally for potential non-stream use later
-                        self.last_response = dict(text=message_load)
-                    except json.JSONDecodeError:
-                        continue
+            response.raise_for_status() # Check for HTTP errors first
+
+            streaming_text = ""
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value="data:",
+                to_json=True, # Stream sends JSON
+                skip_markers=["[DONE]"],
+                content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by the content_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
+                    # Update last_response incrementally
+                    self.last_response = dict(text=streaming_text)
 
             # Update conversation history after stream finishes
-            if
-            self.conversation.update_chat_history(prompt,
+            if streaming_text: # Only update if something was received
+                self.conversation.update_chat_history(prompt, streaming_text)
 
 
         def for_non_stream():
@@ -185,26 +178,30 @@ class TypeGPT(Provider):
                 f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
             ) from ce
 
-            # … (removed lines not preserved in the source diff view)
-                    f"Request failed - {response.status_code}: {response.text}"
-                )
-
+            response.raise_for_status() # Check for HTTP errors
+
             try:
-                # … (old JSON handling not preserved in the source diff view)
+                response_text = response.text # Get raw text
+
+                # Use sanitize_stream for non-streaming JSON response
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    # Extractor for non-stream structure
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
+                )
+
+                # Extract the single result
+                content = ""
+                for extracted_content in processed_stream:
+                    content = extracted_content if isinstance(extracted_content, str) else ""
+
+                self.last_response = {"text": content} # Store in expected format
+                self.conversation.update_chat_history(prompt, content)
+                return self.last_response
             except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
 
 
@@ -290,4 +287,3 @@ if __name__ == "__main__":
 
     except Exception as e:
         print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
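
TypeGPT speaks the standard OpenAI chat-completions shape, so the diff inlines lambdas rather than defining named extractors: choices[0].delta.content for stream chunks and choices[0].message.content for the non-stream body. A small runnable demonstration of both paths on synthetic JSON follows; the payloads are fabricated to match those access paths.

import json

# The two inline extractors from the diff above.
stream_extract = lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
final_extract = lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None

# Streaming path: SSE lines carrying delta fragments, terminated by [DONE].
sse_lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]
text = ""
for line in sse_lines:
    body = line[len("data:"):].strip()
    if body == "[DONE]":  # mirrors skip_markers=["[DONE]"] in the diff above
        continue
    text += stream_extract(json.loads(body)) or ""
print(text)  # -> Hello

# Non-streaming path: one JSON document with a complete message.
non_stream = '{"choices": [{"message": {"content": "Hello"}}]}'
print(final_extract(json.loads(non_stream)))  # -> Hello

The defensive .get('choices', [{}])[0] chain means a malformed payload yields None rather than raising, which pairs with yield_raw_on_error=False to drop bad chunks silently.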
webscout/Provider/uncovr.py
CHANGED
@@ -5,7 +5,7 @@ import uuid
 import re
 from typing import Any, Dict, Optional, Generator, Union
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -109,6 +109,17 @@ class UncovrAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the UncovrAI stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: with optional quotes
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def refresh_identity(self, browser: str = None):
         """
         Refreshes the browser identity fingerprint.
@@ -202,27 +213,21 @@ class UncovrAI(Provider):
             raise exceptions.FailedToGenerateResponseError(
                 f"Request failed with status code {response.status_code} - {response.text}"
             )
 
             streaming_text = ""
-            # … (body of the old line-parsing loop not preserved in the source diff view)
-                    error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                    if error_match:
-                        error_msg = error_match.group(1)
-                        raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                except (json.JSONDecodeError, UnicodeDecodeError):
-                    continue
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._uncovr_extractor, # Use the specific extractor
+                yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
 
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
@@ -262,25 +267,25 @@ class UncovrAI(Provider):
                 f"Request failed with status code {response.status_code} - {response.text}"
             )
 
-
+            response_text = response.text # Get the full response text
+
+            # Use sanitize_stream to process the non-streaming text
+            # It won't parse as JSON, but will apply the extractor line by line
+            processed_stream = sanitize_stream(
+                data=response_text.splitlines(), # Split into lines first
+                intro_value=None,
+                to_json=False,
+                content_extractor=self._uncovr_extractor,
+                yield_raw_on_error=True
+            )
+
+            # Aggregate the results from the generator
             full_response = ""
-            # … (start of the old parsing loop not preserved in the source diff view)
-                try:
-                    # line is already decoded string
-                    content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
-                    if content_match:
-                        content = content_match.group(1).encode().decode('unicode_escape') # Decode escapes
-                        full_response += content
-                    # Check for error messages
-                    error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                    if error_match:
-                        error_msg = error_match.group(1)
-                        raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                except (json.JSONDecodeError): # UnicodeDecodeError less likely here
-                    continue
+            for content in processed_stream:
+                if content and isinstance(content, str):
+                    full_response += content
 
+            # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
             return {"text": full_response}
@@ -323,7 +328,9 @@ class UncovrAI(Provider):
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        # … (old body not preserved in the source diff view)
+        # Formatting handled by extractor
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
 
 if __name__ == "__main__":
     # Ensure curl_cffi is installed
@@ -359,4 +366,3 @@ if __name__ == "__main__":
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
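
UncovrAI uses the same 0:"..." framing as Typefully, but its extractor matches entire lines (re.match with optional quotes), which lets one extractor serve both the streaming path and the non-stream path that splits response.text into lines. A self-contained sketch of that aggregation follows; the frame data is invented.

import re
from typing import Optional

def uncovr_extractor(line: str) -> Optional[str]:
    """Match a whole line of the form 0:"..." and return its payload."""
    match = re.match(r'^0:\s*"?(.*?)"?$', line)
    if match:
        content = match.group(1).encode().decode("unicode_escape")
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

# Synthetic non-stream body: text frames plus a non-text frame to be ignored.
response_text = '0:"Hello "\n0:"there"\n2:[{"type":"done"}]'
full_response = ""
for line in response_text.splitlines():  # mirrors the non-stream path above
    piece = uncovr_extractor(line)
    if piece:
        full_response += piece
print(full_response)  # -> Hello there

Worth noting: the old code explicitly matched 2:[{"type":"error",...}] frames and raised on them, and that check does not reappear in the new streaming path, which is likely why the diff hedges with yield_raw_on_error=True and the "(Adjust if needed)" comment.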