webscout 8.2.4__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +16 -7
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/__init__.py +7 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +23 -20
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/cli.py +256 -0
- webscout/conversation.py +0 -2
- webscout/exceptions.py +3 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/HeckAI.py
CHANGED
@@ -120,7 +120,7 @@ class HeckAI(Provider):

         def for_stream():
             streaming_text = "" # Initialize outside try block
-            in_answer = False # …
+            # in_answer = False # No longer needed
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
@@ -132,41 +132,23 @@ class HeckAI(Provider):
                     impersonate="chrome110" # Use a common impersonation profile
                 )
                 response.raise_for_status() # Check for HTTP errors
-                … (old lines 135-151: manual line-by-line SSE parsing, not recoverable from the diff viewer)
-                        continue
-
-                    if data == "[ANSWER_DONE]":
-                        in_answer = False
-                        continue
-
-                    if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
-                        continue
-
-                    # Process content if we're in an answer section
-                    if in_answer:
-                        # Assuming 'data' is the text chunk here
-                        streaming_text += data
-                        resp = dict(text=data)
-                        # Yield dict or raw string chunk
-                        yield resp if not raw else data
-                except UnicodeDecodeError:
-                    continue # Ignore decoding errors for specific lines
+
+                # Use sanitize_stream to process the stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:", # Prefix to remove *from lines between markers*
+                    to_json=False, # Content is text
+                    start_marker="data: [ANSWER_START]", # Check against the raw line including prefix
+                    end_marker="data: [ANSWER_DONE]", # Check against the raw line including prefix
+                    skip_markers=["[RELATE_Q_START]", "[RELATE_Q_DONE]"], # Skip these if they appear within answer block
+                    yield_raw_on_error=True
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the text between ANSWER_START and ANSWER_DONE
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk

                 # Update history and previous answer after stream finishes
                 self.previous_answer = streaming_text
webscout/Provider/Jadve.py
CHANGED
@@ -4,7 +4,7 @@ import json
 import re
 from typing import Union, Any, Dict, Optional, Generator

-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -97,6 +97,17 @@ class JadveOpenAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _jadve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Jadve stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -153,41 +164,22 @@ class JadveOpenAI(Provider):
         )
         response.raise_for_status() # Check for HTTP errors

-        … (old lines 156-171: manual buffer parsing, mostly not recoverable from the diff viewer)
-                for chunk in matches:
-                    # Handle potential escape sequences like \\n
-                    decoded_chunk = chunk.encode().decode('unicode_escape')
-                    full_response_text += decoded_chunk
-                    resp = {"text": decoded_chunk}
-                    # Yield dict or raw string chunk
-                    yield resp if not raw else decoded_chunk
-
-                # Remove matched parts from the buffer
-                # Be careful with buffer modification during iteration if issues arise
-                matched_parts = [f'0:"{match}"' for match in matches]
-                for part in matched_parts:
-                    buffer = buffer.replace(part, '', 1)
-
-                # Check if we've reached the end of the response
-                if 'e:' in line or 'd:' in line:
-                    break
-            except UnicodeDecodeError:
-                continue # Ignore decoding errors for specific lines
+        # Use sanitize_stream
+        processed_stream = sanitize_stream(
+            data=response.iter_content(chunk_size=None), # Pass byte iterator
+            intro_value=None, # No simple prefix
+            to_json=False, # Content is text after extraction
+            content_extractor=self._jadve_extractor, # Use the specific extractor
+            # end_marker="e:", # Add if 'e:' reliably marks the end
+            yield_raw_on_error=True
+        )
+
+        for content_chunk in processed_stream:
+            # content_chunk is the string extracted by _jadve_extractor
+            if content_chunk and isinstance(content_chunk, str):
+                full_response_text += content_chunk
+                resp = {"text": content_chunk}
+                yield resp if not raw else content_chunk

         # Update history after stream finishes
         self.last_response = {"text": full_response_text}
@@ -272,7 +264,8 @@ class JadveOpenAI(Provider):
             str: Extracted text.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        … (one removed line, not recoverable from the diff viewer)
+        # Extractor handles formatting
+        return response.get("text", "")

 if __name__ == "__main__":
     # Ensure curl_cffi is installed
webscout/Provider/LambdaChat.py
CHANGED
@@ -7,8 +7,8 @@ import re
 import uuid
 from typing import Any, Dict, List, Optional, Union, Generator

-from webscout.AIutel import Conversation
-from webscout.AIbase import Provider
+from webscout.AIutel import Conversation, sanitize_stream
+from webscout.AIbase import Provider # Import sanitize_stream
 from webscout import exceptions
 from webscout.litagent import LitAgent

@@ -182,62 +182,21 @@ class LambdaChat(Provider):
         boundary += "".join(random.choice(boundary_chars) for _ in range(16))
         return boundary

-    … (old lines 185-188 not recoverable from the diff viewer)
+    @staticmethod
+    def _lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from LambdaChat stream JSON objects."""
+        if not isinstance(chunk, dict) or "type" not in chunk:
+            return None
+
         reasoning_text = ""
-        … (old lines 190-197 not recoverable from the diff viewer)
-                data = json.loads(line)
-
-                # Handle different response types
-                if "type" not in data:
-                    continue
-
-                if data["type"] == "stream" and "token" in data:
-                    token = data["token"].replace("\u0000", "")
-                    full_text += token
-                    resp = {"text": token}
-                    yield resp
-                elif data["type"] == "finalAnswer":
-                    final_text = data.get("text", "")
-                    if final_text and not full_text:
-                        full_text = final_text
-                        resp = {"text": final_text}
-                        yield resp
-                elif data["type"] == "webSearch" and "sources" in data:
-                    sources = data["sources"]
-                elif data["type"] == "reasoning":
-                    has_reasoning = True
-                    if data.get("subtype") == "stream" and "token" in data:
-                        reasoning_text += data["token"]
-
-                # If we have reasoning, prepend it to the next text output
-                if reasoning_text and not full_text:
-                    resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
-                    yield resp
-
-            except json.JSONDecodeError:
-                continue
-
-        # Update conversation history only for saving to file if needed
-        if full_text and self.conversation.file:
-            if has_reasoning:
-                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
-                self.last_response = {"text": full_text_with_reasoning}
-                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
-            else:
-                self.last_response = {"text": full_text}
-                self.conversation.update_chat_history(prompt, full_text)
-
-        return full_text
+        if chunk["type"] == "stream" and "token" in chunk:
+            return chunk["token"].replace("\u0000", "")
+        elif chunk["type"] == "finalAnswer":
+            return chunk.get("text")
+        elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
+            # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
+            return chunk["token"] # Or potentially format as f"<think>{chunk['token']}</think>"
+        return None

     def ask(
         self,
@@ -296,6 +255,7 @@ class LambdaChat(Provider):
         multipart_headers["Content-Length"] = str(len(body))

         def for_stream():
+            streaming_text = "" # Initialize for history
             try:
                 # Try with multipart/form-data first
                 response = None
@@ -327,8 +287,21 @@ class LambdaChat(Provider):

                 response.raise_for_status() # Check status after potential fallback

-                … (old lines 330-331 not recoverable from the diff viewer)
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines
+                    content_extractor=self._lambdachat_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _lambdachat_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk # Aggregate text for history
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk

             except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
                 # Handle specific exceptions if needed
@@ -353,6 +326,10 @@ class LambdaChat(Provider):
             # If we get here, all models failed
             raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e

+            # Update history after stream finishes
+            if streaming_text and self.conversation.file:
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)

         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
webscout/Provider/MCPCore.py
CHANGED
@@ -8,7 +8,7 @@ from curl_cffi import CurlError

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -146,11 +146,11 @@ class MCPCore(Provider):
             )
             return cookie_string, token
         except FileNotFoundError:
-            raise exceptions.…
+            raise exceptions.FailedToGenerateResponseError(
                 f"Error: Cookies file not found at {self.cookies_path}!"
             )
         except json.JSONDecodeError:
-            raise exceptions.…
+            raise exceptions.FailedToGenerateResponseError(
                 f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
             )
@@ -207,24 +207,21 @@ class MCPCore(Provider):
                 )
                 response.raise_for_status()

-                … (old lines 210-224: manual stream parsing, mostly not recoverable from the diff viewer)
-                            yield resp if not raw else content
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by the content_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk

                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
webscout/Provider/Marcus.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Union, Any, Dict, Optional, Generator

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions

@@ -103,18 +103,18 @@ class Marcus(Provider):
             )
             response.raise_for_status() # Check for HTTP errors

-            … (old lines 106-112: manual line iteration, mostly not recoverable from the diff viewer)
-                    # Yield dict or raw string chunk
-                    yield resp if not raw else decoded_line
-                except UnicodeDecodeError:
-                    continue # Ignore decoding errors
+            # Use sanitize_stream to decode bytes and yield text chunks
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No prefix
+                to_json=False, # It's plain text
+                yield_raw_on_error=True
+            )

+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk # Aggregate text
+                    yield {"text": content_chunk} if not raw else content_chunk
             # Update history after stream finishes
             self.last_response = {"text": streaming_text} # Store aggregated text
             self.conversation.update_chat_history(
@@ -140,8 +140,17 @@ class Marcus(Provider):
             )
             response.raise_for_status() # Check for HTTP errors

-            … (old lines 143-144 not recoverable from the diff viewer)
+            response_text_raw = response.text # Get raw text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                intro_value=None, # No prefix
+                to_json=False # It's plain text
+            )
+            # Aggregate the single result
+            full_response = "".join(list(processed_stream))
+
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
             # Return dict or raw string
webscout/Provider/Netwrck.py
CHANGED
@@ -1,10 +1,5 @@
-import time
-import uuid
-import json
 from typing import Any, Dict, Optional, Generator, Union
-from … (import truncated in the diff viewer)
-from datetime import date
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -95,6 +90,15 @@ class Netwrck(Provider):
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )

+    @staticmethod
+    def _netwrck_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Removes surrounding quotes and handles potential escapes."""
+        if isinstance(chunk, str):
+            text = chunk.strip('"')
+            # Handle potential unicode escapes if they appear
+            # text = text.encode().decode('unicode_escape') # Uncomment if needed
+            return text
+        return None
     def ask(
         self,
         prompt: str,
@@ -136,21 +140,18 @@ class Netwrck(Provider):
                 response.raise_for_status() # Check for HTTP errors

                 streaming_text = ""
-                … (old lines 139-150: manual chunk iteration, mostly not recoverable from the diff viewer)
-                        # Handle potential decoding errors if chunks split mid-character
-                        continue
-
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=False, # It's text
+                    content_extractor=self._netwrck_extractor, # Use the quote stripper
+                    yield_raw_on_error=True
+                )
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk} if not raw else content_chunk
                 # Update history after stream finishes
                 self.last_response = {"text": streaming_text} # Store aggregated text
                 self.conversation.update_chat_history(payload["query"], streaming_text)
@@ -174,11 +175,19 @@ class Netwrck(Provider):
                 )
                 response.raise_for_status() # Check for HTTP errors

-                … (old lines 177-181 not recoverable from the diff viewer)
+                response_text_raw = response.text # Get raw text
+
+                # Process the text using sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._netwrck_extractor
+                )
+                # Aggregate the single result
+                text = "".join(list(processed_stream))
+
+                self.last_response = {"text": text} # Store processed text
                 self.conversation.update_chat_history(prompt, text)

                 # Return dict or raw string
webscout/Provider/OPENAI/exachat.py
CHANGED

@@ -35,6 +35,8 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash-thinking-exp-01-21",
             "gemini-2.5-pro-exp-03-25",
             "gemini-2.0-pro-exp-02-05",
+            "gemini-2.5-flash-preview-04-17",
+

         ],
     },
@@ -83,6 +85,7 @@ MODEL_CONFIGS = {
     },
 }

+
 class Completions(BaseCompletions):
     def __init__(self, client: 'ExaChat'):
         self._client = client
@@ -292,6 +295,7 @@ class ExaChat(OpenAICompatibleProvider):
         "gemini-2.0-flash-thinking-exp-01-21",
         "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
+        "gemini-2.5-flash-preview-04-17",

         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
webscout/Provider/OPENAI/scirachat.py
CHANGED

@@ -324,15 +324,13 @@ class SciraChat(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3…
+        "scira-default": "Grok3-mini", # thinking model
+        "scira-grok-3": "Grok3",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
         "scira-google": "gemini 2.5 flash"
-
-
     }

     def __init__(
webscout/Provider/OPENAI/textpollinations.py
CHANGED

@@ -268,28 +268,26 @@ class TextPollinations(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
-        "openai",
-        "openai-large",
-        … (old entries at lines 273-282 truncated in the diff viewer)
-        "deepseek-reasoning",
-        … (old entries at lines 284-290 truncated in the diff viewer)
-        "sur", # Sur AI Assistant (Mistral) (Scaleway) - vision capable
-        "openai-audio", # OpenAI GPT-4o-audio-preview (Azure) - vision and audio capable
+        "openai",
+        "openai-large",
+        "qwen-coder",
+        "llama",
+        "llamascout",
+        "mistral",
+        "unity",
+        "midijourney",
+        "rtist",
+        "searchgpt",
+        "evil",
+        "deepseek-reasoning",
+        "deepseek-reasoning-large",
+        "phi",
+        "llama-vision",
+        "hormoz",
+        "hypnosis-tracy",
+        "deepseek",
+        "sur",
+        "openai-audio",
     ]

     def __init__(