webscout-8.2.4-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +16 -7
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/__init__.py +7 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +23 -20
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/cli.py +256 -0
- webscout/conversation.py +0 -2
- webscout/exceptions.py +3 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
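The recurring change across the provider diffs below is the replacement of hand-rolled SSE parsing loops with a shared `sanitize_stream` helper imported from `webscout.AIutel`. The helper's own source is not part of this diff; what follows is a minimal sketch of the contract its call sites appear to rely on (prefix stripping, sentinel skipping, optional JSON parsing, per-chunk extraction). The name `sanitize_stream_sketch` is deliberate: this is not the library's implementation, and the real helper must also re-chunk raw byte iterators into lines, which is elided here.

# Minimal sketch of the behavior the call sites in this release rely on.
# NOT the library's implementation -- just the contract implied by its
# keyword arguments. Line-aligned input is assumed.
import json
from typing import Any, Callable, Iterable, Iterator, List, Optional, Union

def sanitize_stream_sketch(
    data: Union[str, Iterable[Union[str, bytes]]],
    intro_value: Optional[str] = "data:",       # prefix stripped from each line
    to_json: bool = True,                       # parse the remainder as JSON
    skip_markers: Optional[List[str]] = None,   # sentinel lines, e.g. ["[DONE]"]
    content_extractor: Optional[Callable[[Any], Optional[str]]] = None,
    yield_raw_on_error: bool = True,            # keep unparseable lines as raw text
) -> Iterator[Any]:
    lines = data.splitlines() if isinstance(data, str) else data
    for line in lines:
        if isinstance(line, bytes):
            line = line.decode("utf-8", errors="ignore")
        if intro_value and line.startswith(intro_value):
            line = line[len(intro_value):]
        line = line.strip()
        if not line or (skip_markers and line in skip_markers):
            continue
        chunk: Any = line
        if to_json:
            try:
                chunk = json.loads(line)
            except json.JSONDecodeError:
                if not yield_raw_on_error:
                    continue
        if content_extractor is not None:
            chunk = content_extractor(chunk)
        if chunk is not None:
            yield chunk

Under this contract, each provider only has to supply a small `content_extractor` describing its wire format, which is the shape visible in every diff below.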
webscout/Provider/llmchatco.py
CHANGED

@@ -5,7 +5,7 @@ import uuid
 import re
 from typing import Union, Any, Dict, Optional, Generator, List
 
-from webscout.AIutel import Optimizers
+from webscout.AIutel import Optimizers, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -66,15 +66,15 @@ class LLMChatCo(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations
-
+
         # Create LitAgent instance (keep if needed for other headers)
         lit_agent = Lit()
-
+
         # Headers based on the provided request
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "text/event-stream",
-            "User-Agent": lit_agent.random(),
+            "User-Agent": lit_agent.random(),
             "Accept-Language": "en-US,en;q=0.9",
             "Origin": "https://llmchat.co",
             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
@@ -109,24 +109,16 @@ class LLMChatCo(Provider):
         # Store message history for conversation context
         self.last_assistant_response = ""
 
-
-
-
-
-
-
-
-
-
-
-        if data.startswith('data:'):
-            data_content = data[5:].strip()
-            if data_content:
-                try:
-                    return {'data': json.loads(data_content)}
-                except json.JSONDecodeError:
-                    return {'data': data_content}
-
+    @staticmethod
+    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from LLMChat.co stream JSON objects."""
+        if isinstance(chunk, dict) and "answer" in chunk:
+            answer = chunk["answer"]
+            # Prefer fullText if available and status is COMPLETED
+            if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                return answer["fullText"]
+            elif "text" in answer:
+                return answer["text"]
         return None
 
     def ask(
@@ -176,62 +168,40 @@
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
-                    json=payload,
+                    self.api_endpoint,
+                    json=payload,
                     # headers are set on the session
-                    stream=True,
+                    stream=True,
                     timeout=self.timeout,
                     # proxies are set on the session
                     impersonate="chrome110"  # Use a common impersonation profile
                 )
                 response.raise_for_status()  # Check for HTTP errors
-
-                #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                if
-
-
-
-
-
-
-                    if data_content and current_event == 'answer':
-                        try:
-                            json_data = json.loads(data_content)
-                            if "answer" in json_data and "text" in json_data["answer"]:
-                                text_chunk = json_data["answer"]["text"]
-                                # If there's a fullText, use it as it's more complete
-                                if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                                    text_chunk = json_data["answer"]["fullText"]
-
-                                # Extract only new content since last chunk
-                                new_text = text_chunk[len(full_response):]
-                                if new_text:
-                                    full_response = text_chunk  # Update full response tracker
-                                    resp = dict(text=new_text)
-                                    # Yield dict or raw string chunk
-                                    yield resp if not raw else new_text
-                        except json.JSONDecodeError:
-                            continue  # Ignore invalid JSON data
-                    elif data_content and current_event == 'done':
-                        # Handle potential final data before done event if needed
-                        break  # Exit loop on 'done' event
-
+
+                # Use sanitize_stream
+                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    content_extractor=self._llmchatco_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                last_yielded_text = ""
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _llmchatco_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        # Calculate the new part of the text
+                        new_text = current_full_text[len(last_yielded_text):]
+                        if new_text:
+                            full_response = current_full_text  # Keep track of the latest full text
+                            last_yielded_text = current_full_text  # Update tracker
+                            resp = dict(text=new_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else new_text
+
                 # Update history after stream finishes
                 self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
@@ -244,7 +214,7 @@ class LLMChatCo(Provider):
             except Exception as e:  # Catch other potential exceptions (like HTTPError)
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
-
+
         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
             full_response_text = ""
@@ -261,7 +231,7 @@
             # If aggregation fails but some text was received, use it. Otherwise, re-raise.
             if not full_response_text:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+
             # last_response and history are updated within for_stream
             # Return the final aggregated response dict or raw string
             return full_response_text if raw else self.last_response
@@ -313,17 +283,17 @@ if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-
+
    # Test all available models
    working = 0
    total = len(LLMChatCo.AVAILABLE_MODELS)
-
+
    for model in LLMChatCo.AVAILABLE_MODELS:
        try:
            test_ai = LLMChatCo(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word")
            response_text = response
-
+
            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
@@ -333,4 +303,4 @@ if __name__ == "__main__":
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
-           print(f"{model:<50} {'✗':<10} {str(e)}")
+           print(f"{model:<50} {'✗':<10} {str(e)}")

webscout/Provider/multichat.py
CHANGED

@@ -4,7 +4,7 @@ import json
 import uuid
 from typing import Any, Dict, Union
 from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -279,8 +279,18 @@ class MultiChatAI(Provider):
         response = self._make_request(payload)
         try:
             # Use response.text which is already decoded
-
-
+            response_text_raw = response.text  # Get raw text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                intro_value=None,  # No prefix
+                to_json=False  # It's plain text
+            )
+            # Aggregate the single result
+            full_response = "".join(list(processed_stream)).strip()
+
+            self.last_response = {"text": full_response}  # Store processed text
             self.conversation.update_chat_history(prompt, full_response)
             # Return dict or raw string based on raw flag
             return full_response if raw else self.last_response

webscout/Provider/scira_chat.py
CHANGED

@@ -1,11 +1,12 @@
 from os import system
-import
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 import uuid
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Optional, Union, List
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -17,15 +18,13 @@ class SciraAI(Provider):
     """
 
     AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3
+        "scira-default": "Grok3-mini",  # thinking model
+        "scira-grok-3": "Grok3",
         "scira-vision" : "Grok2-Vision",  # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
         "scira-google": "gemini 2.5 flash"
-
-
     }
 
     def __init__(
@@ -92,9 +91,9 @@
             "Sec-Fetch-Site": "same-origin"
         }
 
-        self.session =
+        self.session = Session()  # Use curl_cffi Session
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies  # Assign proxies directly
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -150,12 +149,23 @@
 
         return self.fingerprint
 
+    @staticmethod
+    def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Scira stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)  # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Dict[str, Any]:  # Note: Stream parameter removed as API doesn't seem to support it
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -181,10 +191,16 @@
         }
 
         try:
-
+            # Use curl_cffi post with impersonate
+            response = self.session.post(
+                self.url,
+                json=payload,
+                timeout=self.timeout,
+                impersonate="chrome120"  # Add impersonate
+            )
             if response.status_code != 200:
                 # Try to get response content for better error messages
-                try:
+                try:  # Use try-except for reading response content
                     error_content = response.text
                 except:
                     error_content = "<could not read response content>"
@@ -192,7 +208,10 @@
                 if response.status_code in [403, 429]:
                     print(f"Received status code {response.status_code}, refreshing identity...")
                     self.refresh_identity()
-                    response = self.session.post(
+                    response = self.session.post(
+                        self.url, json=payload, timeout=self.timeout,
+                        impersonate="chrome120"  # Add impersonate to retry
+                    )
                     if not response.ok:
                         raise exceptions.FailedToGenerateResponseError(
                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
@@ -203,28 +222,27 @@
                         f"Request failed with status code {response.status_code}. Response: {error_content}"
                     )
 
-
-            debug_lines = []
-
-            # Collect the first few lines for debugging
-            for i, line in enumerate(response.iter_lines()):
-                if line:
-                    try:
-                        line_str = line.decode('utf-8')
-                        debug_lines.append(line_str)
+            response_text_raw = response.text  # Get raw response text
 
-
-
-
-
-
-
+            # Process the text using sanitize_stream line by line
+            processed_stream = sanitize_stream(
+                data=response_text_raw.splitlines(),  # Split into lines
+                intro_value=None,  # No simple prefix
+                to_json=False,  # Content is not JSON
+                content_extractor=self._scira_extractor  # Use the specific extractor
+            )
 
+            # Aggregate the results from the generator
+            full_response = ""
+            for content in processed_stream:
+                if content and isinstance(content, str):
+                    full_response += content
 
-                    except: pass
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
             return {"text": full_response}
+        except CurlError as e:  # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
@@ -242,7 +260,8 @@
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
+        # Extractor handles formatting
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
 if __name__ == "__main__":
     print("-" * 100)

webscout/Provider/scnet.py
CHANGED

@@ -4,7 +4,7 @@ import json
 import secrets
 from typing import Any, Dict, Optional, Generator, Union
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 
@@ -86,6 +86,13 @@ class SCNet(Provider):
         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _scnet_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from SCNet stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("content")
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -126,25 +133,21 @@
             response.raise_for_status()  # Check for HTTP errors
 
             streaming_text = ""
-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    continue
-                elif data == "[done]":
-                    break
-
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                intro_value="data:",
+                to_json=True,  # Stream sends JSON
+                skip_markers=["[done]"],
+                content_extractor=self._scnet_extractor,  # Use the specific extractor
+                yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _scnet_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield {"text": content_chunk} if not raw else content_chunk
             # Update history and last response after stream finishes
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)

webscout/Provider/searchchat.py
CHANGED

@@ -6,7 +6,7 @@ from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -183,33 +183,25 @@ class SearchChatAI(Provider):
             )
 
             streaming_text = ""
-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        content = delta["content"]
-                        streaming_text += content
-                        resp = dict(text=content)
-                        # Yield dict or raw string
-                        yield resp if not raw else content
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                intro_value="data:",
+                to_json=True,  # Stream sends JSON
+                skip_markers=["[DONE]"],
+                content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by the content_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
 
             # Update history and last response after stream finishes
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
-
         except CurlError as e:  # Catch CurlError
             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
         except Exception as e:  # Catch other potential exceptions

webscout/Provider/sonus.py
CHANGED

@@ -4,7 +4,7 @@ import json
 from typing import Any, Dict, Optional, Generator, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -78,6 +78,13 @@ class SonusAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _sonus_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Sonus stream JSON objects."""
+        if isinstance(chunk, dict) and "content" in chunk:
+            return chunk.get("content")
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -124,30 +131,22 @@
                 raise exceptions.FailedToGenerateResponseError(
                     f"Request failed with status code {response.status_code} - {response.text}"
                 )
-
+
             streaming_text = ""
-            #
-
-
-
-
-
-
-
-
-                # Handle potential empty lines after prefix removal
-                if not line.strip():
-                    continue
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                intro_value="data:",
+                to_json=True,  # Stream sends JSON
+                content_extractor=self._sonus_extractor,  # Use the specific extractor
+                yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+            )
 
-
-
-
-
-
-                    # Yield dict or raw string
-                    yield resp if raw else resp
-                except (json.JSONDecodeError, UnicodeDecodeError):
-                    continue
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _sonus_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
 
             # Update history and last response after stream finishes
             self.last_response = {"text": streaming_text}
@@ -173,23 +172,22 @@
                     f"Request failed with status code {response.status_code} - {response.text}"
                 )
 
+            response_text_raw = response.text  # Get raw text
+
+            # Use sanitize_stream to process the non-streaming text
+            processed_stream = sanitize_stream(
+                data=response_text_raw.splitlines(),  # Split into lines
+                intro_value="data:",
+                to_json=True,
+                content_extractor=self._sonus_extractor,
+                yield_raw_on_error=False
+            )
+
+            # Aggregate the results
             full_response = ""
-
-
-
-            if line:
-                try:
-                    if line.startswith('data: '):
-                        line = line[6:]
-
-                    if not line.strip():
-                        continue
-
-                    data = json.loads(line)
-                    if "content" in data:
-                        full_response += data["content"]
-                except (json.JSONDecodeError, UnicodeDecodeError):
-                    continue
+            for content in processed_stream:
+                if content and isinstance(content, str):
+                    full_response += content
 
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)