webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
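
Wheels are ordinary zip archives, so a file-level listing like the one above can be reproduced locally. A minimal sketch (assuming both wheels have already been fetched, e.g. with `pip download webscout==8.2.3 --no-deps` and `pip download webscout==8.2.5 --no-deps`):

import zipfile

def wheel_files(path: str) -> set:
    # A .whl is a zip archive; its member list is the package's file list.
    with zipfile.ZipFile(path) as zf:
        return set(zf.namelist())

old = wheel_files("webscout-8.2.3-py3-none-any.whl")
new = wheel_files("webscout-8.2.5-py3-none-any.whl")
print("added:\n" + "\n".join(sorted(new - old)))
print("removed:\n" + "\n".join(sorted(old - new)))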
webscout/Provider/typegpt.py
CHANGED
@@ -1,11 +1,11 @@
-import …
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator
-import requests.exceptions
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -35,7 +35,7 @@ class TypeGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o",
+        model: str = "gpt-4o-mini-2024-07-18",
         system_prompt: str = "You are a helpful assistant.",
         temperature: float = 0.5,
         presence_penalty: int = 0,
@@ -46,7 +46,8 @@ class TypeGPT(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
 
-… (1 line not rendered by the diff viewer)
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
@@ -82,6 +83,8 @@ class TypeGPT(Provider):
         )
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
         self.session.proxies = proxies
 
     def ask(
@@ -120,57 +123,87 @@
 
         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint, …
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120"
                 )
-            except …
+            except CurlError as ce:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
 
-… (25 lines not rendered by the diff viewer)
-                continue
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            response.raise_for_status() # Check for HTTP errors first
+
+            streaming_text = ""
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value="data:",
+                to_json=True, # Stream sends JSON
+                skip_markers=["[DONE]"],
+                content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by the content_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
+                    # Update last_response incrementally
+                    self.last_response = dict(text=streaming_text)
+
+            # Update conversation history after stream finishes
+            if streaming_text: # Only update if something was received
+                self.conversation.update_chat_history(prompt, streaming_text)
+
 
         def for_non_stream():
             try:
-… (2 lines not rendered by the diff viewer)
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate="chrome120"
+                )
+            except CurlError as ce:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
 
-… (3 lines not rendered by the diff viewer)
+            response.raise_for_status() # Check for HTTP errors
+
+            try:
+                response_text = response.text # Get raw text
+
+                # Use sanitize_stream for non-streaming JSON response
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    # Extractor for non-stream structure
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
                 )
-… (3 lines not rendered by the diff viewer)
+
+                # Extract the single result
+                content = ""
+                for extracted_content in processed_stream:
+                    content = extracted_content if isinstance(extracted_content, str) else ""
+
+                self.last_response = {"text": content} # Store in expected format
+                self.conversation.update_chat_history(prompt, content)
+                return self.last_response
+            except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
+                raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -183,23 +216,36 @@ class TypeGPT(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response string or stream."""
         if stream:
+            # ask() yields dicts or strings when streaming
            gen = self.ask(
-                prompt, stream=True,
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
            )
-            for …
-… (1 line not rendered by the diff viewer)
+            for chunk_dict in gen:
+                # get_message expects a dict
+                yield self.get_message(chunk_dict)
         else:
-… (1 line not rendered by the diff viewer)
+            # ask() returns a dict when not streaming
+            response_dict = self.ask(
+                prompt, stream=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_dict)
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response."""
-        if isinstance(response, …
-            return response
-        elif isinstance(response, dict):
+        if isinstance(response, dict):
            assert isinstance(response, dict), "Response should be of dict data-type only"
-… (1 line not rendered by the diff viewer)
+            # Handle potential unicode escapes in the final text
+            text = response.get("text", "")
+            try:
+                # Attempt to decode escapes, return original if fails
+                return text.encode('utf-8').decode('unicode_escape')
+            except UnicodeDecodeError:
+                return text
         else:
-… (1 line not rendered by the diff viewer)
+            # This case should ideally not be reached if ask() behaves as expected
+            raise TypeError(f"Invalid response type: {type(response)}. Expected dict.")
 
 if __name__ == "__main__":
     print("-" * 80)
@@ -213,20 +259,31 @@ if __name__ == "__main__":
     for model in TypeGPT.AVAILABLE_MODELS:
         try:
             test_ai = TypeGPT(model=model, timeout=60)
-… (1 line not rendered by the diff viewer)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-… (1 line not rendered by the diff viewer)
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print …
+                # Optional: print chunks as they arrive for visual feedback
+                # print(chunk, end="", flush=True)
 
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                # …
-… (1 line not rendered by the diff viewer)
+                # Clean and truncate response
+                clean_text = response_text.strip() # Already decoded in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed, but stream test covers basic functionality
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-… (1 line not rendered by the diff viewer)
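
The pattern above, repeated across most providers in this release, swaps `requests` for `curl_cffi`'s requests-compatible `Session` (whose `impersonate="chrome120"` argument mimics a real Chrome TLS fingerprint) and routes both streaming and non-streaming responses through `sanitize_stream` from `webscout.AIutel`. As a rough, dependency-free sketch of what the `sanitize_stream` call in `for_stream()` is configured to do with an OpenAI-style SSE stream (helper names here are illustrative, not webscout's):

import json
from typing import Iterable, Iterator, Optional

def extract_delta(chunk: dict) -> Optional[str]:
    # Mirrors the content_extractor lambda in the diff above.
    return chunk.get("choices", [{}])[0].get("delta", {}).get("content")

def iter_sse_text(lines: Iterable[str]) -> Iterator[str]:
    for line in lines:
        line = line.strip()
        if not line.startswith("data:"):      # intro_value="data:"
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":               # skip_markers=["[DONE]"]
            continue
        try:
            chunk = json.loads(payload)       # to_json=True
        except json.JSONDecodeError:
            continue                          # yield_raw_on_error=False
        content = extract_delta(chunk)
        if content:
            yield content

demo = [
    'data: {"choices":[{"delta":{"content":"Hel"}}]}',
    'data: {"choices":[{"delta":{"content":"lo"}}]}',
    'data: [DONE]',
]
print("".join(iter_sse_text(demo)))  # -> Hello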
webscout/Provider/uncovr.py
CHANGED
@@ -1,10 +1,11 @@
-import …
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 import re
 from typing import Any, Dict, Optional, Generator, Union
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -76,7 +77,9 @@ class UncovrAI(Provider):
             "Sec-Fetch-Site": "same-origin"
         }
 
-… (1 line not rendered by the diff viewer)
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
 
@@ -106,6 +109,17 @@
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the UncovrAI stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: maybe optional quotes
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def refresh_identity(self, browser: str = None):
         """
         Refreshes the browser identity fingerprint.
@@ -169,87 +183,118 @@
 
         def for_stream():
             try:
-… (13 lines not rendered by the diff viewer)
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
+                )
+
+                if response.status_code != 200:
+                    # If we get a non-200 response, try refreshing our identity once
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        # Retry with new identity using curl_cffi session
+                        retry_response = self.session.post(
+                            self.url,
+                            json=payload,
+                            stream=True,
+                            timeout=self.timeout,
+                            impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
+                        )
+                        if not retry_response.ok:
                             raise exceptions.FailedToGenerateResponseError(
-                                f"…
+                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                             )
+                        response = retry_response # Use the successful retry response
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code} - {response.text}"
+                        )
+
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._uncovr_extractor, # Use the specific extractor
+                    yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
 
-… (5 lines not rendered by the diff viewer)
-                        # Use regex to match content messages
-                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
-                        if content_match: # Content message
-                            content = content_match.group(1)
-                            streaming_text += content
-                            resp = dict(text=content)
-                            yield resp if raw else resp
-                        # Check for error messages
-                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                        if error_match:
-                            error_msg = error_match.group(1)
-                            raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue
-… (1 line not rendered by the diff viewer)
-                self.last_response = {"text": streaming_text}
-                self.conversation.update_chat_history(prompt, streaming_text)
-… (1 line not rendered by the diff viewer)
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
 
         def for_non_stream():
             try:
-… (1 line not rendered by the diff viewer)
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
+                )
+
                 if response.status_code != 200:
                     if response.status_code in [403, 429]:
                         self.refresh_identity()
-… (1 line not rendered by the diff viewer)
+                        # Retry with new identity using curl_cffi session
+                        response = self.session.post(
+                            self.url,
+                            json=payload,
+                            timeout=self.timeout,
+                            impersonate=self.fingerprint.get("browser_type", "chrome110")
+                        )
                         if not response.ok:
                             raise exceptions.FailedToGenerateResponseError(
                                 f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                             )
                     else:
                         raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
+                            f"Request failed with status code {response.status_code} - {response.text}"
                         )
 
+                response_text = response.text # Get the full response text
+
+                # Use sanitize_stream to process the non-streaming text
+                # It won't parse as JSON, but will apply the extractor line by line
+                processed_stream = sanitize_stream(
+                    data=response_text.splitlines(), # Split into lines first
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._uncovr_extractor,
+                    yield_raw_on_error=True
+                )
+
+                # Aggregate the results from the generator
                 full_response = ""
-                for …
-                    if …
-… (1 line not rendered by the diff viewer)
-                        line = line.decode('utf-8')
-                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
-                        if content_match:
-                            content = content_match.group(1)
-                            full_response += content
-                        # Check for error messages
-                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                        if error_match:
-                            error_msg = error_match.group(1)
-                            raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue
+                for content in processed_stream:
+                    if content and isinstance(content, str):
+                        full_response += content
 
+                # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
                 self.last_response = {"text": full_response}
                 self.conversation.update_chat_history(prompt, full_response)
                 return {"text": full_response}
-… (2 lines not rendered by the diff viewer)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -283,9 +328,12 @@ class UncovrAI(Provider):
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-… (1 line not rendered by the diff viewer)
+        # Formatting handled by extractor
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -293,20 +341,28 @@
     for model in UncovrAI.AVAILABLE_MODELS:
         try:
             test_ai = UncovrAI(model=model, timeout=60)
-… (2 lines not rendered by the diff viewer)
-            for chunk in response:
-                response_text += chunk
+            # Test non-stream first as stream logic depends on it
+            response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
 
-            if …
-… (4 lines not rendered by the diff viewer)
+            if response_non_stream and len(response_non_stream.strip()) > 0:
+                # Now test stream
+                response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
+                response_text = ""
+                for chunk in response_stream:
+                    response_text += chunk
+
+                if response_text and len(response_text.strip()) > 0:
+                    status = "✓"
+                    # Clean and truncate response
+                    clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                    display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+                else:
+                    status = "✗ (Stream)"
+                    display_text = "Empty or invalid stream response"
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Non-Stream)"
+                display_text = "Empty or invalid non-stream response"
+
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
-… (1 line not rendered by the diff viewer)
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")