webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/AllenAI.py
CHANGED
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import os
 from uuid import uuid4
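This import swap is the heart of the AllenAI.py change: the provider moves from `requests` to `curl_cffi`, whose `Session` can present a real browser's TLS/HTTP2 fingerprint. A minimal sketch of the pattern the rest of the diff builds on (the URL is illustrative):

from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
try:
    # impersonate aligns the TLS and HTTP/2 fingerprint with a real Chrome build,
    # so servers that fingerprint clients see an ordinary browser.
    response = session.get("https://example.com", impersonate="chrome110", timeout=30)
    response.raise_for_status()
    print(response.status_code)
except CurlError as exc:
    # Transport-level failures (DNS, TLS, connection resets) surface as CurlError.
    print(f"curl layer failed: {exc}")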
@@ -57,40 +58,37 @@ class AllenAI(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "OLMo-2-1124-13B-Instruct",
-        host: str = None
+        host: str = None
     ):
         """Initializes the AllenAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.url = "https://playground.allenai.org"
-        # Updated API endpoint to v3 from v4
         self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
         self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"

-        # Updated headers
+        # Updated headers (remove those handled by impersonate)
         self.headers = {
-            'User-Agent': "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36",
             'Accept': '*/*',
             'Accept-Language': 'id-ID,id;q=0.9',
             'Origin': self.url,
             'Referer': f"{self.url}/",
-            'Connection': 'keep-alive',
             'Cache-Control': 'no-cache',
             'Pragma': 'no-cache',
             'Priority': 'u=1, i',
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'cross-site',
-            'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
-            'sec-ch-ua-mobile': '?1',
-            'sec-ch-ua-platform': '"Android"',
             'Content-Type': 'application/json'
         }

-
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies
+
         self.model = model

         # Auto-detect host if not provided
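Deleting the `User-Agent` and `sec-ch-ua*` headers is deliberate rather than cosmetic: with `impersonate` in play, curl_cffi emits browser-consistent values for those headers, and the hand-written Android/Chrome 131 strings would contradict the chrome110 fingerprint. The hunk also fixes an 8.2.3 bug where `self.session.proxies` stood alone as a no-op expression instead of an assignment. A hedged sketch of the resulting session wiring (the proxies shape is an assumption):

from curl_cffi.requests import Session

proxies = {}  # assumed shape, e.g. {"https": "http://127.0.0.1:8080"}

session = Session()
session.headers.update({
    "Accept": "*/*",
    "Origin": "https://playground.allenai.org",
    "Referer": "https://playground.allenai.org/",
    "Content-Type": "application/json",
    # No User-Agent or sec-ch-ua entries: impersonate supplies them at
    # request time, consistent with the TLS fingerprint.
})
session.proxies = proxies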
@@ -133,46 +131,45 @@ class AllenAI(Provider):
     def whoami(self):
         """Gets or creates a user ID for authentication with Allen AI API"""
         temp_id = str(uuid4())
-        […]
+        request_headers = self.session.headers.copy() # Use session headers as base
+        request_headers.update({"x-anonymous-user-id": temp_id})

         try:
+            # Use curl_cffi session get with impersonate
             response = self.session.get(
                 self.whoami_endpoint,
-                headers=headers,
-                timeout=self.timeout
+                headers=request_headers, # Pass updated headers
+                timeout=self.timeout,
+                impersonate="chrome110" # Use a common impersonation profile
             )
+            response.raise_for_status() # Check for HTTP errors

-            […]
-                return data
-            else:
-                self.x_anonymous_user_id = temp_id
-                return {"client": temp_id}
+            data = response.json()
+            self.x_anonymous_user_id = data.get("client", temp_id)
+            return data

-        except […]
+        except CurlError as e: # Catch CurlError
             self.x_anonymous_user_id = temp_id
-            return {"client": temp_id, "error": […]
+            return {"client": temp_id, "error": f"CurlError: {e}"}
+        except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+            self.x_anonymous_user_id = temp_id
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            return {"client": temp_id, "error": f"{type(e).__name__}: {e} - {err_text}"}

-
-    def […]
-        """[…]
-        […]
-                    result += parsed.get("content", "")
-                except:
-                    continue
-        return result
+    @staticmethod
+    def _allenai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from AllenAI stream JSON objects."""
+        if isinstance(chunk, dict):
+            if chunk.get("message", "").startswith("msg_") and "content" in chunk:
+                return chunk.get("content")
+            elif "message" in chunk and chunk.get("content"): # Legacy handling
+                return chunk.get("content")
+        return None

     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
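The new `_allenai_extractor` replaces an inline parsing helper and is designed to plug into `sanitize_stream`, the shared helper extended in this release's `webscout/AIutel.py`. A rough standalone equivalent of that data flow, written only to illustrate the mechanism, not webscout's actual implementation:

import json
from typing import Any, Dict, Iterable, Iterator, Optional, Union

def allenai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Same logic the diff adds: only "msg_*" message objects carry content.
    if isinstance(chunk, dict):
        if chunk.get("message", "").startswith("msg_") and "content" in chunk:
            return chunk.get("content")
        elif "message" in chunk and chunk.get("content"):  # legacy payloads
            return chunk.get("content")
    return None

def extract_content(lines: Iterable[str]) -> Iterator[str]:
    # Stand-in for sanitize_stream(to_json=True, content_extractor=...,
    # yield_raw_on_error=False): decode each line, skip anything malformed.
    for line in lines:
        try:
            content = allenai_extractor(json.loads(line))
        except json.JSONDecodeError:
            continue
        if content:
            yield content

print(list(extract_content([
    '{"message": "msg_1", "content": "Hello"}',
    'not json',
])))  # -> ['Hello']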
@@ -185,20 +182,22 @@ class AllenAI(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         # Ensure we have a user ID
         if not self.x_anonymous_user_id:
             self.whoami()
+            # Check if whoami failed and we still don't have an ID
+            if not self.x_anonymous_user_id:
+                raise exceptions.AuthenticationError("Failed to obtain anonymous user ID.")

-        # Prepare the API request
-        self.session.headers.update({
+        # Prepare the API request headers for this specific request
+        request_headers = self.session.headers.copy()
+        request_headers.update({
             "x-anonymous-user-id": self.x_anonymous_user_id,
-            "Content-Type": "application/json"
+            "Content-Type": "application/json" # Ensure Content-Type is set
         })

         # Create options dictionary
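Copying the session headers per request, instead of mutating `self.session.headers` in place as 8.2.3 did, keeps the anonymous-user ID scoped to the call rather than silently persisted on the shared session. Distilled into a hypothetical helper (`build_request_headers` is not in the package):

from curl_cffi.requests import Session

def build_request_headers(session: Session, anonymous_user_id: str):
    # Copy first: per-call additions must not mutate the session defaults.
    headers = session.headers.copy()
    headers.update({
        "x-anonymous-user-id": anonymous_user_id,
        "Content-Type": "application/json",  # POST bodies are JSON
    })
    return headers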
@@ -232,122 +231,150 @@ class AllenAI(Provider):
                 "host": current_host,
                 "opts": opts
             }
-
-            # Add parent if exists
-            if self.parent:
-                payload["parent"] = self.parent
+            payload["host"] = current_host # Ensure host is updated in payload

             try:
                 if stream:
-                    return self._stream_request(payload, prompt, raw)
+                    # Pass request_headers to the stream method
+                    return self._stream_request(payload, prompt, request_headers, raw)
                 else:
-                    return self._non_stream_request(payload, prompt, raw)
-            except […]
+                    # Pass request_headers to the non-stream method
+                    return self._non_stream_request(payload, prompt, request_headers, raw)
+            except (exceptions.FailedToGenerateResponseError, CurlError, Exception) as e:
                 last_error = e
                 # Log the error but continue to try other hosts
-                print(f"Host '{current_host}' failed for model '{self.model}', trying next host...")
+                print(f"Host '{current_host}' failed for model '{self.model}' ({type(e).__name__}), trying next host...")
                 continue

         # If we've tried all hosts and none worked, raise the last error
         raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")

-    def _stream_request(self, payload, prompt, raw=False):
-        """Handle streaming requests with the given payload"""
+    def _stream_request(self, payload, prompt, request_headers, raw=False):
+        """Handle streaming requests with the given payload and headers"""
+        streaming_text = "" # Initialize outside try block
+        current_parent = None # Initialize outside try block
         try:
+            # Use curl_cffi session post with impersonate
             response = self.session.post(
                 self.api_endpoint,
+                headers=request_headers, # Use headers passed to this method
                 json=payload,
                 stream=True,
-                timeout=self.timeout
+                timeout=self.timeout,
+                impersonate="chrome110" # Use a common impersonation profile
             )
+            response.raise_for_status() # Check for HTTP errors

-            […]
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No prefix
+                to_json=True, # Stream sends JSON lines
+                content_extractor=self._allenai_extractor, # Use the specific extractor
+                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _allenai_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    resp = dict(text=content_chunk)
+                    yield resp if not raw else content_chunk
+
+            # Try to extract parent ID from the *last* raw line (less reliable than before)
+            # This part is tricky as sanitize_stream consumes the raw lines.
+            # We might need to re-fetch or adjust if parent ID is critical per stream.
+            # For now, we'll rely on the non-stream request to update parent ID more reliably.
+            # Example placeholder logic (might not work reliably):
+            try:
+                last_line_data = json.loads(response.text.splitlines()[-1]) # Get last line if possible
+                if last_line_data.get("id"):
+                    current_parent = last_line_data.get("id")
+                elif last_line_data.get("children"):
+                    for child in last_line_data["children"]: # Use last_line_data here
+                        if child.get("role") == "assistant":
+                            current_parent = child.get("id")
+                            break
+
+                # Handle completion
+                if last_line_data.get("final") or last_line_data.get("finish_reason") == "stop":
+                    if current_parent:
+                        self.parent = current_parent

-            […]
-                            yield resp if raw else resp
-
-                        # Legacy handling for older API
-                        elif "message" in data and data.get("content"):
-                            content = data.get("content")
-                            if content.strip():
-                                streaming_text += content
-                                resp = dict(text=content)
-                                yield resp if raw else resp
-
-                        # Update parent ID if present
-                        if data.get("id"):
-                            current_parent = data.get("id")
-                        elif data.get("children"):
-                            for child in data["children"]:
-                                if child.get("role") == "assistant":
-                                    current_parent = child.get("id")
-                                    break
-
-                        # Handle completion
-                        if data.get("final") or data.get("finish_reason") == "stop":
-                            if current_parent:
-                                self.parent = current_parent
-
-                            # Update conversation history
-                            self.conversation.update_chat_history(prompt, streaming_text)
-                            self.last_response = {"text": streaming_text}
-                            return
+                    # Update conversation history
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response = {"text": streaming_text} # Update last response here
+                    return # End the generator
+            except Exception as e:
+                # Log the error but continue with the rest of the function
+                print(f"Error processing response data: {str(e)}")

-            […]
+            # If loop finishes without returning (e.g., no final message), update history
+            if current_parent:
+                self.parent = current_parent
+            self.conversation.update_chat_history(prompt, streaming_text)
+            self.last_response = {"text": streaming_text}

-    def _non_stream_request(self, payload, prompt, raw=False):
-        """Handle non-streaming requests with the given payload"""
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+        except Exception as e: # Catch other potential exceptions (like HTTPError)
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+
+
+    def _non_stream_request(self, payload, prompt, request_headers, raw=False):
+        """Handle non-streaming requests with the given payload and headers"""
         try:
-            # […]
+            # Use curl_cffi session post with impersonate
             response = self.session.post(
                 self.api_endpoint,
+                headers=request_headers, # Use headers passed to this method
                 json=payload,
-                stream=False,
-                timeout=self.timeout
+                stream=False, # Explicitly set stream to False
+                timeout=self.timeout,
+                impersonate="chrome110" # Use a common impersonation profile
             )
+            response.raise_for_status() # Check for HTTP errors

-            […]
+            raw_response = response.text # Get raw text
+
+            # Process the full text using sanitize_stream line by line
+            processed_stream = sanitize_stream(
+                data=raw_response.splitlines(), # Split into lines
+                intro_value=None,
+                to_json=True,
+                content_extractor=self._allenai_extractor,
+                yield_raw_on_error=False
+            )
+            # Aggregate the results
+            parsed_response = "".join(list(processed_stream))
+
+            # Update parent ID from the full response if possible (might need adjustment based on actual non-stream response structure)
+            # This part is speculative as the non-stream structure isn't fully clear from the stream logic
+            try:
+                lines = raw_response.splitlines()
+                if lines:
+                    last_line_data = json.loads(lines[-1])
+                    if last_line_data.get("id"):
+                        self.parent = last_line_data.get("id")
+                    elif last_line_data.get("children"):
+                        for child in last_line_data["children"]:
+                            if child.get("role") == "assistant":
+                                self.parent = child.get("id")
+                                break
+            except (json.JSONDecodeError, IndexError):
+                pass # Ignore errors parsing parent ID from non-stream
+
             self.conversation.update_chat_history(prompt, parsed_response)
             self.last_response = {"text": parsed_response}
-            return self.last_response
+            return self.last_response if not raw else parsed_response # Return dict or raw string

-        except […]
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+        except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

     def chat(
         self,
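With both request paths rewritten, the provider is used like any other webscout provider. A usage sketch; the import path and keyword names are taken from the signatures above, the rest is assumed:

from webscout.Provider.AllenAI import AllenAI

ai = AllenAI(model="OLMo-2-1124-13B-Instruct")  # unknown models raise ValueError

# Streaming: chat() yields plain text chunks extracted from the JSON stream.
for chunk in ai.chat("What is OLMo?", stream=True):
    print(chunk, end="", flush=True)

# Non-streaming: one aggregated string; conversation history is updated either way.
print(ai.chat("Summarize that in one sentence.", stream=False))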
@@ -357,29 +384,35 @@ class AllenAI(Provider):
         conversationally: bool = False,
         host: str = None,
         options: dict = None,
-    ) -> str:
-        def for_stream():
-            for response in self.ask(
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
+        def for_stream_chat(): # Renamed inner function
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
                 prompt,
-                True,
+                stream=True,
+                raw=False, # Ensure ask yields dicts
                 optimizer=optimizer,
                 conversationally=conversationally,
                 host=host,
                 options=options
-            ):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    host=host,
-                    options=options
-                )
             )
-        return for_stream() if stream else for_non_stream()
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat(): # Renamed inner function
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+                host=host,
+                options=options
+            )
+            return self.get_message(response_data) # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
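The renamed inner functions follow a dispatch idiom used across these providers: the streaming path adapts `ask()`'s dict chunks into strings, the non-streaming path unwraps a single dict, and both funnel through `get_message`. A self-contained distillation with stubbed dependencies (the stubs stand in for the real `ask`/`get_message`):

from typing import Any, Dict, Generator, Union

def get_message(response: Dict[str, Any]) -> str:
    return response["text"]

def ask(prompt: str, stream: bool, raw: bool):
    # Stub standing in for AllenAI.ask(); two chunks exercise the stream path.
    if stream:
        return iter([{"text": "Hello, "}, {"text": "world!"}])
    return {"text": "Hello, world!"}

def chat(prompt: str, stream: bool = False) -> Union[str, Generator[str, None, None]]:
    def for_stream_chat() -> Generator[str, None, None]:
        for response_dict in ask(prompt, stream=True, raw=False):  # dicts, not raw text
            yield get_message(response_dict)

    def for_non_stream_chat() -> str:
        return get_message(ask(prompt, stream=False, raw=False))

    return for_stream_chat() if stream else for_non_stream_chat()

print("".join(chat("hi", stream=True)))  # Hello, world!
print(chat("hi"))                        # Hello, world!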
@@ -388,6 +421,7 @@ class AllenAI(Provider):


 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)