webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +367 -41
- webscout/Bard.py +2 -22
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/scira_search.py +24 -11
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/Deepinfra.py +75 -57
- webscout/Provider/ExaChat.py +93 -63
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +39 -59
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +31 -30
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/autoproxy.py +753 -18
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +96 -132
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +82 -49
- webscout/Provider/OPENAI/textpollinations.py +13 -12
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +4 -4
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/Qodo.py +454 -0
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +91 -82
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +6 -6
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +257 -104
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +43 -48
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +325 -299
- webscout/Provider/yep.py +79 -96
- webscout/__init__.py +7 -2
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +146 -105
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/GeminiProxy.py
CHANGED
@@ -14,10 +14,11 @@ class GeminiProxy(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash",
-        "gemini-2.5-pro-preview-06-05",
-        "gemini-2.5-pro-preview-05-06",
         "gemini-2.5-flash-preview-04-17",
         "gemini-2.5-flash-preview-05-20",
+        "gemini-2.5-flash-lite-preview-06-17",
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",

     ]

@@ -135,6 +136,31 @@ class GeminiProxy(Provider):
         return str(response)

 if __name__ == "__main__":
-
-
-    print(
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(GeminiProxy.AVAILABLE_MODELS)
+
+    for model in GeminiProxy.AVAILABLE_MODELS:
+        try:
+            test_ai = GeminiProxy(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/HeckAI.py
CHANGED
@@ -57,7 +57,7 @@ class HeckAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "google/gemini-2.
+        model: str = "google/gemini-2.5-flash-preview",
         language: str = "English"
     ):
         """
@@ -177,79 +177,73 @@ class HeckAI(Provider):
         def for_stream():
             streaming_text = "" # Initialize outside try block
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.url,
-                    # headers are set on the session
                     data=json.dumps(payload),
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110"
+                    impersonate="chrome110"
                 )
-                response.raise_for_status()
+                response.raise_for_status()

-                # Use sanitize_stream to process the stream
                 processed_stream = sanitize_stream(
-
-
-
-
-
-
-
-
+                    data=response.iter_content(chunk_size=1024),
+                    intro_value="data: ",
+                    to_json=False,
+                    start_marker="data: [ANSWER_START]",
+                    end_marker="data: [ANSWER_DONE]",
+                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                    yield_raw_on_error=True,
+                    strip_chars=" \n\r\t",
+                    raw=raw
                 )

                 for content_chunk in processed_stream:
-                    # content_chunk is the text between ANSWER_START and ANSWER_DONE
                     if content_chunk and isinstance(content_chunk, str):
-
-
+                        content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)

                 # Only update history if we received a valid response
                 if streaming_text:
-                    # Update history and previous answer after stream finishes
                     self.previous_answer = streaming_text
-                    # Convert to simple text before updating conversation
                     try:
-                        # Ensure content is valid before updating conversation
                         if streaming_text and isinstance(streaming_text, str):
-                            # Sanitize the content to ensure it's valid
                             sanitized_text = streaming_text.strip()
-                            if sanitized_text:
+                            if sanitized_text:
                                 self.conversation.update_chat_history(prompt, sanitized_text)
                     except Exception as e:
-                        # If conversation update fails, log but don't crash
                         print(f"Warning: Failed to update conversation history: {str(e)}")
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e:
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if
-
-
-
-
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += text
             except Exception as e:
-
-
-
-
-                # Return the final aggregated response dict or raw string
-                self.last_response = {"text": full_text} # Update last_response here
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+            self.last_response = {"text": full_text}
             return full_text if raw else self.last_response

-
         return for_stream() if stream else for_non_stream()

     @staticmethod
@@ -266,15 +260,15 @@ class HeckAI(Provider):
         if isinstance(text, dict) and "text" in text:
             try:
                 text["text"] = text["text"].encode("latin1").decode("utf-8")
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
             except (UnicodeError, AttributeError) as e:
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
         elif isinstance(text, str):
             try:
                 return text.encode("latin1").decode("utf-8")
             except (UnicodeError, AttributeError) as e:
-                return text
-        return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+        return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes

     def chat(
         self,
@@ -282,6 +276,7 @@ class HeckAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Sends a prompt to the HeckAI API and returns only the message text.
@@ -298,18 +293,23 @@ class HeckAI(Provider):
         def for_stream_chat():
             # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for
-
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
             # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
             return self.get_message(response_data) # get_message expects dict

         return for_stream_chat() if stream else for_non_stream_chat()
@@ -338,38 +338,43 @@ class HeckAI(Provider):
         # Ensure text is a string
         text = response["text"]
         if not isinstance(text, str):
-
+            text = str(text)

-        return text
+        return text.replace('\\\\', '\\').replace('\\"', '"')

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in HeckAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = HeckAI(model=model, timeout=60)
+    #         # Use non-streaming mode first to avoid potential streaming issues
+    #         try:
+    #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+    #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+    #         except Exception as e1:
+    #             # Fall back to streaming if non-streaming fails
+    #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+    #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+    #             response_text = ""
+    #             for chunk in response:
+    #                 if chunk and isinstance(chunk, str):
+    #                     response_text += chunk

-
-
-
-
-
-
-
-
-
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #             print(f"\r{model:<50} {status:<10} {display_text}")
+    #         else:
+    #             raise ValueError("Empty or invalid response")
+    #     except Exception as e:
+    #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = HeckAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
webscout/Provider/Jadve.py
CHANGED
@@ -170,16 +170,20 @@ class JadveOpenAI(Provider):
                     intro_value=None, # No simple prefix
                     to_json=False, # Content is text after extraction
                     content_extractor=self._jadve_extractor, # Use the specific extractor
-
-
+                    yield_raw_on_error=True,
+                    raw=raw
                 )

                 for content_chunk in processed_stream:
-
-
-
-
-
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            resp = {"text": content_chunk}
+                            yield resp

                 # Update history after stream finishes
                 self.last_response = {"text": full_response_text}
@@ -191,30 +195,22 @@ class JadveOpenAI(Provider):
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e

-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             collected_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if
-
-
-
-
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            collected_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            collected_text += chunk_data["text"]
             except Exception as e:
-
-
-                raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
             # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
             return collected_text if raw else self.last_response

-
-        # Since the API endpoint suggests streaming, always call the stream generator.
-        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -223,6 +219,7 @@ class JadveOpenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Generate a chat response (string).
@@ -232,25 +229,29 @@ class JadveOpenAI(Provider):
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+            raw (bool, optional): Return raw response. Defaults to False.
         Returns:
             str or generator: Generated response string or generator yielding response chunks.
         """
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
            )
-            for
-
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
+            return self.get_message(response_data)

         return for_stream_chat() if stream else for_non_stream_chat()

@@ -268,24 +269,29 @@ class JadveOpenAI(Provider):
         return response.get("text", "")

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in JadveOpenAI.AVAILABLE_MODELS:
-
-
-
-
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in JadveOpenAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = JadveOpenAI(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response

-
-
-
-
-
-
-
-
-
-
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = JadveOpenAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
webscout/Provider/LambdaChat.py
CHANGED
@@ -93,49 +93,49 @@ class LambdaChat(Provider):
         self.session.proxies = proxies # Assign proxies directly

     def create_conversation(self, model: str):
-        """Create a new conversation with the specified model."""
+        """Create a new conversation with the specified model, using updated headers and cookies."""
         url = f"{self.url}/conversation"
         payload = {
             "model": model,
-            "preprompt": self.system_prompt
-
+            "preprompt": self.system_prompt
         }
-
-        # Update
+
+        # Update headers for this specific request
         headers = self.headers.copy()
-        headers["Referer"] = f"{self.url}/
-
+        headers["Referer"] = f"{self.url}/"
+        # Add browser-like headers for best compatibility
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+        # cookies are handled by curl_cffi session automatically
+
         try:
-            # Use curl_cffi session post with impersonate
             response = self.session.post(
-                url,
-                json=payload,
-                headers=headers,
-                impersonate="chrome110"
+                url,
+                json=payload,
+                headers=headers,
+                impersonate="chrome110"
             )
-
             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed.")
-
-            # Handle other error codes
             if response.status_code != 200:
                 return None
-
             data = response.json()
             conversation_id = data.get("conversationId")
-
-            # Store conversation data
             if model not in self._conversation_data:
                 self._conversation_data[model] = {
                     "conversationId": conversation_id,
-                    "messageId": str(uuid.uuid4())
+                    "messageId": str(uuid.uuid4())
                 }
-
             return conversation_id
-        except CurlError
-            # Log or handle CurlError specifically if needed
+        except CurlError:
             return None
-        except Exception:
+        except Exception:
             return None

     def fetch_message_id(self, conversation_id: str) -> str:
@@ -230,35 +230,43 @@ class LambdaChat(Provider):
         url = f"{self.url}/conversation/{conversation_id}"
         message_id = self._conversation_data[model]["messageId"]

-        # Data to send
+        # Data to send (tools should be empty list by default)
         request_data = {
             "inputs": prompt,
             "id": message_id,
             "is_retry": False,
             "is_continue": False,
             "web_search": web_search,
-            "tools": [
+            "tools": []
         }
-
+
         # Update headers for this specific request
         headers = self.headers.copy()
         headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
-
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+
         # Create multipart form data
         boundary = self.generate_boundary()
         multipart_headers = headers.copy()
         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
-
+
         # Serialize the data to JSON
         data_json = json.dumps(request_data, separators=(',', ':'))
-
+
         # Create the multipart form data body
         body = f"--{boundary}\r\n"
         body += f'Content-Disposition: form-data; name="data"\r\n'
-        body += f"
+        body += f"\r\n"
         body += f"{data_json}\r\n"
         body += f"--{boundary}--\r\n"
-
+
         multipart_headers["Content-Length"] = str(len(body))

         def for_stream():
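Besides the browser-fingerprint headers, the visible change in the hand-built multipart body is the line that now explicitly emits the blank line terminating the part's headers before the JSON payload. A standalone sketch of that construction, with an illustrative boundary value (the provider generates its own via generate_boundary()):

    import json

    boundary = "----WebscoutSketchBoundary123"  # illustrative; not the real generated value
    request_data = {
        "inputs": "hi",          # placeholder prompt
        "id": "message-id",      # placeholder message id
        "is_retry": False,
        "is_continue": False,
        "web_search": False,
        "tools": [],             # now an empty list, per the diff
    }
    data_json = json.dumps(request_data, separators=(',', ':'))

    body = f"--{boundary}\r\n"
    body += 'Content-Disposition: form-data; name="data"\r\n'
    body += "\r\n"               # blank line ends the part headers (the fixed line)
    body += f"{data_json}\r\n"
    body += f"--{boundary}--\r\n"

    headers = {
        "Content-Type": f"multipart/form-data; boundary={boundary}",
        "Content-Length": str(len(body)),
    }
    print(body)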
|