webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/LambdaChat.py
CHANGED
@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import time
 import random
@@ -31,42 +32,41 @@ class LambdaChat(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2000,
+        max_tokens: int = 2000, # Note: max_tokens is not used by this API
         timeout: int = 60,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         model: str = "deepseek-llama3.3-70b",
-        assistantId: str = None,
-        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+        assistantId: str = None, # Note: assistantId is not used by this API
+        system_prompt: str = "You are a helpful assistant. Please answer the following question.", # Note: system_prompt is not used by this API
     ):
         """Initialize the LambdaChat client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.model = model
-
-        self.session
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.assistantId = assistantId
         self.system_prompt = system_prompt

         # Set up headers for all requests
         self.headers = {
-            "Content-Type": "application/json",
-            "
-            "
-            "Accept-
-            "
-            "
-            "
-            "Sec-Ch-Ua": "
-            "Sec-Ch-Ua-
-            "Sec-
-            "Sec-Fetch-Dest": "empty",
+            "Content-Type": "application/json", # Keep Content-Type for JSON posts
+            "Accept": "*/*", # Keep Accept
+            # "User-Agent": LitAgent().random(), # Removed, handled by impersonate
+            "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
+            "Origin": self.url, # Keep Origin
+            "Referer": f"{self.url}/", # Keep Referer (will be updated per request)
+            # "Sec-Ch-Ua": "\"Chromium\";v=\"120\"", # Removed, handled by impersonate
+            # "Sec-Ch-Ua-Mobile": "?0", # Removed, handled by impersonate
+            # "Sec-Ch-Ua-Platform": "\"Windows\"", # Removed, handled by impersonate
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-* headers
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "DNT": "1",
-            "Priority": "u=1, i"
+            "DNT": "1", # Keep DNT
+            "Priority": "u=1, i" # Keep Priority
         }

         # Provider settings
@@ -81,11 +81,17 @@ class LambdaChat(Provider):
         # Store conversation data for different models
         self._conversation_data = {}

+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
     def create_conversation(self, model: str):
         """Create a new conversation with the specified model."""
         url = f"{self.url}/conversation"
         payload = {
-            "model": model
+            "model": model,
+            "preprompt": self.system_prompt,
+
         }

         # Update referer for this specific request
@@ -93,7 +99,13 @@ class LambdaChat(Provider):
         headers["Referer"] = f"{self.url}/models/{model}"

         try:
-
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                url,
+                json=payload,
+                headers=headers, # Use updated headers with specific Referer
+                impersonate="chrome110" # Use a common impersonation profile
+            )

             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed.")
@@ -113,14 +125,21 @@ class LambdaChat(Provider):
             }

             return conversation_id
-        except
+        except CurlError as e: # Catch CurlError
+            # Log or handle CurlError specifically if needed
+            return None
+        except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
             return None

     def fetch_message_id(self, conversation_id: str) -> str:
         """Fetch the latest message ID for a conversation."""
         try:
             url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
-            response = self.session.get(
+            response = self.session.get(
+                url,
+                headers=self.headers, # Use base headers
+                impersonate="chrome110" # Use a common impersonation profile
+            )
             response.raise_for_status()

             # Parse the JSON data from the response
@@ -150,7 +169,9 @@ class LambdaChat(Provider):

             return message_id

-        except
+        except CurlError: # Catch CurlError
+            return str(uuid.uuid4()) # Fallback on CurlError
+        except Exception: # Catch other potential exceptions
             # Fall back to a UUID if there's an error
             return str(uuid.uuid4())

@@ -221,10 +242,10 @@ class LambdaChat(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
         """Send a message to the Lambda Chat API"""
@@ -279,39 +300,41 @@ class LambdaChat(Provider):
                 # Try with multipart/form-data first
                 response = None
                 try:
+                    # Use curl_cffi session post with impersonate
                     response = self.session.post(
                         url,
                         data=body,
-                        headers=multipart_headers,
+                        headers=multipart_headers, # Use multipart headers
                         stream=True,
-                        timeout=self.timeout
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use a common impersonation profile
                     )
-
-
-
+                    response.raise_for_status() # Check status after potential error
+                except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
+                    response = None # Ensure response is None if multipart fails
+
                 # If multipart fails or returns error, try with regular JSON
                 if not response or response.status_code != 200:
+                    # Use curl_cffi session post with impersonate
                     response = self.session.post(
                         url,
-                        json=request_data,
-                        headers=headers,
+                        json=request_data, # Use JSON payload
+                        headers=headers, # Use regular headers
                         stream=True,
-                        timeout=self.timeout
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use a common impersonation profile
                     )

-                    #
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                    response.raise_for_status() # Check status after potential fallback

                 # Process the streaming response
                 yield from self.process_response(response, prompt)

-            except Exception as e:
-
-
-
-
-                raise exceptions.AuthenticationError("Authentication failed.")
+            except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
+                # Handle specific exceptions if needed
+                if isinstance(e, CurlError):
+                    # Log or handle CurlError specifically
+                    pass

                 # Try another model if current one fails
                 if len(self.AVAILABLE_MODELS) > 1:
@@ -328,15 +351,29 @@ class LambdaChat(Provider):
                         return

                 # If we get here, all models failed
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
+

         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
            response_text = ""
-
-
-
-
-
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within process_response called by for_stream
+            # Return the final aggregated response dict or raw string
+            return response_text if raw else {"text": response_text} # Return dict for consistency
+

         return for_stream() if stream else for_non_stream()

@@ -344,25 +381,29 @@ class LambdaChat(Provider):
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False
     ) -> Union[str, Generator]:
         """Generate a response to a prompt"""
-        def
-
-
-
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict

-        def
-
-
-
-
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
             )
+            return self.get_message(response_data) # get_message expects dict

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         """Extract message text from response"""
@@ -370,6 +411,7 @@ class LambdaChat(Provider):
         return response.get("text", "")

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -389,4 +431,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid response"
             print(f"{model:<50} {status:<10} {display_text}")
     except Exception as e:
-        print(f"{model:<50} {'✗':<10} {str(e)}")
+        print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Llama3.py
CHANGED
@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator

@@ -52,8 +53,8 @@ class Sambanova(Provider):
         self.model = model
         self.system_prompt = system_prompt

-
-        self.session
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -80,8 +81,13 @@ class Sambanova(Provider):
         self.base_url = "https://api.sambanova.ai/v1/chat/completions"
         self.headers = {
             "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json"
+            "Content-Type": "application/json",
+            # Add User-Agent or sec-ch-ua headers if needed, or rely on impersonate
         }
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly

     def ask(
         self,
@@ -105,36 +111,42 @@ class Sambanova(Provider):

         payload = {
             "model": self.model,
-            "stream": stream,
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
             "max_tokens": self.max_tokens_to_sample,
+            "stream": True # API seems to always stream based on endpoint name
         }

         def for_stream():
-            streaming_text = ""
+            streaming_text = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.base_url,
+                    self.base_url,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-
-
-
-
-
-                for line in response.iter_lines():
-                    if line:
-                        # Remove the "data:" prefix and extra whitespace if present
-                        line_str = line.decode('utf-8').strip() if isinstance(line, bytes) else line.strip()
-                        if line_str.startswith("data:"):
-                            data = line_str[5:].strip()
-                        else:
-                            data = line_str
-                        if data == "[DONE]":
-                            break
+                response.raise_for_status() # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
                         try:
+                            line_str = line_bytes.decode('utf-8').strip()
+                            if line_str.startswith("data:"):
+                                data = line_str[5:].strip()
+                            else:
+                                data = line_str # Handle cases where 'data:' prefix might be missing
+
+                            if data == "[DONE]":
+                                break
+
                             json_data = json.loads(data)
                             # Skip entries without valid choices
                             if not json_data.get("choices"):
@@ -143,26 +155,52 @@ class Sambanova(Provider):
                             delta = choice.get("delta", {})
                             if "content" in delta:
                                 content = delta["content"]
-
-
-
+                                if content: # Ensure content is not None or empty
+                                    streaming_text += content
+                                    resp = {"text": content}
+                                    # Yield dict or raw string chunk
+                                    yield resp if not raw else content
                             # If finish_reason is provided, consider the stream complete
                             if choice.get("finish_reason"):
                                 break
-                        except json.JSONDecodeError:
-                            continue
-
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes
+                self.last_response = streaming_text # Store aggregated text
                 self.conversation.update_chat_history(
                     prompt, self.last_response
                 )
-            except
-                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+

         def for_non_stream():
-
-
-
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else {"text": self.last_response} # Return dict for consistency
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -173,12 +211,28 @@ class Sambanova(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`"""
-
-
-
-
-
-
+
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict or string
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            return self.get_message(response_data) # get_message expects dict or string
+
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Any) -> str:
         """
@@ -197,6 +251,7 @@ class Sambanova(Provider):
         return ""

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = Sambanova(api_key='')
     response = ai.chat(input(">>> "), stream=True)