webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/LambdaChat.py

@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import time
 import random
@@ -31,42 +32,41 @@ class LambdaChat(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2000,
+        max_tokens: int = 2000, # Note: max_tokens is not used by this API
         timeout: int = 60,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         model: str = "deepseek-llama3.3-70b",
-        assistantId: str = None,
-        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+        assistantId: str = None, # Note: assistantId is not used by this API
+        system_prompt: str = "You are a helpful assistant. Please answer the following question.", # Note: system_prompt is not used by this API
     ):
         """Initialize the LambdaChat client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.model = model
-        self.session = requests.Session()
-        self.session.proxies.update(proxies)
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.assistantId = assistantId
         self.system_prompt = system_prompt

         # Set up headers for all requests
         self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Origin": self.url,
-            "Referer": f"{self.url}/",
-            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": "\"Windows\"",
-            "Sec-Fetch-Dest": "empty",
+            "Content-Type": "application/json", # Keep Content-Type for JSON posts
+            "Accept": "*/*", # Keep Accept
+            # "User-Agent": LitAgent().random(), # Removed, handled by impersonate
+            "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
+            "Origin": self.url, # Keep Origin
+            "Referer": f"{self.url}/", # Keep Referer (will be updated per request)
+            # "Sec-Ch-Ua": "\"Chromium\";v=\"120\"", # Removed, handled by impersonate
+            # "Sec-Ch-Ua-Mobile": "?0", # Removed, handled by impersonate
+            # "Sec-Ch-Ua-Platform": "\"Windows\"", # Removed, handled by impersonate
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-* headers
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "DNT": "1",
-            "Priority": "u=1, i"
+            "DNT": "1", # Keep DNT
+            "Priority": "u=1, i" # Keep Priority
         }

         # Provider settings
@@ -81,11 +81,17 @@ class LambdaChat(Provider):
         # Store conversation data for different models
         self._conversation_data = {}

+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
     def create_conversation(self, model: str):
         """Create a new conversation with the specified model."""
         url = f"{self.url}/conversation"
         payload = {
-            "model": model
+            "model": model,
+            "preprompt": self.system_prompt,
+
         }

         # Update referer for this specific request
@@ -93,7 +99,13 @@ class LambdaChat(Provider):
         headers["Referer"] = f"{self.url}/models/{model}"

         try:
-            response = self.session.post(url, json=payload, headers=headers)
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                url,
+                json=payload,
+                headers=headers, # Use updated headers with specific Referer
+                impersonate="chrome110" # Use a common impersonation profile
+            )

             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed.")
@@ -113,14 +125,21 @@ class LambdaChat(Provider):
             }

             return conversation_id
-        except requests.exceptions.RequestException:
+        except CurlError as e: # Catch CurlError
+            # Log or handle CurlError specifically if needed
+            return None
+        except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
             return None

     def fetch_message_id(self, conversation_id: str) -> str:
         """Fetch the latest message ID for a conversation."""
         try:
             url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
-            response = self.session.get(url, headers=self.headers)
+            response = self.session.get(
+                url,
+                headers=self.headers, # Use base headers
+                impersonate="chrome110" # Use a common impersonation profile
+            )
             response.raise_for_status()

             # Parse the JSON data from the response
@@ -150,7 +169,9 @@ class LambdaChat(Provider):

             return message_id

-        except Exception:
+        except CurlError: # Catch CurlError
+            return str(uuid.uuid4()) # Fallback on CurlError
+        except Exception: # Catch other potential exceptions
             # Fall back to a UUID if there's an error
             return str(uuid.uuid4())

@@ -221,10 +242,10 @@ class LambdaChat(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
         """Send a message to the Lambda Chat API"""
@@ -279,39 +300,41 @@ class LambdaChat(Provider):
                 # Try with multipart/form-data first
                 response = None
                 try:
+                    # Use curl_cffi session post with impersonate
                     response = self.session.post(
                         url,
                         data=body,
-                        headers=multipart_headers,
+                        headers=multipart_headers, # Use multipart headers
                         stream=True,
-                        timeout=self.timeout
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use a common impersonation profile
                     )
-                except requests.exceptions.RequestException:
-                    pass
-
+                    response.raise_for_status() # Check status after potential error
+                except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
+                    response = None # Ensure response is None if multipart fails
+
                 # If multipart fails or returns error, try with regular JSON
                 if not response or response.status_code != 200:
+                    # Use curl_cffi session post with impersonate
                     response = self.session.post(
                         url,
-                        json=request_data,
-                        headers=headers,
+                        json=request_data, # Use JSON payload
+                        headers=headers, # Use regular headers
                         stream=True,
-                        timeout=self.timeout
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use a common impersonation profile
                    )

-                # If both methods fail, raise exception
-                if response.status_code != 200:
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                    response.raise_for_status() # Check status after potential fallback

                 # Process the streaming response
                 yield from self.process_response(response, prompt)

-            except Exception as e:
-                if isinstance(e, requests.exceptions.RequestException):
-                    if hasattr(e, 'response') and e.response is not None:
-                        status_code = e.response.status_code
-                        if status_code == 401:
-                            raise exceptions.AuthenticationError("Authentication failed.")
+            except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
+                # Handle specific exceptions if needed
+                if isinstance(e, CurlError):
+                    # Log or handle CurlError specifically
+                    pass

                 # Try another model if current one fails
                 if len(self.AVAILABLE_MODELS) > 1:
@@ -328,15 +351,29 @@ class LambdaChat(Provider):
                     return

                 # If we get here, all models failed
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
+

         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
             response_text = ""
-            for response in for_stream():
-                if "text" in response:
-                    response_text += response["text"]
-            self.last_response = {"text": response_text}
-            return self.last_response
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within process_response called by for_stream
+            # Return the final aggregated response dict or raw string
+            return response_text if raw else {"text": response_text} # Return dict for consistency
+

         return for_stream() if stream else for_non_stream()

@@ -344,25 +381,29 @@ class LambdaChat(Provider):
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False
     ) -> Union[str, Generator]:
         """Generate a response to a prompt"""
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-            ):
-                yield self.get_message(response)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict

-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-                )
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
             )
+            return self.get_message(response_data) # get_message expects dict

-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         """Extract message text from response"""
@@ -370,6 +411,7 @@ class LambdaChat(Provider):
         return response.get("text", "")

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -389,4 +431,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid response"
             print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+            print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator

@@ -52,8 +53,8 @@ class Sambanova(Provider):
         self.model = model
         self.system_prompt = system_prompt

-        self.session = requests.Session()
-        self.session.proxies = proxies
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -80,8 +81,13 @@ class Sambanova(Provider):
         self.base_url = "https://api.sambanova.ai/v1/chat/completions"
         self.headers = {
             "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json"
+            "Content-Type": "application/json",
+            # Add User-Agent or sec-ch-ua headers if needed, or rely on impersonate
         }
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly

     def ask(
         self,
@@ -105,36 +111,42 @@ class Sambanova(Provider):

         payload = {
             "model": self.model,
-            "stream": stream,
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
             "max_tokens": self.max_tokens_to_sample,
+            "stream": True # API seems to always stream based on endpoint name
         }

         def for_stream():
-            streaming_text = ""
+            streaming_text = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.base_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                    self.base_url,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-                if not response.ok:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Request failed: {response.status_code} - {response.text}"
-                    )
-
-                for line in response.iter_lines():
-                    if line:
-                        # Remove the "data:" prefix and extra whitespace if present
-                        line_str = line.decode('utf-8').strip() if isinstance(line, bytes) else line.strip()
-                        if line_str.startswith("data:"):
-                            data = line_str[5:].strip()
-                        else:
-                            data = line_str
-                        if data == "[DONE]":
-                            break
+                response.raise_for_status() # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
                         try:
+                            line_str = line_bytes.decode('utf-8').strip()
+                            if line_str.startswith("data:"):
+                                data = line_str[5:].strip()
+                            else:
+                                data = line_str # Handle cases where 'data:' prefix might be missing
+
+                            if data == "[DONE]":
+                                break
+
                             json_data = json.loads(data)
                             # Skip entries without valid choices
                             if not json_data.get("choices"):
@@ -143,26 +155,52 @@ class Sambanova(Provider):
                             delta = choice.get("delta", {})
                             if "content" in delta:
                                 content = delta["content"]
-                                streaming_text += content
-                                # Yield content directly as a string for consistency
-                                yield content
+                                if content: # Ensure content is not None or empty
+                                    streaming_text += content
+                                    resp = {"text": content}
+                                    # Yield dict or raw string chunk
+                                    yield resp if not raw else content
                             # If finish_reason is provided, consider the stream complete
                             if choice.get("finish_reason"):
                                 break
-                        except json.JSONDecodeError:
-                            continue
-                self.last_response = streaming_text
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes
+                self.last_response = streaming_text # Store aggregated text
                 self.conversation.update_chat_history(
                     prompt, self.last_response
                 )
-            except requests.exceptions.RequestException as e:
-                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+

         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else {"text": self.last_response} # Return dict for consistency
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -173,12 +211,28 @@ class Sambanova(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`"""
-        if stream:
-            # For stream mode, yield the text chunks directly
-            return self.ask(prompt, stream=True, optimizer=optimizer, conversationally=conversationally)
-        else:
-            # For non-stream mode, return the complete text response
-            return self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
+
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict or string
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            return self.get_message(response_data) # get_message expects dict or string
+
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Any) -> str:
         """
@@ -197,6 +251,7 @@ class Sambanova(Provider):
         return ""

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = Sambanova(api_key='')
     response = ai.chat(input(">>> "), stream=True)
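
Both rewritten `for_stream` bodies parse the same server-sent-events shape that OpenAI-style chat-completions endpoints emit; curl_cffi's `iter_lines()` yields raw bytes, hence the explicit decode before stripping the `data:` prefix, stopping on `[DONE]`, and reading `choices[0].delta.content` out of each JSON chunk. A standalone sketch of just that parsing loop, with a hypothetical `lines` list standing in for `response.iter_lines()`:

import json

# Hypothetical stand-in for response.iter_lines(); curl_cffi yields bytes,
# hence the explicit decode below.
lines = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b'data: {"choices": [{"delta": {"content": "lo"}, "finish_reason": "stop"}]}',
    b"data: [DONE]",
]

streaming_text = ""
for line_bytes in lines:
    if not line_bytes:
        continue
    try:
        line_str = line_bytes.decode("utf-8").strip()
        data = line_str[5:].strip() if line_str.startswith("data:") else line_str
        if data == "[DONE]":
            break
        chunk = json.loads(data)
        if not chunk.get("choices"):
            continue
        choice = chunk["choices"][0]
        content = choice.get("delta", {}).get("content")
        if content:
            streaming_text += content
        if choice.get("finish_reason"):
            break
    except (json.JSONDecodeError, UnicodeDecodeError):
        continue  # skip keep-alives and undecodable partial frames

print(streaming_text)  # -> "Hello"

Yielding `{"text": ...}` dicts, as the rewritten providers do, rather than bare strings keeps the streaming and non-streaming paths symmetrical: the non-stream wrapper aggregates the same chunks a streaming caller would consume.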