webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff shows the changes between publicly released versions of the webscout package, as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/LambdaChat.py

@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  import time
  import random
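
Both rewritten providers in this diff follow the same migration: requests.Session is replaced by curl_cffi's Session, which keeps a requests-like API but can present a real browser's TLS and HTTP/2 fingerprint through the impersonate argument, and transport failures surface as CurlError instead of requests.exceptions.RequestException. A minimal sketch of the idiom (the URL is a placeholder, not taken from the diff):

    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    try:
        # "chrome110" is the impersonation profile used throughout this release;
        # it supplies the User-Agent and Sec-Ch-Ua-style headers automatically,
        # which is why those headers are dropped from self.headers below.
        resp = session.get("https://example.com", impersonate="chrome110", timeout=30)
        resp.raise_for_status()
        print(resp.status_code)
    except CurlError as e:
        # The new except clauses below catch this instead of RequestException.
        print(f"transport error: {e}")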
@@ -6,8 +7,8 @@ import re
  import uuid
  from typing import Any, Dict, List, Optional, Union, Generator

- from webscout.AIutel import Conversation
- from webscout.AIbase import Provider
+ from webscout.AIutel import Conversation, sanitize_stream
+ from webscout.AIbase import Provider # Import sanitize_stream
  from webscout import exceptions
  from webscout.litagent import LitAgent

@@ -31,42 +32,41 @@ class LambdaChat(Provider):
      def __init__(
          self,
          is_conversation: bool = True,
-         max_tokens: int = 2000,
+         max_tokens: int = 2000, # Note: max_tokens is not used by this API
          timeout: int = 60,
          filepath: str = None,
          update_file: bool = True,
          proxies: dict = {},
          model: str = "deepseek-llama3.3-70b",
-         assistantId: str = None,
-         system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+         assistantId: str = None, # Note: assistantId is not used by this API
+         system_prompt: str = "You are a helpful assistant. Please answer the following question.", # Note: system_prompt is not used by this API
      ):
          """Initialize the LambdaChat client."""
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

          self.model = model
-         self.session = requests.Session()
-         self.session.proxies.update(proxies)
+         # Initialize curl_cffi Session
+         self.session = Session()
          self.assistantId = assistantId
          self.system_prompt = system_prompt

          # Set up headers for all requests
          self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": LitAgent().random(),
-             "Accept": "*/*",
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": "en-US,en;q=0.9",
-             "Origin": self.url,
-             "Referer": f"{self.url}/",
-             "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
-             "Sec-Ch-Ua-Mobile": "?0",
-             "Sec-Ch-Ua-Platform": "\"Windows\"",
-             "Sec-Fetch-Dest": "empty",
+             "Content-Type": "application/json", # Keep Content-Type for JSON posts
+             "Accept": "*/*", # Keep Accept
+             # "User-Agent": LitAgent().random(), # Removed, handled by impersonate
+             "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
+             "Origin": self.url, # Keep Origin
+             "Referer": f"{self.url}/", # Keep Referer (will be updated per request)
+             # "Sec-Ch-Ua": "\"Chromium\";v=\"120\"", # Removed, handled by impersonate
+             # "Sec-Ch-Ua-Mobile": "?0", # Removed, handled by impersonate
+             # "Sec-Ch-Ua-Platform": "\"Windows\"", # Removed, handled by impersonate
+             "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-* headers
              "Sec-Fetch-Mode": "cors",
              "Sec-Fetch-Site": "same-origin",
-             "DNT": "1",
-             "Priority": "u=1, i"
+             "DNT": "1", # Keep DNT
+             "Priority": "u=1, i" # Keep Priority
          }

          # Provider settings
@@ -81,11 +81,17 @@ class LambdaChat(Provider):
          # Store conversation data for different models
          self._conversation_data = {}

+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies # Assign proxies directly
+
      def create_conversation(self, model: str):
          """Create a new conversation with the specified model."""
          url = f"{self.url}/conversation"
          payload = {
-             "model": model
+             "model": model,
+             "preprompt": self.system_prompt,
+
          }

          # Update referer for this specific request
@@ -93,7 +99,13 @@
          headers["Referer"] = f"{self.url}/models/{model}"

          try:
-             response = self.session.post(url, json=payload, headers=headers)
+             # Use curl_cffi session post with impersonate
+             response = self.session.post(
+                 url,
+                 json=payload,
+                 headers=headers, # Use updated headers with specific Referer
+                 impersonate="chrome110" # Use a common impersonation profile
+             )

              if response.status_code == 401:
                  raise exceptions.AuthenticationError("Authentication failed.")
@@ -113,14 +125,21 @@
              }

              return conversation_id
-         except requests.exceptions.RequestException:
+         except CurlError as e: # Catch CurlError
+             # Log or handle CurlError specifically if needed
+             return None
+         except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
              return None

      def fetch_message_id(self, conversation_id: str) -> str:
          """Fetch the latest message ID for a conversation."""
          try:
              url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
-             response = self.session.get(url, headers=self.headers)
+             response = self.session.get(
+                 url,
+                 headers=self.headers, # Use base headers
+                 impersonate="chrome110" # Use a common impersonation profile
+             )
              response.raise_for_status()

              # Parse the JSON data from the response
@@ -150,7 +169,9 @@

              return message_id

-         except Exception:
+         except CurlError: # Catch CurlError
+             return str(uuid.uuid4()) # Fallback on CurlError
+         except Exception: # Catch other potential exceptions
              # Fall back to a UUID if there's an error
              return str(uuid.uuid4())

@@ -161,70 +182,29 @@
          boundary += "".join(random.choice(boundary_chars) for _ in range(16))
          return boundary

-     def process_response(self, response, prompt: str):
-         """Process streaming response and extract content."""
-         full_text = ""
-         sources = None
+     @staticmethod
+     def _lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from LambdaChat stream JSON objects."""
+         if not isinstance(chunk, dict) or "type" not in chunk:
+             return None
+
          reasoning_text = ""
-         has_reasoning = False
-
-         for line in response.iter_lines(decode_unicode=True):
-             if not line:
-                 continue
-
-             try:
-                 # Parse each line as JSON
-                 data = json.loads(line)
-
-                 # Handle different response types
-                 if "type" not in data:
-                     continue
-
-                 if data["type"] == "stream" and "token" in data:
-                     token = data["token"].replace("\u0000", "")
-                     full_text += token
-                     resp = {"text": token}
-                     yield resp
-                 elif data["type"] == "finalAnswer":
-                     final_text = data.get("text", "")
-                     if final_text and not full_text:
-                         full_text = final_text
-                         resp = {"text": final_text}
-                         yield resp
-                 elif data["type"] == "webSearch" and "sources" in data:
-                     sources = data["sources"]
-                 elif data["type"] == "reasoning":
-                     has_reasoning = True
-                     if data.get("subtype") == "stream" and "token" in data:
-                         reasoning_text += data["token"]
-
-                 # If we have reasoning, prepend it to the next text output
-                 if reasoning_text and not full_text:
-                     resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
-                     yield resp
-
-             except json.JSONDecodeError:
-                 continue
-
-         # Update conversation history only for saving to file if needed
-         if full_text and self.conversation.file:
-             if has_reasoning:
-                 full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
-                 self.last_response = {"text": full_text_with_reasoning}
-                 self.conversation.update_chat_history(prompt, full_text_with_reasoning)
-             else:
-                 self.last_response = {"text": full_text}
-                 self.conversation.update_chat_history(prompt, full_text)
-
-         return full_text
+         if chunk["type"] == "stream" and "token" in chunk:
+             return chunk["token"].replace("\u0000", "")
+         elif chunk["type"] == "finalAnswer":
+             return chunk.get("text")
+         elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
+             # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
+             return chunk["token"] # Or potentially format as f"<think>{chunk['token']}</think>"
+         return None

      def ask(
          self,
          prompt: str,
-         stream: bool = False,
+         stream: bool = False, # API supports streaming
          raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
+         optimizer: str = None, # Note: optimizer is not used by this API
+         conversationally: bool = False, # Note: conversationally is not used by this API
          web_search: bool = False,
      ) -> Union[Dict[str, Any], Generator]:
          """Send a message to the Lambda Chat API"""
@@ -275,43 +255,59 @@
          multipart_headers["Content-Length"] = str(len(body))

          def for_stream():
+             streaming_text = "" # Initialize for history
              try:
                  # Try with multipart/form-data first
                  response = None
                  try:
+                     # Use curl_cffi session post with impersonate
                      response = self.session.post(
                          url,
                          data=body,
-                         headers=multipart_headers,
+                         headers=multipart_headers, # Use multipart headers
                          stream=True,
-                         timeout=self.timeout
+                         timeout=self.timeout,
+                         impersonate="chrome110" # Use a common impersonation profile
                      )
-                 except requests.exceptions.RequestException:
-                     pass
-
+                     response.raise_for_status() # Check status after potential error
+                 except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
+                     response = None # Ensure response is None if multipart fails
+
                  # If multipart fails or returns error, try with regular JSON
                  if not response or response.status_code != 200:
+                     # Use curl_cffi session post with impersonate
                      response = self.session.post(
                          url,
-                         json=request_data,
-                         headers=headers,
+                         json=request_data, # Use JSON payload
+                         headers=headers, # Use regular headers
                          stream=True,
-                         timeout=self.timeout
+                         timeout=self.timeout,
+                         impersonate="chrome110" # Use a common impersonation profile
                      )

-                 # If both methods fail, raise exception
-                 if response.status_code != 200:
-                     raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                 response.raise_for_status() # Check status after potential fallback

-                 # Process the streaming response
-                 yield from self.process_response(response, prompt)
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No prefix
+                     to_json=True, # Stream sends JSON lines
+                     content_extractor=self._lambdachat_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _lambdachat_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk # Aggregate text for history
+                         resp = {"text": content_chunk}
+                         yield resp if not raw else content_chunk

-             except Exception as e:
-                 if isinstance(e, requests.exceptions.RequestException):
-                     if hasattr(e, 'response') and e.response is not None:
-                         status_code = e.response.status_code
-                         if status_code == 401:
-                             raise exceptions.AuthenticationError("Authentication failed.")
+             except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
+                 # Handle specific exceptions if needed
+                 if isinstance(e, CurlError):
+                     # Log or handle CurlError specifically
+                     pass

                  # Try another model if current one fails
                  if len(self.AVAILABLE_MODELS) > 1:
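
The rewritten for_stream keeps the two-step request strategy but moves status checking onto raise_for_status: try the multipart body first, and on any failure retry the same URL with the JSON payload. Condensed into a standalone sketch (post_with_fallback and its parameters are illustrative names, not part of webscout):

    from curl_cffi.requests import Session

    def post_with_fallback(session: Session, url: str, body: bytes,
                           multipart_headers: dict, json_payload: dict,
                           timeout: int = 60):
        # Attempt 1: multipart/form-data, as in the diff above.
        response = None
        try:
            response = session.post(url, data=body, headers=multipart_headers,
                                    stream=True, timeout=timeout,
                                    impersonate="chrome110")
            response.raise_for_status()
        except Exception:
            # The diff catches (CurlError, FailedToGenerateResponseError, Exception);
            # a bare Exception already covers all three.
            response = None
        # Attempt 2: plain JSON if multipart failed or returned non-200.
        if response is None or response.status_code != 200:
            response = session.post(url, json=json_payload, stream=True,
                                    timeout=timeout, impersonate="chrome110")
            response.raise_for_status()
        return response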
@@ -328,15 +324,33 @@
                          return

                  # If we get here, all models failed
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
+
+             # Update history after stream finishes
+             if streaming_text and self.conversation.file:
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)

          def for_non_stream():
+             # Aggregate the stream using the updated for_stream logic
              response_text = ""
-             for response in for_stream():
-                 if "text" in response:
-                     response_text += response["text"]
-             self.last_response = {"text": response_text}
-             return self.last_response
+             try:
+                 # Ensure raw=False so for_stream yields dicts
+                 for chunk_data in for_stream():
+                     if isinstance(chunk_data, dict) and "text" in chunk_data:
+                         response_text += chunk_data["text"]
+                     # Handle raw string case if raw=True was passed
+                     elif raw and isinstance(chunk_data, str):
+                         response_text += chunk_data
+             except Exception as e:
+                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                 if not response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+             # last_response and history are updated within process_response called by for_stream
+             # Return the final aggregated response dict or raw string
+             return response_text if raw else {"text": response_text} # Return dict for consistency
+

          return for_stream() if stream else for_non_stream()

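The non-stream path is now a thin aggregation loop over the streaming generator rather than a separate code path; the Sambanova diff below gains the identical wrapper. The pattern in isolation:

    def aggregate(stream, raw: bool = False) -> str:
        # Collapse a generator of {"text": ...} dicts (or raw strings when
        # raw=True) into one string, mirroring for_non_stream() above.
        text = ""
        for chunk in stream:
            if isinstance(chunk, dict) and "text" in chunk:
                text += chunk["text"]
            elif raw and isinstance(chunk, str):
                text += chunk
        return text

    print(aggregate(iter([{"text": "Hel"}, {"text": "lo"}])))  # Hello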
@@ -344,25 +358,29 @@
          self,
          prompt: str,
          stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
+         optimizer: str = None, # Note: optimizer is not used by this API
+         conversationally: bool = False, # Note: conversationally is not used by this API
          web_search: bool = False
      ) -> Union[str, Generator]:
          """Generate a response to a prompt"""
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-             ):
-                 yield self.get_message(response)
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False, # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally, web_search=web_search
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict) # get_message expects dict

-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-                 )
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt, stream=False, raw=False, # Ensure ask returns dict
+                 optimizer=optimizer, conversationally=conversationally, web_search=web_search
              )
+             return self.get_message(response_data) # get_message expects dict

-         return for_stream() if stream else for_non_stream()
+         return for_stream_chat() if stream else for_non_stream_chat()

      def get_message(self, response: dict) -> str:
          """Extract message text from response"""
@@ -370,6 +388,7 @@ class LambdaChat(Provider):
          return response.get("text", "")

  if __name__ == "__main__":
+     # Ensure curl_cffi is installed
      print("-" * 80)
      print(f"{'Model':<50} {'Status':<10} {'Response'}")
      print("-" * 80)
@@ -389,4 +408,4 @@ if __name__ == "__main__":
                  display_text = "Empty or invalid response"
              print(f"{model:<50} {status:<10} {display_text}")
          except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+             print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  from typing import Union, Any, Dict, Generator

@@ -52,8 +53,8 @@ class Sambanova(Provider):
          self.model = model
          self.system_prompt = system_prompt

-         self.session = requests.Session()
-         self.session.proxies = proxies
+         # Initialize curl_cffi Session
+         self.session = Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.timeout = timeout
@@ -80,8 +81,13 @@
          self.base_url = "https://api.sambanova.ai/v1/chat/completions"
          self.headers = {
              "Authorization": f"Bearer {self.api_key}",
-             "Content-Type": "application/json"
+             "Content-Type": "application/json",
+             # Add User-Agent or sec-ch-ua headers if needed, or rely on impersonate
          }
+
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies # Assign proxies directly

      def ask(
          self,
@@ -105,36 +111,42 @@

          payload = {
              "model": self.model,
-             "stream": stream,
              "messages": [
                  {"role": "system", "content": self.system_prompt},
                  {"role": "user", "content": conversation_prompt},
              ],
              "max_tokens": self.max_tokens_to_sample,
+             "stream": True # API seems to always stream based on endpoint name
          }

          def for_stream():
-             streaming_text = ""
+             streaming_text = "" # Initialize outside try block
              try:
+                 # Use curl_cffi session post with impersonate
                  response = self.session.post(
-                     self.base_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                     self.base_url,
+                     # headers are set on the session
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     # proxies are set on the session
+                     impersonate="chrome110" # Use a common impersonation profile
                  )
-                 if not response.ok:
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Request failed: {response.status_code} - {response.text}"
-                     )
-
-                 for line in response.iter_lines():
-                     if line:
-                         # Remove the "data:" prefix and extra whitespace if present
-                         line_str = line.decode('utf-8').strip() if isinstance(line, bytes) else line.strip()
-                         if line_str.startswith("data:"):
-                             data = line_str[5:].strip()
-                         else:
-                             data = line_str
-                         if data == "[DONE]":
-                             break
+                 response.raise_for_status() # Check for HTTP errors
+
+                 # Iterate over bytes and decode manually
+                 for line_bytes in response.iter_lines():
+                     if line_bytes:
                          try:
+                             line_str = line_bytes.decode('utf-8').strip()
+                             if line_str.startswith("data:"):
+                                 data = line_str[5:].strip()
+                             else:
+                                 data = line_str # Handle cases where 'data:' prefix might be missing
+
+                             if data == "[DONE]":
+                                 break
+
                              json_data = json.loads(data)
                              # Skip entries without valid choices
                              if not json_data.get("choices"):
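
The loop now iterates raw bytes and decodes inside the per-line try block, so UnicodeDecodeError is swallowed along with bad JSON. The per-line logic, isolated as a pure function for clarity (parse_sse_line is an illustrative name):

    import json

    def parse_sse_line(line_bytes: bytes):
        # Strip an optional "data:" prefix, honor the "[DONE]" stop marker,
        # then read the OpenAI-style delta, as in the rewritten for_stream().
        line = line_bytes.decode("utf-8").strip()
        data = line[5:].strip() if line.startswith("data:") else line
        if not data or data == "[DONE]":
            return None
        try:
            event = json.loads(data)
        except json.JSONDecodeError:
            return None
        choices = event.get("choices") or []
        return choices[0].get("delta", {}).get("content") if choices else None

    sample = [b'data: {"choices":[{"delta":{"content":"Hi"}}]}', b'data: [DONE]']
    print([parse_sse_line(b) for b in sample])  # ['Hi', None]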
@@ -143,26 +155,52 @@
                              delta = choice.get("delta", {})
                              if "content" in delta:
                                  content = delta["content"]
-                                 streaming_text += content
-                                 # Yield content directly as a string for consistency
-                                 yield content
+                                 if content: # Ensure content is not None or empty
+                                     streaming_text += content
+                                     resp = {"text": content}
+                                     # Yield dict or raw string chunk
+                                     yield resp if not raw else content
                              # If finish_reason is provided, consider the stream complete
                              if choice.get("finish_reason"):
                                  break
-                         except json.JSONDecodeError:
-                             continue
-                 self.last_response = streaming_text
+                         except (json.JSONDecodeError, UnicodeDecodeError):
+                             continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                 # Update history after stream finishes
+                 self.last_response = streaming_text # Store aggregated text
                  self.conversation.update_chat_history(
                      prompt, self.last_response
                  )
-             except requests.exceptions.RequestException as e:
-                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+             except CurlError as e: # Catch CurlError
+                 raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+             except Exception as e: # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.ProviderConnectionError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+

          def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
+             # Aggregate the stream using the updated for_stream logic
+             full_response_text = ""
+             try:
+                 # Ensure raw=False so for_stream yields dicts
+                 for chunk_data in for_stream():
+                     if isinstance(chunk_data, dict) and "text" in chunk_data:
+                         full_response_text += chunk_data["text"]
+                     # Handle raw string case if raw=True was passed
+                     elif raw and isinstance(chunk_data, str):
+                         full_response_text += chunk_data
+             except Exception as e:
+                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                 if not full_response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

+             # last_response and history are updated within for_stream
+             # Return the final aggregated response dict or raw string
+             return full_response_text if raw else {"text": self.last_response} # Return dict for consistency
+
+
+         # Since the API endpoint suggests streaming, always call the stream generator.
+         # The non-stream wrapper will handle aggregation if stream=False.
          return for_stream() if stream else for_non_stream()

      def chat(
@@ -173,12 +211,28 @@
          conversationally: bool = False,
      ) -> Union[str, Generator[str, None, None]]:
          """Generate response `str`"""
-         if stream:
-             # For stream mode, yield the text chunks directly
-             return self.ask(prompt, stream=True, optimizer=optimizer, conversationally=conversationally)
-         else:
-             # For non-stream mode, return the complete text response
-             return self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
+
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False, # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict) # get_message expects dict or string
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=False, # Ensure ask returns dict
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+             return self.get_message(response_data) # get_message expects dict or string
+
+         return for_stream_chat() if stream else for_non_stream_chat()

      def get_message(self, response: Any) -> str:
          """
@@ -197,6 +251,7 @@ class Sambanova(Provider):
          return ""

  if __name__ == "__main__":
+     # Ensure curl_cffi is installed
      from rich import print
      ai = Sambanova(api_key='')
      response = ai.chat(input(">>> "), stream=True)
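
As with LambdaChat, chat(stream=True) now yields text chunks already passed through get_message, so the demo above would be consumed like this. A hedged sketch — the import path is an assumption, since no Sambanova module appears in the file list:

    from webscout.Provider import Sambanova  # path assumed, not confirmed by the file list

    ai = Sambanova(api_key="YOUR_API_KEY")  # placeholder key
    for piece in ai.chat("What is the capital of France?", stream=True):
        print(piece, end="", flush=True)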