webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.
Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
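
The two file diffs reproduced below, webscout/Provider/toolbaz.py and webscout/Provider/turboseek.py, illustrate the migration pattern that recurs across most provider modules in this release: the requests library is replaced by curl_cffi with browser impersonation, proxies move onto the session, and failures are normalized to exceptions.FailedToGenerateResponseError. A minimal sketch of that pattern follows, assuming only that curl_cffi is installed; the endpoint URL and payload are placeholders, not part of webscout:

# Sketch of the requests -> curl_cffi pattern used in the diffs below.
# Assumes `pip install curl_cffi`; the URL and payload are placeholders.
from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"content-type": "application/json"})
session.proxies = {}  # proxies are assigned on the session, not per request

try:
    resp = session.post(
        "https://example.com/api",    # placeholder endpoint
        json={"question": "hello"},   # placeholder payload
        timeout=30,
        impersonate="chrome110",      # mimic a real browser's TLS fingerprint
    )
    resp.raise_for_status()
    print(resp.text)
except CurlError as e:
    # network-level failures surface as CurlError instead of requests exceptions
    raise RuntimeError(f"Network error (CurlError): {e}") from e

The per-request impersonate argument is the point of the switch: curl_cffi matches the TLS and HTTP/2 fingerprint of the named browser, which plain requests cannot do.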
--- a/webscout/Provider/toolbaz.py
+++ b/webscout/Provider/toolbaz.py
@@ -1,5 +1,6 @@
 import re
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import uuid
 import base64
 import json
@@ -9,11 +10,11 @@ import time
 from datetime import datetime
 from typing import Any, Dict, Optional, Generator, Union, List

+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
+from webscout.AIbase import Provider

 class Toolbaz(Provider):
     """
@@ -47,7 +48,7 @@ class Toolbaz(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not directly used by the API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -56,7 +57,7 @@ class Toolbaz(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "gemini-2.0-flash",
-        system_prompt: str = "You are a helpful AI assistant."
+        system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not directly used by the API
     ):
         """
         Initializes the Toolbaz API with given parameters.
@@ -64,28 +65,31 @@
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
         self.model = model
-        self.proxies = proxies
+        self.proxies = proxies # Store proxies for later use in requests

-        # Set up headers
+        # Set up headers for the curl_cffi session
         self.session.headers.update({
-            "user-agent": "Mozilla/5.0 (Linux; Android 10)",
+            "user-agent": "Mozilla/5.0 (Linux; Android 10)", # Keep specific user-agent
             "accept": "*/*",
             "accept-language": "en-US",
             "cache-control": "no-cache",
-            "connection": "keep-alive",
             "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
             "origin": "https://toolbaz.com",
             "pragma": "no-cache",
             "referer": "https://toolbaz.com/",
             "sec-fetch-mode": "cors"
+            # Add sec-ch-ua headers if needed for impersonation consistency
         })
+        # Assign proxies directly to the session
+        self.session.proxies = proxies

         # Initialize conversation history
         self.__available_optimizers = (
@@ -139,20 +143,34 @@
                 "session_id": session_id,
                 "token": token
             }
-            resp = self.session.post("https://data.toolbaz.com/token.php", data=data)
-            resp.raise_for_status()
+            # Use curl_cffi session post WITHOUT impersonate for token request
+            resp = self.session.post(
+                "https://data.toolbaz.com/token.php",
+                data=data
+                # Removed impersonate="chrome110" for this specific request
+            )
+            resp.raise_for_status() # Check for HTTP errors
             result = resp.json()
             if result.get("success"):
                 return {"token": result["token"], "session_id": session_id}
-            return None
-        except Exception:
-            return None
+            # Raise error if success is not true
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: API response indicates failure. Response: {result}")
+        except CurlError as e: # Catch CurlError specifically
+            # Raise a specific error indicating CurlError during auth
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to network error (CurlError): {e}") from e
+        except json.JSONDecodeError as e:
+            # Raise error for JSON decoding issues
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: Could not decode JSON response. Error: {e}. Response text: {getattr(resp, 'text', 'N/A')}") from e
+        except Exception as e: # Catch other potential errors (like HTTPError from raise_for_status)
+            # Raise a specific error indicating a general failure during auth
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to an unexpected error ({type(e).__name__}): {e} - {err_text}") from e

     def ask(
         self,
         prompt: str,
         stream: bool = False,
-        raw: bool = False, # Kept for compatibility with other providers
+        raw: bool = False, # Kept for compatibility, but output is always dict/string
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
@@ -166,9 +184,9 @@
             conversation_prompt if conversationally else prompt
         )

-        auth = self.get_auth()
-        if not auth:
-            raise exceptions.ProviderConnectionError("Failed to authenticate with Toolbaz API")
+        # get_auth now raises exceptions on failure
+        auth = self.get_auth()
+        # No need to check if auth is None, as an exception would have been raised

         data = {
             "text": conversation_prompt,
@@ -179,12 +197,13 @@

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
                     stream=True,
-                    proxies=self.proxies,
-                    timeout=self.timeout
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate here
                 )
                 resp.raise_for_status()

@@ -192,54 +211,71 @@
                 tag_start = "[model:"
                 streaming_text = ""

-                for chunk in resp.iter_content(chunk_size=1):
-                    if chunk:
-                        text = chunk.decode(errors="ignore")
+                # Iterate over bytes and decode manually
+                for chunk_bytes in resp.iter_content(chunk_size=1024): # Read in larger chunks
+                    if chunk_bytes:
+                        text = chunk_bytes.decode(errors="ignore")
                         buffer += text
-                        # Remove all complete [model: ...] tags in buffer
-                        while True:
-                            match = re.search(r"\[model:.*?\]", buffer)
-                            if not match:
-                                break
-                            buffer = buffer[:match.start()] + buffer[match.end():]
-                        # Only yield up to the last possible start of a tag
-                        last_tag = buffer.rfind(tag_start)
-                        if last_tag == -1 or last_tag + len(tag_start) > len(buffer):
-                            if buffer:
-                                streaming_text += buffer
-                                yield {"text": buffer}
-                            buffer = ""
+
+                        processed_buffer = ""
+                        last_processed_index = 0
+                        # Find all complete tags and process text between them
+                        for match in re.finditer(r"\[model:.*?\]", buffer):
+                            # Add text before the tag
+                            segment = buffer[last_processed_index:match.start()]
+                            if segment:
+                                processed_buffer += segment
+                            last_processed_index = match.end()
+
+                        # Add remaining text after the last complete tag
+                        processed_buffer += buffer[last_processed_index:]
+
+                        # Now, check for incomplete tag at the end
+                        last_tag_start_index = processed_buffer.rfind(tag_start)
+
+                        if last_tag_start_index != -1:
+                            # Text before the potential incomplete tag
+                            text_to_yield = processed_buffer[:last_tag_start_index]
+                            # Keep the potential incomplete tag start for the next iteration
+                            buffer = processed_buffer[last_tag_start_index:]
                         else:
-                            if buffer[:last_tag]:
-                                streaming_text += buffer[:last_tag]
-                                yield {"text": buffer[:last_tag]}
-                            buffer = buffer[last_tag:]
-
-                # Remove any remaining [model: ...] tag in the buffer
-                buffer = re.sub(r"\[model:.*?\]", "", buffer)
-                if buffer:
-                    streaming_text += buffer
-                    yield {"text": buffer}
+                            # No potential incomplete tag found, yield everything processed
+                            text_to_yield = processed_buffer
+                            buffer = "" # Clear buffer as everything is processed
+
+                        if text_to_yield:
+                            streaming_text += text_to_yield
+                            # Yield dict or raw string
+                            yield {"text": text_to_yield} if not raw else text_to_yield
+
+                # Process any remaining text in the buffer after the loop finishes
+                # Remove any potential tags (complete or incomplete)
+                final_text = re.sub(r"\[model:.*?\]", "", buffer)
+                if final_text:
+                    streaming_text += final_text
+                    yield {"text": final_text} if not raw else final_text

                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)

-            except requests.exceptions.RequestException as e:
-                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
-            except Exception as e:
-                raise exceptions.ProviderConnectionError(f"Unexpected error: {str(e)}") from e
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other exceptions
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error during stream: {str(e)}") from e

         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
-                    proxies=self.proxies,
-                    timeout=self.timeout
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate here
                 )
                 resp.raise_for_status()

-                text = resp.text
+                # Use response.text which is already decoded
+                text = resp.text
                 # Remove [model: ...] tags
                 text = re.sub(r"\[model:.*?\]", "", text)

@@ -248,9 +284,9 @@

                 return self.last_response

-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
-            except Exception as e:
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other exceptions
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e

         return for_stream() if stream else for_non_stream()
@@ -263,26 +299,28 @@
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Toolbaz API."""
-        def for_stream():
-            for response in self.ask(
+        def for_stream_chat():
+            # ask() yields dicts when raw=False
+            for response_dict in self.ask(
                 prompt,
                 stream=True,
+                raw=False, # Ensure ask yields dicts
                 optimizer=optimizer,
                 conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield self.get_message(response_dict)

-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    stream=False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_non_stream_chat():
+            # ask() returns a dict when stream=False
+            response_dict = self.ask(
+                prompt,
+                stream=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_dict)

-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Extract the message from the response.
@@ -298,23 +336,40 @@

 # Example usage
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    from rich import print # Use rich print if available
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
     # Test the provider with different models
     for model in Toolbaz.AVAILABLE_MODELS:
         try:
             test_ai = Toolbaz(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-            for chunk in response:
+            # print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+                # Optional: print chunks for visual feedback
+                # print(chunk, end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                # Clean and truncate response
+                clean_text = response_text.strip()
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            # Print full error for debugging
+            print(f"\r{model:<50} {'✗':<10} Error: {str(e)}")
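
The most intricate change in the toolbaz.py diff above is the stream filter: rather than reading one byte at a time, the new code reads 1 KB chunks into a buffer, strips every complete [model: ...] tag, and holds back a trailing partial tag until the next chunk completes it. A condensed sketch of that buffering logic, with an illustrative helper name and already-decoded string chunks for brevity:

import re

TAG_RE = re.compile(r"\[model:.*?\]")
TAG_START = "[model:"

def strip_model_tags(chunks):
    """Condensed sketch of the buffering in the toolbaz.py diff: drop complete
    [model: ...] tags and hold back a trailing partial tag until more arrives."""
    buffer = ""
    for text in chunks:
        buffer += text
        processed = TAG_RE.sub("", buffer)   # remove every complete tag
        cut = processed.rfind(TAG_START)     # a partial tag at the end?
        if cut != -1:
            buffer = processed[cut:]         # keep the partial tag buffered
            processed = processed[:cut]
        else:
            buffer = ""
        if processed:
            yield processed
    # flush: discard anything that still looks like a tag
    tail = TAG_RE.sub("", buffer)
    if tail:
        yield tail

# e.g. list(strip_model_tags(["Hello [model: g", "emini] world"]))
#      -> ["Hello ", " world"]

Holding back everything from the last [model: onward is what keeps a tag that straddles a chunk boundary out of the output; the cost, here as in the diff, is that a literal [model: in legitimate text stays buffered until the stream ends.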
--- a/webscout/Provider/turboseek.py
+++ b/webscout/Provider/turboseek.py
@@ -1,10 +1,11 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+from webscout.AIbase import Provider
 from webscout import exceptions
 from typing import Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent
@@ -26,7 +27,7 @@ class TurboSeek(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Llama 3.1 70B"
+        model: str = "Llama 3.1 70B" # Note: model parameter is not used by the API endpoint
     ):
         """Instantiates TurboSeek

@@ -41,7 +42,8 @@
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://www.turboseek.io/api/getAnswer"
@@ -49,14 +51,9 @@
         self.timeout = timeout
         self.last_response = {}
         self.headers = {
-            "authority": "www.turboseek.io",
-            "method": "POST",
-            "path": "/api/getAnswer",
-            "scheme": "https",
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "63",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://www.turboseek.io",
@@ -76,7 +73,9 @@
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -88,7 +87,6 @@
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

     def ask(
         self,
@@ -125,41 +123,75 @@
                 f"Optimizer is not one of {self.__available_optimizers}"
             )

-        self.session.headers.update(self.headers)
         payload = {
             "question": conversation_prompt,
             "sources": []
         }

         def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            try: # Add try block for CurlError
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Try a different impersonation profile
                 )
-            streaming_text = ""
-            for value in response.iter_lines(
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    if value and value.startswith(b"data: "): #Check for bytes and decode
-                        data = json.loads(value[6:].decode('utf-8')) # Decode manually
-                        if "text" in data:
-                            streaming_text += data["text"]
-                            resp = dict(text=data["text"])
-                            self.last_response.update(resp)
-                            yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_text = ""
+                # Iterate over bytes and decode manually
+                for value_bytes in response.iter_lines():
+                    try:
+                        if value_bytes and value_bytes.startswith(b"data: "): # Check for bytes
+                            # Decode bytes to string
+                            line = value_bytes[6:].decode('utf-8')
+                            data = json.loads(line)
+                            if "text" in data:
+                                # Decode potential unicode escapes
+                                content = data["text"].encode().decode('unicode_escape')
+                                streaming_text += content
+                                resp = dict(text=content)
+                                self.last_response.update(resp) # Update last_response incrementally
+                                # Yield raw bytes or dict based on flag
+                                yield value_bytes if raw else resp
+                    except (json.decoder.JSONDecodeError, UnicodeDecodeError):
+                        pass # Ignore lines that are not valid JSON or cannot be decoded
+                # Update conversation history after stream finishes
+                if streaming_text: # Only update if content was received
+                    self.conversation.update_chat_history(
+                        prompt, streaming_text # Use the fully aggregated text
+                    )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+

         def for_non_stream():
-            for _ in for_stream():
-                pass
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            for chunk_data in for_stream():
+                # Ensure chunk_data is a dict (not raw) and has 'text'
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    full_text += chunk_data["text"]
+                # If raw=True, chunk_data is bytes, decode and process if needed (though raw non-stream is less common)
+                elif isinstance(chunk_data, bytes):
+                    try:
+                        if chunk_data.startswith(b"data: "):
+                            line = chunk_data[6:].decode('utf-8')
+                            data = json.loads(line)
+                            if "text" in data:
+                                content = data["text"].encode().decode('unicode_escape')
+                                full_text += content
+                    except (json.decoder.JSONDecodeError, UnicodeDecodeError):
+                        pass
+            # last_response and history are updated within for_stream
+            # Ensure last_response reflects the complete aggregated text
+            self.last_response = {"text": full_text}
             return self.last_response

         return for_stream() if stream else for_non_stream()
@@ -209,11 +241,30 @@
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        # Text is already decoded in ask method
+        return response.get("text", "")
+
 if __name__ == '__main__':
+    # Ensure curl_cffi is installed
     from rich import print
-    ai = TurboSeek()
-    response = ai.chat("hello buddy", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    try: # Add try-except block for testing
+        ai = TurboSeek(timeout=60)
+        print("[bold blue]Testing Stream:[/bold blue]")
+        response_stream = ai.chat("hello buddy", stream=True)
+        full_stream_response = ""
+        for chunk in response_stream:
+            print(chunk, end="", flush=True)
+            full_stream_response += chunk
+        print("\n[bold green]Stream Test Complete.[/bold green]\n")
+
+        # Optional: Test non-stream
+        # print("[bold blue]Testing Non-Stream:[/bold blue]")
+        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
+        # print(response_non_stream)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")

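
The turboseek.py hunk above mainly wraps the existing "data: " line handling in error handling, but the parsing core is easy to isolate. A sketch under the same assumptions the diff makes (lines arrive as bytes from iter_lines(), payloads are JSON objects with a text field; the function name is illustrative):

import json

def parse_sse_lines(lines):
    """Illustrative parser for the 'data: ' protocol handled in the diff above.
    `lines` stands in for response.iter_lines()."""
    for raw in lines:
        # iter_lines() yields bytes; skip anything that is not a data line
        if not raw or not raw.startswith(b"data: "):
            continue
        try:
            data = json.loads(raw[6:].decode("utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError):
            continue  # keep-alives and malformed lines are silently skipped
        if isinstance(data, dict) and "text" in data:
            # mirror the diff: undo the API's doubled escape sequences
            yield data["text"].encode().decode("unicode_escape")

print("".join(parse_sse_lines([b'data: {"text": "Hello"}', b"", b"data: [DONE]"])))  # -> Hello

The encode().decode('unicode_escape') round-trip mirrors the diff and undoes the doubled escape sequences this particular API emits; it can mangle non-Latin-1 characters, so it reads as a provider-specific workaround rather than a general technique.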