webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/scnet.py
@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import secrets
 from typing import Any, Dict, Optional, Generator, Union
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 
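Note on the migration: curl_cffi's Session is a near drop-in replacement for requests.Session whose requests also carry a browser-grade TLS fingerprint via the impersonate argument. A minimal sketch of the pattern these files now follow (the endpoint URL below is a placeholder, not the real API):

from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"accept": "text/event-stream"})
try:
    # impersonate makes the TLS/JA3 fingerprint match a real Chrome build
    response = session.post(
        "https://example.com/v1/chat",  # placeholder endpoint
        json={"prompt": "hi"},
        stream=True,
        timeout=30,
        impersonate="chrome120",
    )
    response.raise_for_status()
except CurlError as exc:
    print(f"Transport-level failure: {exc}")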
@@ -26,13 +27,13 @@ class SCNet(Provider):
         self,
         model: str = "QWQ-32B",
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048,  # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
         update_file: bool = True,
         proxies: Optional[dict] = None,
-        history_offset: int = 0,
+        history_offset: int = 0,  # Note: history_offset might not be fully effective due to API structure
         act: Optional[str] = None,
         system_prompt: str = (
             "You are a helpful, advanced LLM assistant. "
@@ -46,14 +47,15 @@ class SCNet(Provider):
         self.model = model
         self.modelId = self.MODEL_NAME_TO_ID[model]
         self.system_prompt = system_prompt
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response: Dict[str, Any] = {}
         self.proxies = proxies or {}
         self.cookies = {
-            "Token": secrets.token_hex(16),
+            "Token": secrets.token_hex(16),  # Keep cookie generation logic
         }
         self.headers = {
             "accept": "text/event-stream",
@@ -61,8 +63,17 @@ class SCNet(Provider):
             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
             "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
             "origin": "https://www.scnet.cn",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
         self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"
+
+        # Update curl_cffi session headers, proxies, and cookies
+        self.session.headers.update(self.headers)
+        self.session.proxies = self.proxies  # Assign proxies directly
+        # Set cookies on the session object for curl_cffi
+        for name, value in self.cookies.items():
+            self.session.cookies.set(name, value)
+
         self.__available_optimizers = (
             method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
@@ -75,6 +86,13 @@ class SCNet(Provider):
         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _scnet_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from SCNet stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("content")
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -86,9 +104,7 @@ class SCNet(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
@@ -105,39 +121,57 @@ class SCNet(Provider):
 
         def for_stream():
             try:
-                with self.session.post(
+                # Use curl_cffi session post with impersonate
+                # Cookies are now handled by the session object
+                response = self.session.post(
                     self.url,
-                    headers=self.headers,
-                    cookies=self.cookies,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    proxies=self.proxies
-                ) as resp:
-                    streaming_text = ""
-                    for line in resp.iter_lines(decode_unicode=True):
-                        if line and line.startswith("data:"):
-                            data = line[5:].strip()
-                            if data and data != "[done]":
-                                try:
-                                    obj = json.loads(data)
-                                    content = obj.get("content", "")
-                                    streaming_text += content
-                                    yield {"text": content} if raw else {"text": content}
-                                except Exception:
-                                    continue
-                            elif data == "[done]":
-                                break
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+                    impersonate="chrome120"  # Changed impersonation to chrome120
+                )
+                response.raise_for_status()  # Check for HTTP errors
+
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    skip_markers=["[done]"],
+                    content_extractor=self._scnet_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _scnet_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk} if not raw else content_chunk
+                # Update history and last response after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
             text = ""
-            for chunk in for_stream():
-                text += chunk["text"]
-            return {"text": text}
+            # Ensure raw=False so for_stream yields dicts
+            for chunk_data in for_stream():
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    text += chunk_data["text"]
+                # Handle raw string case if raw=True was passed
+                elif isinstance(chunk_data, str):
+                    text += chunk_data
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return text if raw else self.last_response
+
 
         return for_stream() if stream else for_non_stream()
 
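The sanitize_stream helper used above is webscout's shared SSE parser; the keyword arguments shown (intro_value, to_json, skip_markers, content_extractor, yield_raw_on_error) are exactly the ones this diff passes. As a hypothetical stand-in to make that contract concrete, the hand-rolled loop it replaces looks roughly like this (simplified to operate on decoded lines rather than raw byte chunks):

import json
from typing import Any, Callable, Iterator, Optional

def sse_extract(lines: Iterator[bytes], extractor: Callable[[Any], Optional[str]]) -> Iterator[str]:
    # Hypothetical stand-in for sanitize_stream: strip the "data:" prefix,
    # stop at the "[done]" marker, JSON-decode, then delegate to a content
    # extractor such as SCNet._scnet_extractor.
    for raw in lines:
        line = raw.decode("utf-8", errors="ignore").strip()
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[done]":
            break
        try:
            content = extractor(json.loads(payload))
        except json.JSONDecodeError:
            continue  # mirrors yield_raw_on_error=False
        if content:
            yield content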
@@ -148,40 +182,59 @@ class SCNet(Provider):
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
-        def for_stream():
-            for response in self.ask(
-                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
-        return for_stream() if stream else for_non_stream()
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False,  # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)  # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
     print("-" * 80)
     for model in SCNet.AVAILABLE_MODELS:
         try:
             test_ai = SCNet(model=model["name"], timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-            for chunk in response:
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                # Clean and truncate response
+                clean_text = response_text.strip()
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model['modelId']:<10} {model['name']:<30} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-            print(f"{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")
webscout/Provider/searchchat.py
@@ -1,11 +1,12 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from datetime import datetime
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -26,7 +27,6 @@ class SearchChatAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful assistant."
     ):
         """Initializes the SearchChatAI API client."""
         self.url = "https://search-chat.ai/api/chat-test-stop.php"
@@ -34,7 +34,6 @@ class SearchChatAI(Provider):
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.last_response = {}
-        self.system_prompt = system_prompt
 
         # Initialize LitAgent for user agent generation
         self.agent = LitAgent()
@@ -58,9 +57,11 @@ class SearchChatAI(Provider):
             "User-Agent": self.fingerprint["user_agent"],
         }
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies  # Assign proxies directly
 
         self.__available_optimizers = (
             method
@@ -99,7 +100,7 @@ class SearchChatAI(Provider):
             "User-Agent": self.fingerprint["user_agent"],
         })
 
-        # Update session headers
+        # Update session headers (already done in the original code, should work with curl_cffi session)
         for header, value in self.headers.items():
             self.session.headers[header] = value
 
@@ -152,87 +153,76 @@ class SearchChatAI(Provider):
 
         def for_stream():
             try:
-                with self.session.post(
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
                     self.url,
+                    # headers are set on the session
                     json=payload,
                     stream=True,
-                    timeout=self.timeout
-                ) as response:
-                    if response.status_code != 200:
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")  # Use fingerprint browser type
+                )
+                if response.status_code != 200:
+                    # Add identity refresh logic on 403/429
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        response = self.session.post(
+                            self.url,
+                            json=payload,
+                            stream=True,
+                            timeout=self.timeout,
+                            impersonate=self.fingerprint.get("browser_type", "chrome110")  # Use updated fingerprint
+                        )
+                        if not response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Request failed after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                            )
+                    else:
                         raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
+                            f"Request failed with status code {response.status_code} - {response.text}"
                         )
 
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if line:
-                            line = line.decode('utf-8')
-                            if line.startswith('data: '):
-                                data_str = line[6:]  # Remove 'data: ' prefix
-
-                                if data_str == '[DONE]':
-                                    break
-
-                                try:
-                                    data = json.loads(data_str)
-                                    if "choices" in data and len(data["choices"]) > 0:
-                                        delta = data["choices"][0].get("delta", {})
-                                        if "content" in delta:
-                                            content = delta["content"]
-                                            streaming_text += content
-                                            resp = dict(text=content)
-                                            yield resp if raw else content
-                                except json.JSONDecodeError:
-                                    continue
-
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            try:
-                response = self.session.post(
-                    self.url,
-                    json=payload,
-                    stream=True,  # Keep streaming enabled
-                    timeout=self.timeout
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
                 )
-                if response.status_code != 200:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Request failed with status code {response.status_code}"
-                    )
-
-                full_text = ""
-                for line in response.iter_lines():
-                    if line:
-                        line = line.decode('utf-8')
-                        if line.startswith('data: '):
-                            data_str = line[6:]  # Remove 'data: ' prefix
-
-                            if data_str == '[DONE]':
-                                break
-
-                            try:
-                                data = json.loads(data_str)
-                                if "choices" in data and len(data["choices"]) > 0:
-                                    delta = data["choices"][0].get("delta", {})
-                                    if "content" in delta:
-                                        content = delta["content"]
-                                        full_text += content
-                            except json.JSONDecodeError:
-                                continue
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by the content_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk
 
-                if full_text:
-                    self.last_response = {"text": full_text}
-                    self.conversation.update_chat_history(prompt, full_text)
-                    return {"text": full_text}
-                else:
-                    raise exceptions.FailedToGenerateResponseError("No response content found")
-
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+                # Update history and last response after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            # Iterate through the generator provided by for_stream
+            # Ensure raw=False so for_stream yields dicts
+            for chunk_data in for_stream():
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    full_text += chunk_data["text"]
+                # If raw=True was somehow passed, handle string chunks
+                elif isinstance(chunk_data, str):
+                    full_text += chunk_data
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
+
 
         return for_stream() if stream else for_non_stream()
 
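The new error path above retries exactly once after a 403 or 429 by rotating the LitAgent fingerprint. A stripped-down sketch of that retry-once idea (post_with_refresh, the fingerprint dict, and refresh_identity are illustrative stand-ins for the provider's internals; refresh_identity is expected to mutate the fingerprint in place):

from typing import Callable
from curl_cffi.requests import Session

def post_with_refresh(session: Session, url: str, payload: dict,
                      fingerprint: dict, refresh_identity: Callable[[], None]):
    # First attempt, impersonating the current fingerprint's browser.
    response = session.post(url, json=payload, stream=True, timeout=30,
                            impersonate=fingerprint.get("browser_type", "chrome110"))
    if response.status_code in (403, 429):
        # Blocked or rate-limited: rotate the fingerprint, then retry once.
        refresh_identity()
        response = session.post(url, json=payload, stream=True, timeout=30,
                                impersonate=fingerprint.get("browser_type", "chrome110"))
    response.raise_for_status()
    return response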
@@ -255,16 +245,24 @@ class SearchChatAI(Provider):
         Returns:
             Either a string response or a generator for streaming
         """
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False,  # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
+            return self.get_message(response_data)  # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Extract the message from the response."""
@@ -272,6 +270,7 @@ class SearchChatAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Status':<10} {'Response'}")
     print("-" * 80)
@@ -290,4 +289,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid response"
             print(f"{status:<10} {display_text}")
         except Exception as e:
-            print(f"{'✗':<10} {str(e)}")
+            print(f"{'✗':<10} {str(e)}")