webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.
Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/typegpt.py

@@ -1,11 +1,11 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  from typing import Union, Any, Dict, Generator
- import requests.exceptions

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent
@@ -35,7 +35,7 @@ class TypeGPT(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "gpt-4o",
+ model: str = "gpt-4o-mini-2024-07-18",
  system_prompt: str = "You are a helpful assistant.",
  temperature: float = 0.5,
  presence_penalty: int = 0,
@@ -46,7 +46,8 @@ class TypeGPT(Provider):
  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
@@ -82,6 +83,8 @@ class TypeGPT(Provider):
  )
  self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
  self.conversation.history_offset = history_offset
+ # Update curl_cffi session headers and proxies
+ self.session.headers.update(self.headers)
  self.session.proxies = proxies

  def ask(
@@ -120,57 +123,87 @@

  def for_stream():
  try:
+ # Use curl_cffi session post with impersonate
  response = self.session.post(
- self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+ self.api_endpoint,
+ headers=self.headers,
+ json=payload,
+ stream=True,
+ timeout=self.timeout,
+ impersonate="chrome120"
  )
- except requests.exceptions.ConnectionError as ce:
+ except CurlError as ce:
  raise exceptions.FailedToGenerateResponseError(
- f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+ f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
  ) from ce

- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
- )
- message_load = ""
- for line in response.iter_lines():
- if line:
- line = line.decode("utf-8")
- if line.startswith("data: "):
- line = line[6:] # Remove "data: " prefix
- # Skip [DONE] message
- if line.strip() == "[DONE]":
- break
- try:
- data = json.loads(line)
- # Extract and yield only new content
- if 'choices' in data and len(data['choices']) > 0:
- delta = data['choices'][0].get('delta', {})
- if 'content' in delta:
- new_content = delta['content']
- message_load += new_content
- # Yield only the new content
- yield dict(text=new_content) if not raw else new_content
- self.last_response = dict(text=message_load)
- except json.JSONDecodeError:
- continue
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+ response.raise_for_status() # Check for HTTP errors first
+
+ streaming_text = ""
+ # Use sanitize_stream
+ processed_stream = sanitize_stream(
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
+ intro_value="data:",
+ to_json=True, # Stream sends JSON
+ skip_markers=["[DONE]"],
+ content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+ yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+ )
+
+ for content_chunk in processed_stream:
+ # content_chunk is the string extracted by the content_extractor
+ if content_chunk and isinstance(content_chunk, str):
+ streaming_text += content_chunk
+ yield dict(text=content_chunk) if not raw else content_chunk
+ # Update last_response incrementally
+ self.last_response = dict(text=streaming_text)
+
+ # Update conversation history after stream finishes
+ if streaming_text: # Only update if something was received
+ self.conversation.update_chat_history(prompt, streaming_text)
+

  def for_non_stream():
  try:
- response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
- except requests.exceptions.ConnectionError as ce:
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.api_endpoint,
+ headers=self.headers,
+ json=payload,
+ timeout=self.timeout,
+ impersonate="chrome120"
+ )
+ except CurlError as ce:
  raise exceptions.FailedToGenerateResponseError(
- f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+ f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
  ) from ce

- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Request failed - {response.status_code}: {response.text}"
+ response.raise_for_status() # Check for HTTP errors
+
+ try:
+ response_text = response.text # Get raw text
+
+ # Use sanitize_stream for non-streaming JSON response
+ processed_stream = sanitize_stream(
+ data=response_text,
+ to_json=True, # Parse the whole text as JSON
+ intro_value=None,
+ # Extractor for non-stream structure
+ content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
+ yield_raw_on_error=False
  )
- self.last_response = response.json()
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
- return self.last_response
+
+ # Extract the single result
+ content = ""
+ for extracted_content in processed_stream:
+ content = extracted_content if isinstance(extracted_content, str) else ""
+
+ self.last_response = {"text": content} # Store in expected format
+ self.conversation.update_chat_history(prompt, content)
+ return self.last_response
+ except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
+ raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
+

  return for_stream() if stream else for_non_stream()

@@ -183,23 +216,36 @@
  ) -> Union[str, Generator[str, None, None]]:
  """Generate response string or stream."""
  if stream:
+ # ask() yields dicts or strings when streaming
  gen = self.ask(
- prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally
  )
- for chunk in gen:
- yield self.get_message(chunk) # Extract text from streamed chunks
+ for chunk_dict in gen:
+ # get_message expects a dict
+ yield self.get_message(chunk_dict)
  else:
- return self.get_message(self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally))
+ # ask() returns a dict when not streaming
+ response_dict = self.ask(
+ prompt, stream=False,
+ optimizer=optimizer, conversationally=conversationally
+ )
+ return self.get_message(response_dict)

  def get_message(self, response: Dict[str, Any]) -> str:
  """Retrieves message from response."""
- if isinstance(response, str): # Handle raw responses
- return response
- elif isinstance(response, dict):
+ if isinstance(response, dict):
  assert isinstance(response, dict), "Response should be of dict data-type only"
- return response.get("text", "") # Extract text from dictionary response
+ # Handle potential unicode escapes in the final text
+ text = response.get("text", "")
+ try:
+ # Attempt to decode escapes, return original if fails
+ return text.encode('utf-8').decode('unicode_escape')
+ except UnicodeDecodeError:
+ return text
  else:
- raise TypeError("Invalid response type. Expected str or dict.")
+ # This case should ideally not be reached if ask() behaves as expected
+ raise TypeError(f"Invalid response type: {type(response)}. Expected dict.")

  if __name__ == "__main__":
  print("-" * 80)
@@ -213,20 +259,31 @@ if __name__ == "__main__":
  for model in TypeGPT.AVAILABLE_MODELS:
  try:
  test_ai = TypeGPT(model=model, timeout=60)
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
+ # Test stream first
+ response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
  response_text = ""
- for chunk in response:
+ print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+ for chunk in response_stream:
  response_text += chunk
- print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+ # Optional: print chunks as they arrive for visual feedback
+ # print(chunk, end="", flush=True)

  if response_text and len(response_text.strip()) > 0:
  status = "✓"
- # Truncate response if too long
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+ # Clean and truncate response
+ clean_text = response_text.strip() # Already decoded in get_message
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
  else:
- status = "✗"
- display_text = "Empty or invalid response"
+ status = "✗ (Stream)"
+ display_text = "Empty or invalid stream response"
  print(f"\r{model:<50} {status:<10} {display_text}")
+
+ # Optional: Add non-stream test if needed, but stream test covers basic functionality
+ # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+ # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+ # if not response_non_stream or len(response_non_stream.strip()) == 0:
+ # print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+
  except Exception as e:
  print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
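
The same migration repeats across both provider diffs shown on this page: a plain requests session becomes a curl_cffi Session that impersonates a browser TLS fingerprint, requests.exceptions handling becomes CurlError handling, and ad-hoc line parsing is routed through webscout's own sanitize_stream helper. Below is a minimal sketch of the request side of that pattern, assuming only what the diff above shows (the sanitize_stream keyword arguments are inferred from these call sites, not from separate documentation):

    # Sketch of the requests -> curl_cffi pattern introduced in 8.2.5.
    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"Content-Type": "application/json"})

    payload = {
        "model": "gpt-4o-mini-2024-07-18",
        "messages": [{"role": "user", "content": "Say 'Hello' in one word"}],
        "stream": True,
    }

    try:
        # impersonate="chrome120" gives the request a real Chrome TLS/HTTP
        # fingerprint, which is the point of dropping plain `requests`.
        response = session.post(
            "https://chat.typegpt.net/api/openai/v1/chat/completions",
            json=payload,
            stream=True,
            timeout=30,
            impersonate="chrome120",
        )
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size=None):
            pass  # in webscout these bytes are fed to sanitize_stream(...)
    except CurlError as e:
        # curl_cffi raises CurlError where requests raised ConnectionError
        raise RuntimeError(f"Network connection failed (CurlError): {e}") from e
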
webscout/Provider/uncovr.py

@@ -1,10 +1,11 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  import uuid
  import re
  from typing import Any, Dict, Optional, Generator, Union
  from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
@@ -76,7 +77,9 @@ class UncovrAI(Provider):
  "Sec-Fetch-Site": "same-origin"
  }

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
+ # Update curl_cffi session headers and proxies
  self.session.headers.update(self.headers)
  self.session.proxies.update(proxies)

@@ -106,6 +109,17 @@
  )
  self.conversation.history_offset = history_offset

+ @staticmethod
+ def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+ """Extracts content from the UncovrAI stream format '0:"..."'."""
+ if isinstance(chunk, str):
+ match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: maybe optional quotes
+ if match:
+ # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+ content = match.group(1).encode().decode('unicode_escape')
+ return content.replace('\\\\', '\\').replace('\\"', '"')
+ return None
+
  def refresh_identity(self, browser: str = None):
  """
  Refreshes the browser identity fingerprint.
@@ -169,87 +183,118 @@

  def for_stream():
  try:
- with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
- if response.status_code != 200:
- # If we get a non-200 response, try refreshing our identity once
- if response.status_code in [403, 429]:
- self.refresh_identity()
- # Retry with new identity
- with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
- if not retry_response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
- )
- response = retry_response
- else:
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.url,
+ json=payload,
+ stream=True,
+ timeout=self.timeout,
+ impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
+ )
+
+ if response.status_code != 200:
+ # If we get a non-200 response, try refreshing our identity once
+ if response.status_code in [403, 429]:
+ self.refresh_identity()
+ # Retry with new identity using curl_cffi session
+ retry_response = self.session.post(
+ self.url,
+ json=payload,
+ stream=True,
+ timeout=self.timeout,
+ impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
+ )
+ if not retry_response.ok:
  raise exceptions.FailedToGenerateResponseError(
- f"Request failed with status code {response.status_code}"
+ f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
  )
+ response = retry_response # Use the successful retry response
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Request failed with status code {response.status_code} - {response.text}"
+ )
+
+ streaming_text = ""
+ # Use sanitize_stream with the custom extractor
+ processed_stream = sanitize_stream(
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
+ intro_value=None, # No simple prefix
+ to_json=False, # Content is not JSON
+ content_extractor=self._uncovr_extractor, # Use the specific extractor
+ yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+ )
+
+ for content_chunk in processed_stream:
+ if content_chunk and isinstance(content_chunk, str):
+ streaming_text += content_chunk
+ yield dict(text=content_chunk) if not raw else content_chunk
+
+ self.last_response = {"text": streaming_text}
+ self.conversation.update_chat_history(prompt, streaming_text)

- streaming_text = ""
- for line in response.iter_lines():
- if line:
- try:
- line = line.decode('utf-8')
- # Use regex to match content messages
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
- if content_match: # Content message
- content = content_match.group(1)
- streaming_text += content
- resp = dict(text=content)
- yield resp if raw else resp
- # Check for error messages
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
- if error_match:
- error_msg = error_match.group(1)
- raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
- except (json.JSONDecodeError, UnicodeDecodeError):
- continue
-
- self.last_response = {"text": streaming_text}
- self.conversation.update_chat_history(prompt, streaming_text)
-
- except requests.RequestException as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+ except Exception as e: # Catch other potential exceptions
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+

  def for_non_stream():
  try:
- response = self.session.post(self.url, json=payload, timeout=self.timeout)
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.url,
+ json=payload,
+ timeout=self.timeout,
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
+ )
+
  if response.status_code != 200:
  if response.status_code in [403, 429]:
  self.refresh_identity()
- response = self.session.post(self.url, json=payload, timeout=self.timeout)
+ # Retry with new identity using curl_cffi session
+ response = self.session.post(
+ self.url,
+ json=payload,
+ timeout=self.timeout,
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
+ )
  if not response.ok:
  raise exceptions.FailedToGenerateResponseError(
  f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
  )
  else:
  raise exceptions.FailedToGenerateResponseError(
- f"Request failed with status code {response.status_code}"
+ f"Request failed with status code {response.status_code} - {response.text}"
  )

+ response_text = response.text # Get the full response text
+
+ # Use sanitize_stream to process the non-streaming text
+ # It won't parse as JSON, but will apply the extractor line by line
+ processed_stream = sanitize_stream(
+ data=response_text.splitlines(), # Split into lines first
+ intro_value=None,
+ to_json=False,
+ content_extractor=self._uncovr_extractor,
+ yield_raw_on_error=True
+ )
+
+ # Aggregate the results from the generator
  full_response = ""
- for line in response.iter_lines():
- if line:
- try:
- line = line.decode('utf-8')
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
- if content_match:
- content = content_match.group(1)
- full_response += content
- # Check for error messages
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
- if error_match:
- error_msg = error_match.group(1)
- raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
- except (json.JSONDecodeError, UnicodeDecodeError):
- continue
+ for content in processed_stream:
+ if content and isinstance(content, str):
+ full_response += content

+ # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
  self.last_response = {"text": full_response}
  self.conversation.update_chat_history(prompt, full_response)
  return {"text": full_response}
- except Exception as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+ except Exception as e: # Catch other potential exceptions
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
+

  return for_stream() if stream else for_non_stream()

@@ -283,9 +328,12 @@

  def get_message(self, response: dict) -> str:
  assert isinstance(response, dict), "Response should be of dict data-type only"
- return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+ # Formatting handled by extractor
+ text = response.get("text", "")
+ return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement

  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  print("-" * 80)
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
  print("-" * 80)
@@ -293,20 +341,28 @@
  for model in UncovrAI.AVAILABLE_MODELS:
  try:
  test_ai = UncovrAI(model=model, timeout=60)
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
- response_text = ""
- for chunk in response:
- response_text += chunk
+ # Test non-stream first as stream logic depends on it
+ response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)

- if response_text and len(response_text.strip()) > 0:
- status = "✓"
- # Clean and truncate response
- clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
- display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+ if response_non_stream and len(response_non_stream.strip()) > 0:
+ # Now test stream
+ response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
+ response_text = ""
+ for chunk in response_stream:
+ response_text += chunk
+
+ if response_text and len(response_text.strip()) > 0:
+ status = "✓"
+ # Clean and truncate response
+ clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+ else:
+ status = "✗ (Stream)"
+ display_text = "Empty or invalid stream response"
  else:
- status = "✗"
- display_text = "Empty or invalid response"
+ status = "✗ (Non-Stream)"
+ display_text = "Empty or invalid non-stream response"
+
  print(f"\r{model:<50} {status:<10} {display_text}")
  except Exception as e:
- print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
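
The _uncovr_extractor added above targets UncovrAI's wire format, where content lines look like 0:"..." and error events like 2:[{"type":"error",...}]. A self-contained exercise of that extractor, with the function body copied from the diff; the sample lines are hypothetical, not captured traffic:

    # Standalone check of the extractor logic added in uncovr.py.
    import re
    from typing import Any, Dict, Optional, Union

    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the UncovrAI stream format '0:"..."'."""
        if isinstance(chunk, str):
            match = re.match(r'^0:\s*"?(.*?)"?$', chunk)  # '0:' prefix, optional quotes
            if match:
                # Decode unicode escapes like \u00e9, then unescape quotes/backslashes
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    # Illustrative lines only; a 2:-prefixed error event yields None here
    # and is handled separately by the provider.
    for line in ['0:"Hello"', '0:"caf\\u00e9"', '2:[{"type":"error","error":"quota"}]']:
        print(repr(_uncovr_extractor(line)))
    # -> 'Hello', 'café', None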