webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,5 @@
1
- import requests
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
2
3
  import json
3
4
  import uuid
4
5
  import re
@@ -76,7 +77,9 @@ class UncovrAI(Provider):
76
77
  "Sec-Fetch-Site": "same-origin"
77
78
  }
78
79
 
79
- self.session = requests.Session()
80
+ # Initialize curl_cffi Session
81
+ self.session = Session()
82
+ # Update curl_cffi session headers and proxies
80
83
  self.session.headers.update(self.headers)
81
84
  self.session.proxies.update(proxies)
82
85
 
@@ -169,87 +172,124 @@ class UncovrAI(Provider):
169
172
 
170
173
  def for_stream():
171
174
  try:
172
- with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
173
- if response.status_code != 200:
174
- # If we get a non-200 response, try refreshing our identity once
175
- if response.status_code in [403, 429]:
176
- self.refresh_identity()
177
- # Retry with new identity
178
- with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
179
- if not retry_response.ok:
180
- raise exceptions.FailedToGenerateResponseError(
181
- f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
182
- )
183
- response = retry_response
184
- else:
175
+ # Use curl_cffi session post with impersonate
176
+ response = self.session.post(
177
+ self.url,
178
+ json=payload,
179
+ stream=True,
180
+ timeout=self.timeout,
181
+ impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
182
+ )
183
+
184
+ if response.status_code != 200:
185
+ # If we get a non-200 response, try refreshing our identity once
186
+ if response.status_code in [403, 429]:
187
+ self.refresh_identity()
188
+ # Retry with new identity using curl_cffi session
189
+ retry_response = self.session.post(
190
+ self.url,
191
+ json=payload,
192
+ stream=True,
193
+ timeout=self.timeout,
194
+ impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
195
+ )
196
+ if not retry_response.ok:
185
197
  raise exceptions.FailedToGenerateResponseError(
186
- f"Request failed with status code {response.status_code}"
198
+ f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
187
199
  )
200
+ response = retry_response # Use the successful retry response
201
+ else:
202
+ raise exceptions.FailedToGenerateResponseError(
203
+ f"Request failed with status code {response.status_code} - {response.text}"
204
+ )
205
+
206
+ streaming_text = ""
207
+ # Iterate over bytes and decode manually
208
+ for line_bytes in response.iter_lines():
209
+ if line_bytes:
210
+ try:
211
+ line = line_bytes.decode('utf-8')
212
+ # Use regex to match content messages
213
+ content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
214
+ if content_match: # Content message
215
+ content = content_match.group(1).encode().decode('unicode_escape') # Decode escapes
216
+ streaming_text += content
217
+ resp = dict(text=content)
218
+ yield resp if raw else resp
219
+ # Check for error messages
220
+ error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
221
+ if error_match:
222
+ error_msg = error_match.group(1)
223
+ raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
224
+ except (json.JSONDecodeError, UnicodeDecodeError):
225
+ continue
226
+
227
+ self.last_response = {"text": streaming_text}
228
+ self.conversation.update_chat_history(prompt, streaming_text)
188
229
 
189
- streaming_text = ""
190
- for line in response.iter_lines():
191
- if line:
192
- try:
193
- line = line.decode('utf-8')
194
- # Use regex to match content messages
195
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
196
- if content_match: # Content message
197
- content = content_match.group(1)
198
- streaming_text += content
199
- resp = dict(text=content)
200
- yield resp if raw else resp
201
- # Check for error messages
202
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
203
- if error_match:
204
- error_msg = error_match.group(1)
205
- raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
206
- except (json.JSONDecodeError, UnicodeDecodeError):
207
- continue
208
-
209
- self.last_response = {"text": streaming_text}
210
- self.conversation.update_chat_history(prompt, streaming_text)
211
-
212
- except requests.RequestException as e:
213
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
230
+ except CurlError as e: # Catch CurlError
231
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
232
+ except Exception as e: # Catch other potential exceptions
233
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
234
+
214
235
 
215
236
  def for_non_stream():
216
237
  try:
217
- response = self.session.post(self.url, json=payload, timeout=self.timeout)
238
+ # Use curl_cffi session post with impersonate
239
+ response = self.session.post(
240
+ self.url,
241
+ json=payload,
242
+ timeout=self.timeout,
243
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
244
+ )
245
+
218
246
  if response.status_code != 200:
219
247
  if response.status_code in [403, 429]:
220
248
  self.refresh_identity()
221
- response = self.session.post(self.url, json=payload, timeout=self.timeout)
249
+ # Retry with new identity using curl_cffi session
250
+ response = self.session.post(
251
+ self.url,
252
+ json=payload,
253
+ timeout=self.timeout,
254
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
255
+ )
222
256
  if not response.ok:
223
257
  raise exceptions.FailedToGenerateResponseError(
224
258
  f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
225
259
  )
226
260
  else:
227
261
  raise exceptions.FailedToGenerateResponseError(
228
- f"Request failed with status code {response.status_code}"
262
+ f"Request failed with status code {response.status_code} - {response.text}"
229
263
  )
230
264
 
265
+ # Process the non-streamed response content (assuming it's similar line format)
231
266
  full_response = ""
232
- for line in response.iter_lines():
267
+ # Use response.text which should contain the full body for non-streamed curl_cffi requests
268
+ for line in response.text.splitlines():
233
269
  if line:
234
270
  try:
235
- line = line.decode('utf-8')
271
+ # line is already decoded string
236
272
  content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
237
273
  if content_match:
238
- content = content_match.group(1)
274
+ content = content_match.group(1).encode().decode('unicode_escape') # Decode escapes
239
275
  full_response += content
240
276
  # Check for error messages
241
277
  error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
242
278
  if error_match:
243
279
  error_msg = error_match.group(1)
244
280
  raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
245
- except (json.JSONDecodeError, UnicodeDecodeError):
281
+ except (json.JSONDecodeError): # UnicodeDecodeError less likely here
246
282
  continue
247
283
 
248
284
  self.last_response = {"text": full_response}
249
285
  self.conversation.update_chat_history(prompt, full_response)
250
286
  return {"text": full_response}
251
- except Exception as e:
252
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
287
+
288
+ except CurlError as e: # Catch CurlError
289
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
290
+ except Exception as e: # Catch other potential exceptions
291
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
292
+
253
293
 
254
294
  return for_stream() if stream else for_non_stream()
255
295
 
@@ -286,6 +326,7 @@ class UncovrAI(Provider):
286
326
  return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
287
327
 
288
328
  if __name__ == "__main__":
329
+ # Ensure curl_cffi is installed
289
330
  print("-" * 80)
290
331
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
291
332
  print("-" * 80)
@@ -293,20 +334,29 @@ if __name__ == "__main__":
293
334
  for model in UncovrAI.AVAILABLE_MODELS:
294
335
  try:
295
336
  test_ai = UncovrAI(model=model, timeout=60)
296
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
297
- response_text = ""
298
- for chunk in response:
299
- response_text += chunk
337
+ # Test non-stream first as stream logic depends on it
338
+ response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
300
339
 
301
- if response_text and len(response_text.strip()) > 0:
302
- status = "✓"
303
- # Clean and truncate response
304
- clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
305
- display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
340
+ if response_non_stream and len(response_non_stream.strip()) > 0:
341
+ # Now test stream
342
+ response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
343
+ response_text = ""
344
+ for chunk in response_stream:
345
+ response_text += chunk
346
+
347
+ if response_text and len(response_text.strip()) > 0:
348
+ status = "✓"
349
+ # Clean and truncate response
350
+ clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
351
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
352
+ else:
353
+ status = "✗ (Stream)"
354
+ display_text = "Empty or invalid stream response"
306
355
  else:
307
- status = "✗"
308
- display_text = "Empty or invalid response"
356
+ status = "✗ (Non-Stream)"
357
+ display_text = "Empty or invalid non-stream response"
358
+
309
359
  print(f"\r{model:<50} {status:<10} {display_text}")
310
360
  except Exception as e:
311
- print(f"\r{model:<50} {'✗':<10} {str(e)}")
361
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
312
362
 
@@ -1,14 +1,17 @@
1
- from typing import Union, Any, Dict
1
+ from typing import Optional, Union, Any, Dict
2
2
  from uuid import uuid4
3
- import requests
3
+ from curl_cffi import CurlError
4
+ from curl_cffi.requests import Session
4
5
  import re
5
6
 
6
7
  from webscout.AIutel import Optimizers
7
8
  from webscout.AIutel import Conversation
8
- from webscout.AIutel import AwesomePrompts
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
10
  from webscout.AIbase import Provider
10
11
  from webscout import exceptions
11
12
  from webscout.litagent import LitAgent
13
+ # Import HTTPVersion enum
14
+ from curl_cffi.const import CurlHttpVersion
12
15
 
13
16
  class X0GPT(Provider):
14
17
  """
@@ -60,7 +63,8 @@ class X0GPT(Provider):
60
63
  >>> print(ai.system_prompt)
61
64
  'You are a friendly assistant.'
62
65
  """
63
- self.session = requests.Session()
66
+ # Initialize curl_cffi Session instead of requests.Session
67
+ self.session = Session()
64
68
  self.is_conversation = is_conversation
65
69
  self.max_tokens_to_sample = max_tokens
66
70
  self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
@@ -77,18 +81,18 @@ class X0GPT(Provider):
77
81
  "path": "/api/stream/reply",
78
82
  "scheme": "https",
79
83
  "accept": "*/*",
80
- "accept-encoding": "gzip, deflate, br, zstd",
84
+ "accept-encoding": "gzip, deflate, br, zstd", # Keep zstd for now
81
85
  "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
82
- "content-length": "114",
86
+ # "content-length": "114", # Let curl_cffi handle content-length
83
87
  "content-type": "application/json",
84
88
  "dnt": "1",
85
89
  "origin": "https://x0-gpt.devwtf.in",
86
- "priority": "u=1, i",
90
+ # "priority": "u=1, i", # Remove priority header
87
91
  "referer": "https://x0-gpt.devwtf.in/chat",
88
92
  "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
89
93
  "sec-ch-ua-mobile": "?0",
90
94
  "sec-ch-ua-platform": '"Windows"',
91
- "user-agent": self.agent.random() # Use LitAgent to generate a random user agent
95
+ "user-agent": self.agent.random()
92
96
  }
93
97
 
94
98
  self.__available_optimizers = (
@@ -96,7 +100,10 @@ class X0GPT(Provider):
96
100
  for method in dir(Optimizers)
97
101
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
98
102
  )
103
+ # Update curl_cffi session headers and proxies
99
104
  self.session.headers.update(self.headers)
105
+ self.session.proxies = proxies
106
+
100
107
  Conversation.intro = (
101
108
  AwesomePrompts().get_act(
102
109
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -108,7 +115,17 @@ class X0GPT(Provider):
108
115
  is_conversation, self.max_tokens_to_sample, filepath, update_file
109
116
  )
110
117
  self.conversation.history_offset = history_offset
111
- self.session.proxies = proxies
118
+
119
+ @staticmethod
120
+ def _x0gpt_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
121
+ """Extracts content from the x0gpt stream format '0:"..."'."""
122
+ if isinstance(chunk, str):
123
+ match = re.search(r'0:"(.*?)"', chunk)
124
+ if match:
125
+ # Decode potential unicode escapes like \u00e9
126
+ content = match.group(1).encode().decode('unicode_escape')
127
+ return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
128
+ return None
112
129
 
113
130
  def ask(
114
131
  self,
@@ -158,25 +175,48 @@ class X0GPT(Provider):
158
175
  }
159
176
 
160
177
  def for_stream():
161
- response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
162
- if not response.ok:
163
- raise exceptions.FailedToGenerateResponseError(
164
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
178
+ try:
179
+ # Use curl_cffi session post with updated impersonate and http_version
180
+ response = self.session.post(
181
+ self.api_endpoint,
182
+ headers=self.headers,
183
+ json=payload,
184
+ stream=True,
185
+ timeout=self.timeout,
186
+ impersonate="chrome120", # Try a different impersonation profile
187
+ http_version=CurlHttpVersion.V1_1 # Force HTTP/1.1
165
188
  )
166
- streaming_response = ""
167
- for line in response.iter_lines(decode_unicode=True):
168
- if line:
169
- match = re.search(r'0:"(.*?)"', line)
170
- if match:
171
- content = match.group(1)
172
- streaming_response += content
173
- yield content if raw else dict(text=content)
174
- self.last_response.update(dict(text=streaming_response))
175
- self.conversation.update_chat_history(
176
- prompt, self.get_message(self.last_response)
177
- )
189
+ if not response.ok:
190
+ raise exceptions.FailedToGenerateResponseError(
191
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
192
+ )
193
+
194
+ streaming_response = ""
195
+ # Use sanitize_stream with the custom extractor
196
+ processed_stream = sanitize_stream(
197
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
198
+ intro_value=None, # No simple prefix to remove here
199
+ to_json=False, # Content is not JSON
200
+ content_extractor=self._x0gpt_extractor # Use the specific extractor
201
+ )
202
+
203
+ for content_chunk in processed_stream:
204
+ if content_chunk and isinstance(content_chunk, str):
205
+ streaming_response += content_chunk
206
+ yield content_chunk if raw else dict(text=content_chunk)
207
+
208
+ self.last_response.update(dict(text=streaming_response))
209
+ self.conversation.update_chat_history(
210
+ prompt, self.get_message(self.last_response)
211
+ )
212
+ except CurlError as e: # Catch CurlError
213
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
214
+ except Exception as e: # Catch other potential exceptions
215
+ # Include the original exception type in the message for clarity
216
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
178
217
 
179
218
  def for_non_stream():
219
+ # This function implicitly uses the updated for_stream
180
220
  for _ in for_stream():
181
221
  pass
182
222
  return self.last_response
@@ -245,7 +285,10 @@ class X0GPT(Provider):
245
285
  'Why did the scarecrow win an award? Because he was outstanding in his field!'
246
286
  """
247
287
  assert isinstance(response, dict), "Response should be of dict data-type only"
248
- formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
288
+ # Ensure text exists before processing
289
+ text = response.get("text", "")
290
+ # Formatting is now mostly handled by the extractor, just return
291
+ formatted_text = text
249
292
  return formatted_text
250
293
 
251
294
  if __name__ == "__main__":
webscout/Provider/yep.py CHANGED
@@ -1,11 +1,12 @@
1
1
  import uuid
2
- import cloudscraper
3
2
  import json
3
+ from curl_cffi import CurlError
4
+ from curl_cffi.requests import Session
4
5
 
5
6
  from typing import Any, Dict, Optional, Generator, Union, List, TypeVar
6
7
 
7
8
  from webscout.AIutel import Optimizers
8
- from webscout.AIutel import AwesomePrompts
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
10
  from webscout.AIbase import Provider
10
11
  from webscout import exceptions
11
12
  from webscout.litagent import LitAgent
@@ -62,7 +63,8 @@ class YEPCHAT(Provider):
62
63
  f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
63
64
  )
64
65
 
65
- self.session = cloudscraper.create_scraper()
66
+ # Initialize curl_cffi Session instead of cloudscraper
67
+ self.session = Session()
66
68
  self.is_conversation = is_conversation
67
69
  self.max_tokens_to_sample = max_tokens
68
70
  self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
@@ -111,11 +113,10 @@ class YEPCHAT(Provider):
111
113
  is_conversation, self.max_tokens_to_sample, filepath, update_file, tools=tools
112
114
  )
113
115
  self.conversation.history_offset = history_offset
116
+ # Set consistent headers and proxies for the curl_cffi session
117
+ self.session.headers.update(self.headers)
114
118
  self.session.proxies = proxies
115
-
116
- # Set consistent headers for the scraper session
117
- for header, value in self.headers.items():
118
- self.session.headers[header] = value
119
+ # Note: curl_cffi handles cookies differently, passed directly in requests
119
120
 
120
121
  def refresh_identity(self, browser: str = None):
121
122
  """
@@ -137,10 +138,9 @@ class YEPCHAT(Provider):
137
138
  })
138
139
 
139
140
  # Update session headers
140
- for header, value in self.headers.items():
141
- self.session.headers[header] = value
141
+ self.session.headers.update(self.headers)
142
142
 
143
- # Generate new cookies
143
+ # Generate new cookies (will be passed in requests)
144
144
  self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
145
145
 
146
146
  return self.fingerprint
@@ -192,73 +192,82 @@ class YEPCHAT(Provider):
192
192
 
193
193
  def for_stream():
194
194
  try:
195
- with self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout) as response:
196
- if not response.ok:
197
- # If we get a non-200 response, try refreshing our identity once
198
- if response.status_code in [403, 429]:
199
- self.refresh_identity()
200
- # Retry with new identity
201
- with self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout) as retry_response:
202
- if not retry_response.ok:
203
- raise exceptions.FailedToGenerateResponseError(
204
- f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
205
- )
206
- response = retry_response
207
- else:
195
+ # buffer = b"" # No longer needed here
196
+ # Use curl_cffi session post, pass cookies explicitly
197
+ response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
198
+
199
+ if not response.ok:
200
+ # If we get a non-200 response, try refreshing our identity once
201
+ if response.status_code in [403, 429]:
202
+ self.refresh_identity()
203
+ # Retry with new identity
204
+ # Use curl_cffi session post, pass cookies explicitly
205
+ retry_response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
206
+ if not retry_response.ok:
208
207
  raise exceptions.FailedToGenerateResponseError(
209
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
208
+ f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
210
209
  )
210
+ response = retry_response # Use the successful retry response
211
+ else:
212
+ raise exceptions.FailedToGenerateResponseError(
213
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
214
+ )
211
215
 
212
- streaming_text = ""
213
- for line in response.iter_lines(decode_unicode=True):
214
- if line:
215
- line = line.strip()
216
- if line.startswith("data: "):
217
- json_str = line[6:]
218
- if json_str == "[DONE]":
219
- break
220
- try:
221
- json_data = json.loads(json_str)
222
- if 'choices' in json_data:
223
- choice = json_data['choices'][0]
224
- if 'delta' in choice and 'content' in choice['delta']:
225
- content = choice['delta']['content']
226
- streaming_text += content
227
-
228
- # Yield ONLY the new content:
229
- resp = dict(text=content)
230
- yield resp if raw else resp
231
- except json.JSONDecodeError:
232
- pass
233
-
234
- # Check if the response contains a tool call
235
- response_data = self.conversation.handle_tool_response(streaming_text)
236
-
237
- if response_data["is_tool_call"]:
238
- # Handle tool call results
239
- if response_data["success"]:
240
- for tool_call in response_data.get("tool_calls", []):
241
- tool_name = tool_call.get("name", "unknown_tool")
242
- result = response_data["result"]
243
- self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
244
- else:
245
- # If tool call failed, update history with error
246
- self.conversation.update_chat_history(prompt,
247
- f"Error executing tool call: {response_data['result']}")
216
+ # --- Start of stream processing block (should be outside the 'if not response.ok' block) ---
217
+ streaming_text = ""
218
+
219
+ # Use sanitize_stream to process the lines
220
+ processed_stream = sanitize_stream(
221
+ data=response.iter_content(chunk_size=None), # Pass the byte iterator directly
222
+ intro_value="data:",
223
+ to_json=True, # Yep sends JSON after 'data:'
224
+ skip_markers=["[DONE]"], # Skip the final marker
225
+ yield_raw_on_error=False, # Only process valid JSON data
226
+ # --- Add the content extractor ---
227
+ content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
228
+ )
229
+ # The loop now yields the final extracted string content directly
230
+ for content_chunk in processed_stream:
231
+ # --- TEMPORARY DEBUG PRINT ---
232
+ # print(f"\nDEBUG: Received extracted content: {content_chunk!r}\n", flush=True) # Keep or remove debug print as needed
233
+ if content_chunk and isinstance(content_chunk, str): # Ensure it's a non-empty string
234
+ streaming_text += content_chunk
235
+ # Yield dict or raw string chunk based on 'raw' flag
236
+ yield dict(text=content_chunk) if not raw else content_chunk
237
+ # --- End of stream processing block ---
238
+
239
+ # Check if the response contains a tool call (This should happen *after* processing the stream)
240
+ response_data = self.conversation.handle_tool_response(streaming_text)
241
+
242
+ if response_data["is_tool_call"]:
243
+ # Handle tool call results
244
+ if response_data["success"]:
245
+ for tool_call in response_data.get("tool_calls", []):
246
+ tool_name = tool_call.get("name", "unknown_tool")
247
+ result = response_data["result"]
248
+ self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
248
249
  else:
249
- # Normal response handling
250
- self.conversation.update_chat_history(prompt, streaming_text)
250
+ # If tool call failed, update history with error
251
+ self.conversation.update_chat_history(prompt,
252
+ f"Error executing tool call: {response_data['result']}")
253
+ else:
254
+ # Normal response handling
255
+ self.conversation.update_chat_history(prompt, streaming_text)
251
256
 
257
+ except CurlError as e: # Catch CurlError
258
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
252
259
  except Exception as e:
253
260
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
254
261
 
255
262
  def for_non_stream():
256
263
  try:
257
- response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout)
264
+ # Use curl_cffi session post, pass cookies explicitly
265
+ response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
258
266
  if not response.ok:
259
267
  if response.status_code in [403, 429]:
260
268
  self.refresh_identity()
261
- response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout)
269
+ # Use curl_cffi session post, pass cookies explicitly
270
+ response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
262
271
  if not response.ok:
263
272
  raise exceptions.FailedToGenerateResponseError(
264
273
  f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
@@ -268,6 +277,7 @@ class YEPCHAT(Provider):
268
277
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
269
278
  )
270
279
 
280
+ # ... existing non-stream response handling code ...
271
281
  response_data = response.json()
272
282
  if 'choices' in response_data and len(response_data['choices']) > 0:
273
283
  content = response_data['choices'][0].get('message', {}).get('content', '')
@@ -298,8 +308,10 @@ class YEPCHAT(Provider):
298
308
  return {"text": content}
299
309
  else:
300
310
  raise exceptions.FailedToGenerateResponseError("No response content found")
311
+ except CurlError as e: # Catch CurlError
312
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
301
313
  except Exception as e:
302
- raise exceptions.FailedToGenerateResponseError(f"Request failed: e")
314
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
303
315
 
304
316
  return for_stream() if stream else for_non_stream()
305
317
 
@@ -373,4 +385,5 @@ if __name__ == "__main__":
373
385
  display_text = "Empty or invalid response"
374
386
  print(f"{model:<50} {status:<10} {display_text}")
375
387
  except Exception as e:
376
- print(f"{model:<50} {'✗':<10} {str(e)}")
388
+ print(f"{model:<50} {'✗':<10} {str(e)}")
389
+