webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. See the package registry's advisory page for more details.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,5 @@
1
- import requests
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
2
3
  import json
3
4
  import os
4
5
  from uuid import uuid4
@@ -57,40 +58,37 @@ class AllenAI(Provider):
57
58
  history_offset: int = 10250,
58
59
  act: str = None,
59
60
  model: str = "OLMo-2-1124-13B-Instruct",
60
- host: str = None # Now optional - will auto-detect if not provided
61
+ host: str = None
61
62
  ):
62
63
  """Initializes the AllenAI API client."""
63
64
  if model not in self.AVAILABLE_MODELS:
64
65
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
65
66
 
66
67
  self.url = "https://playground.allenai.org"
67
- # Updated API endpoint to v3 from v4
68
68
  self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
69
69
  self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"
70
70
 
71
- # Updated headers based on JS implementation
71
+ # Updated headers (remove those handled by impersonate)
72
72
  self.headers = {
73
- 'User-Agent': "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36",
74
73
  'Accept': '*/*',
75
74
  'Accept-Language': 'id-ID,id;q=0.9',
76
75
  'Origin': self.url,
77
76
  'Referer': f"{self.url}/",
78
- 'Connection': 'keep-alive',
79
77
  'Cache-Control': 'no-cache',
80
78
  'Pragma': 'no-cache',
81
79
  'Priority': 'u=1, i',
82
80
  'Sec-Fetch-Dest': 'empty',
83
81
  'Sec-Fetch-Mode': 'cors',
84
82
  'Sec-Fetch-Site': 'cross-site',
85
- 'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
86
- 'sec-ch-ua-mobile': '?1',
87
- 'sec-ch-ua-platform': '"Android"',
88
83
  'Content-Type': 'application/json'
89
84
  }
90
85
 
91
- self.session = requests.Session()
86
+ # Initialize curl_cffi Session
87
+ self.session = Session()
88
+ # Update curl_cffi session headers and proxies
92
89
  self.session.headers.update(self.headers)
93
- self.session.proxies.update(proxies)
90
+ self.session.proxies = proxies
91
+
94
92
  self.model = model
95
93
 
96
94
  # Auto-detect host if not provided
@@ -133,27 +131,30 @@ class AllenAI(Provider):
133
131
  def whoami(self):
134
132
  """Gets or creates a user ID for authentication with Allen AI API"""
135
133
  temp_id = str(uuid4())
136
- headers = self.session.headers.copy()
137
- headers.update({"x-anonymous-user-id": temp_id})
134
+ request_headers = self.session.headers.copy() # Use session headers as base
135
+ request_headers.update({"x-anonymous-user-id": temp_id})
138
136
 
139
137
  try:
138
+ # Use curl_cffi session get with impersonate
140
139
  response = self.session.get(
141
140
  self.whoami_endpoint,
142
- headers=headers,
143
- timeout=self.timeout
141
+ headers=request_headers, # Pass updated headers
142
+ timeout=self.timeout,
143
+ impersonate="chrome110" # Use a common impersonation profile
144
144
  )
145
+ response.raise_for_status() # Check for HTTP errors
145
146
 
146
- if response.status_code == 200:
147
- data = response.json()
148
- self.x_anonymous_user_id = data.get("client", temp_id)
149
- return data
150
- else:
151
- self.x_anonymous_user_id = temp_id
152
- return {"client": temp_id}
147
+ data = response.json()
148
+ self.x_anonymous_user_id = data.get("client", temp_id)
149
+ return data
153
150
 
154
- except Exception as e:
151
+ except CurlError as e: # Catch CurlError
155
152
  self.x_anonymous_user_id = temp_id
156
- return {"client": temp_id, "error": str(e)}
153
+ return {"client": temp_id, "error": f"CurlError: {e}"}
154
+ except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
155
+ self.x_anonymous_user_id = temp_id
156
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
157
+ return {"client": temp_id, "error": f"{type(e).__name__}: {e} - {err_text}"}
157
158
 
158
159
 
159
160
  def parse_stream(self, raw_data):
@@ -172,7 +173,7 @@ class AllenAI(Provider):
172
173
  def ask(
173
174
  self,
174
175
  prompt: str,
175
- stream: bool = False,
176
+ stream: bool = False, # API supports streaming
176
177
  raw: bool = False,
177
178
  optimizer: str = None,
178
179
  conversationally: bool = False,
@@ -185,20 +186,22 @@ class AllenAI(Provider):
185
186
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
186
187
  if optimizer:
187
188
  if optimizer in self.__available_optimizers:
188
- conversation_prompt = getattr(Optimizers, optimizer)(
189
- conversation_prompt if conversationally else prompt
190
- )
189
+ conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
191
190
  else:
192
191
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
193
192
 
194
193
  # Ensure we have a user ID
195
194
  if not self.x_anonymous_user_id:
196
195
  self.whoami()
196
+ # Check if whoami failed and we still don't have an ID
197
+ if not self.x_anonymous_user_id:
198
+ raise exceptions.AuthenticationError("Failed to obtain anonymous user ID.")
197
199
 
198
- # Prepare the API request
199
- self.session.headers.update({
200
+ # Prepare the API request headers for this specific request
201
+ request_headers = self.session.headers.copy()
202
+ request_headers.update({
200
203
  "x-anonymous-user-id": self.x_anonymous_user_id,
201
- "Content-Type": "application/json"
204
+ "Content-Type": "application/json" # Ensure Content-Type is set
202
205
  })
203
206
 
204
207
  # Create options dictionary
@@ -232,122 +235,149 @@ class AllenAI(Provider):
232
235
  "host": current_host,
233
236
  "opts": opts
234
237
  }
235
-
236
- # Add parent if exists
237
- if self.parent:
238
- payload["parent"] = self.parent
238
+ payload["host"] = current_host # Ensure host is updated in payload
239
239
 
240
240
  try:
241
241
  if stream:
242
- return self._stream_request(payload, prompt, raw)
242
+ # Pass request_headers to the stream method
243
+ return self._stream_request(payload, prompt, request_headers, raw)
243
244
  else:
244
- return self._non_stream_request(payload, prompt)
245
- except exceptions.FailedToGenerateResponseError as e:
245
+ # Pass request_headers to the non-stream method
246
+ return self._non_stream_request(payload, prompt, request_headers, raw)
247
+ except (exceptions.FailedToGenerateResponseError, CurlError, Exception) as e:
246
248
  last_error = e
247
249
  # Log the error but continue to try other hosts
248
- print(f"Host '{current_host}' failed for model '{self.model}', trying next host...")
250
+ print(f"Host '{current_host}' failed for model '{self.model}' ({type(e).__name__}), trying next host...")
249
251
  continue
250
252
 
251
253
  # If we've tried all hosts and none worked, raise the last error
252
254
  raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")
253
255
 
254
- def _stream_request(self, payload, prompt, raw=False):
255
- """Handle streaming requests with the given payload"""
256
+ def _stream_request(self, payload, prompt, request_headers, raw=False):
257
+ """Handle streaming requests with the given payload and headers"""
258
+ streaming_text = "" # Initialize outside try block
259
+ current_parent = None # Initialize outside try block
256
260
  try:
261
+ # Use curl_cffi session post with impersonate
257
262
  response = self.session.post(
258
263
  self.api_endpoint,
264
+ headers=request_headers, # Use headers passed to this method
259
265
  json=payload,
260
266
  stream=True,
261
- timeout=self.timeout
267
+ timeout=self.timeout,
268
+ impersonate="chrome110" # Use a common impersonation profile
262
269
  )
270
+ response.raise_for_status() # Check for HTTP errors
263
271
 
264
- if response.status_code != 200:
265
- raise exceptions.FailedToGenerateResponseError(
266
- f"Request failed with status code {response.status_code}: {response.text}"
267
- )
268
-
269
- streaming_text = ""
270
- current_parent = None
271
-
272
- for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
273
- if not chunk:
272
+ # Iterate over bytes and decode manually
273
+ for chunk_bytes in response.iter_content(chunk_size=None): # Process byte chunks
274
+ if not chunk_bytes:
274
275
  continue
275
276
 
276
- decoded = chunk.decode(errors="ignore")
277
- for line in decoded.splitlines():
278
- line = line.strip()
279
- if not line:
280
- continue
281
-
282
- try:
277
+ try:
278
+ decoded = chunk_bytes.decode('utf-8')
279
+ for line in decoded.splitlines(): # Process lines within the chunk
280
+ line = line.strip()
281
+ if not line:
282
+ continue
283
+
283
284
  data = json.loads(line)
284
- except json.JSONDecodeError:
285
- continue
286
-
287
- if isinstance(data, dict):
288
- # Check for message pattern from JS implementation
289
- if data.get("message", "").startswith("msg_") and "content" in data:
290
- content = data.get("content", "")
291
- if content:
292
- streaming_text += content
293
- resp = dict(text=content)
294
- yield resp if raw else resp
295
285
 
296
- # Legacy handling for older API
297
- elif "message" in data and data.get("content"):
298
- content = data.get("content")
299
- if content.strip():
286
+ # Check for message pattern from JS implementation
287
+ if isinstance(data, dict):
288
+ content_found = False
289
+ if data.get("message", "").startswith("msg_") and "content" in data:
290
+ content = data.get("content", "")
291
+ content_found = True
292
+ # Legacy handling for older API
293
+ elif "message" in data and data.get("content"):
294
+ content = data.get("content")
295
+ content_found = True
296
+
297
+ if content_found and content:
300
298
  streaming_text += content
301
299
  resp = dict(text=content)
302
- yield resp if raw else resp
303
-
304
- # Update parent ID if present
305
- if data.get("id"):
306
- current_parent = data.get("id")
307
- elif data.get("children"):
308
- for child in data["children"]:
309
- if child.get("role") == "assistant":
310
- current_parent = child.get("id")
311
- break
312
-
313
- # Handle completion
314
- if data.get("final") or data.get("finish_reason") == "stop":
315
- if current_parent:
316
- self.parent = current_parent
300
+ yield resp if not raw else content
317
301
 
318
- # Update conversation history
319
- self.conversation.update_chat_history(prompt, streaming_text)
320
- self.last_response = {"text": streaming_text}
321
- return
302
+ # Update parent ID if present
303
+ if data.get("id"):
304
+ current_parent = data.get("id")
305
+ elif data.get("children"):
306
+ for child in data["children"]:
307
+ if child.get("role") == "assistant":
308
+ current_parent = child.get("id")
309
+ break
310
+
311
+ # Handle completion
312
+ if data.get("final") or data.get("finish_reason") == "stop":
313
+ if current_parent:
314
+ self.parent = current_parent
315
+
316
+ # Update conversation history
317
+ self.conversation.update_chat_history(prompt, streaming_text)
318
+ self.last_response = {"text": streaming_text}
319
+ return # End the generator
320
+
321
+ except (json.JSONDecodeError, UnicodeDecodeError):
322
+ continue # Ignore lines/chunks that are not valid JSON or cannot be decoded
322
323
 
323
- except requests.RequestException as e:
324
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
324
+ # If loop finishes without returning (e.g., no final message), update history
325
+ if current_parent:
326
+ self.parent = current_parent
327
+ self.conversation.update_chat_history(prompt, streaming_text)
328
+ self.last_response = {"text": streaming_text}
329
+
330
+ except CurlError as e: # Catch CurlError
331
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
332
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
333
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
334
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
325
335
 
326
- def _non_stream_request(self, payload, prompt):
327
- """Handle non-streaming requests with the given payload"""
336
+
337
+ def _non_stream_request(self, payload, prompt, request_headers, raw=False):
338
+ """Handle non-streaming requests with the given payload and headers"""
328
339
  try:
329
- # For non-streaming requests, we can directly send without stream=True
340
+ # Use curl_cffi session post with impersonate
330
341
  response = self.session.post(
331
342
  self.api_endpoint,
343
+ headers=request_headers, # Use headers passed to this method
332
344
  json=payload,
333
- stream=False,
334
- timeout=self.timeout
345
+ stream=False, # Explicitly set stream to False
346
+ timeout=self.timeout,
347
+ impersonate="chrome110" # Use a common impersonation profile
335
348
  )
336
-
337
- if response.status_code != 200:
338
- raise exceptions.FailedToGenerateResponseError(
339
- f"Request failed with status code {response.status_code}: {response.text}"
340
- )
349
+ response.raise_for_status() # Check for HTTP errors
341
350
 
342
351
  # Parse the response as per JS implementation
343
352
  raw_response = response.text
344
- parsed_response = self.parse_stream(raw_response)
353
+ parsed_response = self.parse_stream(raw_response) # Use existing parser
354
+
355
+ # Update parent ID from the full response if possible (might need adjustment based on actual non-stream response structure)
356
+ # This part is speculative as the non-stream structure isn't fully clear from the stream logic
357
+ try:
358
+ lines = raw_response.splitlines()
359
+ if lines:
360
+ last_line_data = json.loads(lines[-1])
361
+ if last_line_data.get("id"):
362
+ self.parent = last_line_data.get("id")
363
+ elif last_line_data.get("children"):
364
+ for child in last_line_data["children"]:
365
+ if child.get("role") == "assistant":
366
+ self.parent = child.get("id")
367
+ break
368
+ except (json.JSONDecodeError, IndexError):
369
+ pass # Ignore errors parsing parent ID from non-stream
370
+
345
371
  self.conversation.update_chat_history(prompt, parsed_response)
346
372
  self.last_response = {"text": parsed_response}
347
- return self.last_response
373
+ return self.last_response if not raw else parsed_response # Return dict or raw string
348
374
 
349
- except Exception as e:
350
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
375
+ except CurlError as e: # Catch CurlError
376
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
377
+ except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
378
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
379
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
380
+
351
381
 
352
382
  def chat(
353
383
  self,
@@ -357,29 +387,35 @@ class AllenAI(Provider):
357
387
  conversationally: bool = False,
358
388
  host: str = None,
359
389
  options: dict = None,
360
- ) -> str:
361
- def for_stream():
362
- for response in self.ask(
390
+ ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
391
+ def for_stream_chat(): # Renamed inner function
392
+ # ask() yields dicts or strings when streaming
393
+ gen = self.ask(
363
394
  prompt,
364
- True,
395
+ stream=True,
396
+ raw=False, # Ensure ask yields dicts
365
397
  optimizer=optimizer,
366
398
  conversationally=conversationally,
367
399
  host=host,
368
400
  options=options
369
- ):
370
- yield self.get_message(response)
371
- def for_non_stream():
372
- return self.get_message(
373
- self.ask(
374
- prompt,
375
- False,
376
- optimizer=optimizer,
377
- conversationally=conversationally,
378
- host=host,
379
- options=options
380
- )
381
401
  )
382
- return for_stream() if stream else for_non_stream()
402
+ for response_dict in gen:
403
+ yield self.get_message(response_dict) # get_message expects dict
404
+
405
+ def for_non_stream_chat(): # Renamed inner function
406
+ # ask() returns dict or str when not streaming
407
+ response_data = self.ask(
408
+ prompt,
409
+ stream=False,
410
+ raw=False, # Ensure ask returns dict
411
+ optimizer=optimizer,
412
+ conversationally=conversationally,
413
+ host=host,
414
+ options=options
415
+ )
416
+ return self.get_message(response_data) # get_message expects dict
417
+
418
+ return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions
383
419
 
384
420
  def get_message(self, response: dict) -> str:
385
421
  assert isinstance(response, dict), "Response should be of dict data-type only"
@@ -388,6 +424,7 @@ class AllenAI(Provider):
388
424
 
389
425
 
390
426
  if __name__ == "__main__":
427
+ # Ensure curl_cffi is installed
391
428
  print("-" * 80)
392
429
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
393
430
  print("-" * 80)