webscout-7.9-py3-none-any.whl → webscout-8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
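
The headline addition in 8.1 is the new webscout/Provider/OPENAI subpackage (items 25-44 above), a set of OpenAI-compatible clients built on a shared base class. Judging from the ExaAI module reproduced in the hunk below, they mirror the openai-python calling convention. The following is a minimal usage sketch; the import path and the attribute-style access on the response objects are assumptions inferred from the code below, not verified against the published wheel:

from webscout.Provider.OPENAI.exaai import ExaAI  # assumed import path

client = ExaAI(timeout=30)

# Non-streaming: returns a ChatCompletion object
response = client.chat.completions.create(
    model="O3-Mini",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)  # attribute access assumed (OpenAI-style dataclasses)

# Streaming: returns a generator of ChatCompletionChunk objects
for chunk in client.chat.completions.create(
    model="O3-Mini",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    if chunk.choices[0].delta.content:  # final chunk carries content=None
        print(chunk.choices[0].delta.content, end="")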
webscout/Provider/OPENAI/exaai.py (new file)
@@ -0,0 +1,419 @@
+import time
+import uuid
+import requests
+import json
+import re
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    # Define a dummy LitAgent if webscout is not installed or accessible
+    class LitAgent:
+        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+            # Return minimal default headers if LitAgent is unavailable
+            print("Warning: LitAgent not found. Using default minimal headers.")
+            return {
+                "accept": "*/*",
+                "accept_language": "en-US,en;q=0.9",
+                "platform": "Windows",
+                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                "browser_type": browser,
+            }
+
+        def random(self) -> str:
+            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+
+# --- ExaAI Client ---
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'ExaAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Filter out system messages and warn the user if any are present
+        filtered_messages = []
+        has_system_message = False
+
+        for msg in messages:
+            if msg["role"] == "system":
+                has_system_message = True
+                continue  # Skip system messages
+            filtered_messages.append(msg)
+
+        if has_system_message:
+            # Print warning in bold red
+            print(f"{BOLD}{RED}Warning: ExaAI does not support system messages, they will be ignored.{RESET}")
+
+        # If no messages left after filtering, raise an error
+        if not filtered_messages:
+            raise ValueError("At least one user message is required")
+
+        # Generate a unique ID for the conversation
+        conversation_id = uuid.uuid4().hex[:16]
+
+        # Prepare the payload for ExaAI API
+        payload = {
+            "id": conversation_id,
+            "messages": filtered_messages
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        for key, value in kwargs.items():
+            if key not in payload:
+                payload[key] = value
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            # Estimate prompt tokens based on message length
+            for msg in payload.get("messages", []):
+                prompt_tokens += len(msg.get("content", "").split())
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+
+                        # Format the content (replace escaped newlines)
+                        content = self._client.format_text(content)
+
+                        # Update token counts
+                        completion_tokens += 1
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        # Create the delta object
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+
+                        # Create the choice object
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+
+                        # Create the chunk object
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+
+                        # Convert to dict for proper formatting
+                        chunk_dict = chunk.to_dict()
+
+                        # Add usage information to match OpenAI format
+                        usage_dict = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+
+                        chunk_dict["usage"] = usage_dict
+
+                        # Return the chunk object for internal processing
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            chunk_dict = chunk.to_dict()
+            chunk_dict["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during ExaAI stream request: {e}")
+            raise IOError(f"ExaAI request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            # For non-streaming, we still use streaming internally to collect the full response
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Estimate token counts
+            prompt_tokens = 0
+            for msg in payload.get("messages", []):
+                prompt_tokens += len(msg.get("content", "").split())
+
+            completion_tokens = len(full_text.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during ExaAI non-stream request: {e}")
+            raise IOError(f"ExaAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'ExaAI'):
+        self.completions = Completions(client)
+
+class ExaAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for ExaAI API.
+
+    Usage:
+        client = ExaAI()
+        response = client.chat.completions.create(
+            model="O3-Mini",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+
+    Note:
+        ExaAI does not support system messages. Any system messages will be ignored.
+    """
+
+    AVAILABLE_MODELS = ["O3-Mini"]
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the ExaAI client.
+
+        Args:
+            timeout: Request timeout in seconds (None for no timeout)
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = timeout
+        self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        # Headers for the request
+        self.headers = {
+            "authority": "o3minichat.exa.ai",
+            "accept": self.fingerprint["accept"],
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://o3minichat.exa.ai",
+            "priority": "u=1, i",
+            "referer": "https://o3minichat.exa.ai/",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by ExaAI.
+
+        Args:
+            model: Model name to convert (ignored as ExaAI only supports O3-Mini)
+
+        Returns:
+            ExaAI model name
+        """
+        # ExaAI only supports O3-Mini, regardless of the input model
+        print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
+        return "O3-Mini"
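
For reference, the streaming parser above extracts text from upstream lines of the form 0:"...", which appears to be the Vercel AI SDK data-stream framing, and then unescapes it. A self-contained sketch of that decode path, using a made-up sample line rather than captured ExaAI output:

import json
import re

raw_line = '0:"Hello\\nworld"'  # hypothetical stream line; escapes arrive literally
match = re.search(r'0:"(.*?)"', raw_line)
if match:
    content = match.group(1)              # 'Hello\\nworld', backslash-n still literal
    decoded = json.loads(f'"{content}"')  # same JSON fallback format_text uses
    print(decoded)                        # prints "Hello" and "world" on separate lines

Note that the lazy (.*?) stops at the first double quote it meets, so a chunk containing an escaped quote would be truncated at that point; that is a property of the published parser, not of this sketch.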