webscout 7.9__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,459 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ import re
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage, get_system_prompt
13
+ )
14
+
15
+ # Attempt to import LitAgent, fallback if not available
16
# LitAgent supplies realistic browser fingerprints; fall back to a static
# stub when the webscout package is unavailable so this module still imports.
try:
    from webscout.litagent import LitAgent
except ImportError:
    class LitAgent:
        """Minimal stand-in returning fixed Chrome-like header values."""

        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
            # Warn once per call so the degraded mode is visible to the user.
            print("Warning: LitAgent not found. Using default minimal headers.")
            fingerprint: Dict[str, Any] = {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "platform": "Windows",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
                "browser_type": browser,
            }
            return fingerprint
32
+
33
# --- SciraChat Client ---

class Completions(BaseCompletions):
    """OpenAI-style ``completions`` interface backed by the Scira API."""

    def __init__(self, client: 'SciraChat'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create
        """
        client = self._client

        # Payload shape expected by the Scira search endpoint.
        payload: Dict[str, Any] = {
            "id": client.chat_id,
            "messages": messages,
            "model": client.convert_model_name(model),
            "group": "chat",  # Always use chat mode (no web search)
            "user_id": client.user_id,
            "timezone": "Asia/Calcutta",
        }

        # Only forward a positive max_tokens; the API treats absence as "no limit".
        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens

        # Merge caller-supplied extras without clobbering the keys set above.
        for extra_key, extra_value in kwargs.items():
            payload.setdefault(extra_key, extra_value)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)
82
+ def _create_stream(
83
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
84
+ ) -> Generator[ChatCompletionChunk, None, None]:
85
+ try:
86
+ response = self._client.session.post(
87
+ self._client.api_endpoint,
88
+ json=payload,
89
+ stream=True,
90
+ timeout=self._client.timeout
91
+ )
92
+
93
+ # Handle non-200 responses
94
+ if not response.ok:
95
+ # Try to refresh identity if we get a 403 or 429
96
+ if response.status_code in [403, 429]:
97
+ print(f"Received status code {response.status_code}, refreshing identity...")
98
+ self._client.refresh_identity()
99
+ response = self._client.session.post(
100
+ self._client.api_endpoint,
101
+ json=payload,
102
+ stream=True,
103
+ timeout=self._client.timeout
104
+ )
105
+ if not response.ok:
106
+ raise IOError(
107
+ f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
108
+ )
109
+ print("Identity refreshed successfully.")
110
+ else:
111
+ raise IOError(
112
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
113
+ )
114
+
115
+ # Track token usage across chunks
116
+ prompt_tokens = 0
117
+ completion_tokens = 0
118
+ total_tokens = 0
119
+
120
+ # Estimate prompt tokens based on message length
121
+ prompt_tokens = len(payload.get("messages", [{}])[0].get("content", "").split())
122
+
123
+ for line in response.iter_lines():
124
+ if not line:
125
+ continue
126
+
127
+ try:
128
+ line_str = line.decode('utf-8')
129
+
130
+ # Format: 0:"content" (quoted format)
131
+ match = re.search(r'0:"(.*?)"', line_str)
132
+ if match:
133
+ content = match.group(1)
134
+
135
+ # Format the content (replace escaped newlines)
136
+ content = self._client.format_text(content)
137
+
138
+ # Update token counts
139
+ completion_tokens += 1
140
+ total_tokens = prompt_tokens + completion_tokens
141
+
142
+ # Create the delta object
143
+ delta = ChoiceDelta(
144
+ content=content,
145
+ role="assistant",
146
+ tool_calls=None
147
+ )
148
+
149
+ # Create the choice object
150
+ choice = Choice(
151
+ index=0,
152
+ delta=delta,
153
+ finish_reason=None,
154
+ logprobs=None
155
+ )
156
+
157
+ # Create the chunk object
158
+ chunk = ChatCompletionChunk(
159
+ id=request_id,
160
+ choices=[choice],
161
+ created=created_time,
162
+ model=model,
163
+ system_fingerprint=None
164
+ )
165
+
166
+ # Convert to dict for proper formatting
167
+ chunk_dict = chunk.to_dict()
168
+
169
+ # Add usage information to match OpenAI format
170
+ usage_dict = {
171
+ "prompt_tokens": prompt_tokens,
172
+ "completion_tokens": completion_tokens,
173
+ "total_tokens": total_tokens,
174
+ "estimated_cost": None
175
+ }
176
+
177
+ chunk_dict["usage"] = usage_dict
178
+
179
+ # Return the chunk object for internal processing
180
+ yield chunk
181
+ except Exception as e:
182
+ print(f"Error processing chunk: {e}")
183
+ continue
184
+
185
+ # Final chunk with finish_reason="stop"
186
+ delta = ChoiceDelta(
187
+ content=None,
188
+ role=None,
189
+ tool_calls=None
190
+ )
191
+
192
+ choice = Choice(
193
+ index=0,
194
+ delta=delta,
195
+ finish_reason="stop",
196
+ logprobs=None
197
+ )
198
+
199
+ chunk = ChatCompletionChunk(
200
+ id=request_id,
201
+ choices=[choice],
202
+ created=created_time,
203
+ model=model,
204
+ system_fingerprint=None
205
+ )
206
+
207
+ chunk_dict = chunk.to_dict()
208
+ chunk_dict["usage"] = {
209
+ "prompt_tokens": prompt_tokens,
210
+ "completion_tokens": completion_tokens,
211
+ "total_tokens": total_tokens,
212
+ "estimated_cost": None
213
+ }
214
+
215
+ yield chunk
216
+
217
+ except Exception as e:
218
+ print(f"Error during SciraChat stream request: {e}")
219
+ raise IOError(f"SciraChat request failed: {e}") from e
220
+
221
+ def _create_non_stream(
222
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
223
+ ) -> ChatCompletion:
224
+ try:
225
+ response = self._client.session.post(
226
+ self._client.api_endpoint,
227
+ json=payload,
228
+ timeout=self._client.timeout
229
+ )
230
+
231
+ # Handle non-200 responses
232
+ if not response.ok:
233
+ # Try to refresh identity if we get a 403 or 429
234
+ if response.status_code in [403, 429]:
235
+ print(f"Received status code {response.status_code}, refreshing identity...")
236
+ self._client.refresh_identity()
237
+ response = self._client.session.post(
238
+ self._client.api_endpoint,
239
+ json=payload,
240
+ timeout=self._client.timeout
241
+ )
242
+ if not response.ok:
243
+ raise IOError(
244
+ f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
245
+ )
246
+ print("Identity refreshed successfully.")
247
+ else:
248
+ raise IOError(
249
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
250
+ )
251
+
252
+ # Collect the full response
253
+ full_response = ""
254
+ for line in response.iter_lines():
255
+ if line:
256
+ try:
257
+ line_str = line.decode('utf-8')
258
+
259
+ # Format: 0:"content" (quoted format)
260
+ match = re.search(r'0:"(.*?)"', line_str)
261
+ if match:
262
+ content = match.group(1)
263
+ full_response += content
264
+ except:
265
+ continue
266
+
267
+ # Format the text (replace escaped newlines)
268
+ full_response = self._client.format_text(full_response)
269
+
270
+ # Estimate token counts
271
+ prompt_tokens = len(payload.get("messages", [{}])[0].get("content", "").split())
272
+ completion_tokens = len(full_response.split())
273
+ total_tokens = prompt_tokens + completion_tokens
274
+
275
+ # Create the message object
276
+ message = ChatCompletionMessage(
277
+ role="assistant",
278
+ content=full_response
279
+ )
280
+
281
+ # Create the choice object
282
+ choice = Choice(
283
+ index=0,
284
+ message=message,
285
+ finish_reason="stop"
286
+ )
287
+
288
+ # Create the usage object
289
+ usage = CompletionUsage(
290
+ prompt_tokens=prompt_tokens,
291
+ completion_tokens=completion_tokens,
292
+ total_tokens=total_tokens
293
+ )
294
+
295
+ # Create the completion object
296
+ completion = ChatCompletion(
297
+ id=request_id,
298
+ choices=[choice],
299
+ created=created_time,
300
+ model=model,
301
+ usage=usage,
302
+ )
303
+
304
+ return completion
305
+
306
+ except Exception as e:
307
+ print(f"Error during SciraChat non-stream request: {e}")
308
+ raise IOError(f"SciraChat request failed: {e}") from e
309
+
310
class Chat(BaseChat):
    """Namespace object mirroring the OpenAI SDK layout: chat.completions.create()."""

    def __init__(self, client: 'SciraChat'):
        self.completions = Completions(client)
314
class SciraChat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Scira Chat API.

    Usage:
        client = SciraChat()
        response = client.chat.completions.create(
            model="scira-default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Model identifiers accepted by the Scira backend; anything else is
    # mapped to "scira-default" by convert_model_name().
    AVAILABLE_MODELS = [
        "scira-default", # Grok3
        "scira-grok-3-mini", # Grok3-mini (thinking model)
        "scira-vision", # Grok2-Vision (vision model)
        "scira-claude", # Sonnet-3.7
        "scira-optimus", # optimus
    ]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the SciraChat client.

        Args:
            timeout: Request timeout in seconds (defaults to 30 when None)
            browser: Browser to emulate in user agent
        """
        self.timeout = timeout or 30  # Default to 30 seconds if None
        self.api_endpoint = "https://scira.ai/api/search"
        self.session = requests.Session()

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers; Origin/Referer must match the
        # Scira site or requests get rejected.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://scira.ai",
            "Referer": "https://scira.ai/",
            # Fall back to a static Edge UA-brand string when the fingerprint
            # provides none.
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        self.session.headers.update(self.headers)

        # Generate unique IDs for chat session and user
        self.chat_id = str(uuid.uuid4())
        self.user_id = f"user_{str(uuid.uuid4())[:8].upper()}"

        # Initialize the chat interface
        self.chat = Chat(self)
380
    def refresh_identity(self, browser: Optional[str] = None):
        """
        Refreshes the browser identity fingerprint.

        Used after 403/429 responses to rotate the emulated browser identity.

        Args:
            browser: Specific browser to use for the new fingerprint
                (defaults to the current fingerprint's browser type).

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            # Keep the previous Sec-CH-UA value if the new fingerprint has none.
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint
404
+
405
+ def format_text(self, text: str) -> str:
406
+ """
407
+ Format text by replacing escaped newlines with actual newlines.
408
+
409
+ Args:
410
+ text: Text to format
411
+
412
+ Returns:
413
+ Formatted text
414
+ """
415
+ # Use a more comprehensive approach to handle all escape sequences
416
+ try:
417
+ # First handle double backslashes to avoid issues
418
+ text = text.replace('\\\\', '\\')
419
+
420
+ # Handle common escape sequences
421
+ text = text.replace('\\n', '\n')
422
+ text = text.replace('\\r', '\r')
423
+ text = text.replace('\\t', '\t')
424
+ text = text.replace('\\"', '"')
425
+ text = text.replace("\\'", "'")
426
+
427
+ # Handle any remaining escape sequences using JSON decoding
428
+ # This is a fallback in case there are other escape sequences
429
+ try:
430
+ # Add quotes to make it a valid JSON string
431
+ json_str = f'"{text}"'
432
+ # Use json module to decode all escape sequences
433
+ decoded = json.loads(json_str)
434
+ return decoded
435
+ except json.JSONDecodeError:
436
+ # If JSON decoding fails, return the text with the replacements we've already done
437
+ return text
438
+ except Exception as e:
439
+ # If any error occurs, return the original text
440
+ print(f"Warning: Error formatting text: {e}")
441
+ return text
442
+
443
+ def convert_model_name(self, model: str) -> str:
444
+ """
445
+ Convert model names to ones supported by SciraChat.
446
+
447
+ Args:
448
+ model: Model name to convert
449
+
450
+ Returns:
451
+ SciraChat model name
452
+ """
453
+ # If the model is already a valid SciraChat model, return it
454
+ if model in self.AVAILABLE_MODELS:
455
+ return model
456
+
457
+ # Default to scira-default if model not found
458
+ print(f"Warning: Unknown model '{model}'. Using 'scira-default' instead.")
459
+ return "scira-default"