webscout 8.0 (py3-none-any.whl) → 8.2 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the advisory details below.

Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
@@ -0,0 +1,463 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ import re
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage, get_system_prompt
13
+ )
14
+
15
# Prefer the real LitAgent; fall back to a static stub when webscout
# is not installed or importable.
try:
    from webscout.litagent import LitAgent
except ImportError:
    class LitAgent:
        """Minimal stand-in used when webscout's LitAgent is unavailable."""

        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
            """Return a fixed, Chrome-like fingerprint dictionary.

            The values mirror a generic Chrome/127 on Windows profile; only
            ``browser_type`` reflects the requested *browser* argument.
            """
            print("Warning: LitAgent not found. Using default minimal headers.")
            fingerprint: Dict[str, Any] = {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "platform": "Windows",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            }
            fingerprint["browser_type"] = browser
            return fingerprint
32
+
33
+ # --- SciraChat Client ---
34
+
35
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` surface for the SciraChat API."""

    def __init__(self, client: 'SciraChat'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Args:
            model: Model alias; resolved via ``SciraChat.convert_model_name``.
            messages: Conversation as a list of ``{"role", "content"}`` dicts.
            max_tokens: Optional completion-length cap (sent only if > 0).
            stream: If True, return a generator of ``ChatCompletionChunk``.
            temperature: Accepted for OpenAI-signature compatibility; the
                Scira payload does not carry it, so it is not forwarded.
            top_p: Same as ``temperature`` — accepted but not forwarded.
            **kwargs: Extra fields merged into the request payload verbatim.

        Returns:
            A ``ChatCompletion``, or a generator of ``ChatCompletionChunk``
            when ``stream=True``.
        """
        payload = {
            "id": self._client.chat_id,
            "messages": messages,
            "model": self._client.convert_model_name(model),
            "group": "chat",  # Always use chat mode (no web search)
            "user_id": self._client.user_id,
            "timezone": "Asia/Calcutta"
        }

        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens

        # Merge caller extras without clobbering the mandatory fields above.
        for key, value in kwargs.items():
            if key not in payload:
                payload[key] = value

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _post(self, payload: Dict[str, Any], stream: bool):
        """
        POST *payload* to the Scira endpoint, retrying once with a fresh
        browser identity on 403/429 (fingerprint-based blocking).

        Raises:
            IOError: For any non-OK response (or if the retry also fails).
        """
        response = self._client.session.post(
            self._client.api_endpoint,
            json=payload,
            stream=stream,
            timeout=self._client.timeout
        )
        if response.ok:
            return response

        if response.status_code in (403, 429):
            print(f"Received status code {response.status_code}, refreshing identity...")
            self._client.refresh_identity()
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=stream,
                timeout=self._client.timeout
            )
            if not response.ok:
                raise IOError(
                    f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                )
            print("Identity refreshed successfully.")
            return response

        raise IOError(
            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
        )

    @staticmethod
    def _estimate_prompt_tokens(payload: Dict[str, Any]) -> int:
        """
        Whitespace-split token estimate over *all* messages.

        (Fixes the previous version, which counted only the first message
        and raised IndexError when the message list was empty.)
        """
        return sum(
            len(str(message.get("content", "")).split())
            for message in payload.get("messages", [])
        )

    @staticmethod
    def _extract_content(raw_line: bytes) -> Optional[str]:
        """Pull the payload text out of a ``0:"..."`` wire line, or None."""
        try:
            line_str = raw_line.decode('utf-8')
        except UnicodeDecodeError:
            # Skip undecodable lines instead of swallowing all errors
            # (the previous version used a bare ``except:`` here).
            return None
        match = re.search(r'0:"(.*?)"', line_str)
        return match.group(1) if match else None

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects as the API streams them."""
        try:
            response = self._post(payload, stream=True)

            prompt_tokens = self._estimate_prompt_tokens(payload)
            completion_tokens = 0

            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    content = self._extract_content(line)
                    if content is None:
                        continue

                    # Decode escape sequences embedded in the wire format.
                    content = self._client.format_text(content)
                    completion_tokens += 1  # crude one-token-per-chunk estimate

                    delta = ChoiceDelta(content=content, role="assistant", tool_calls=None)
                    choice = Choice(index=0, delta=delta, finish_reason=None, logprobs=None)
                    # NOTE: the previous version also built a usage dict on a
                    # to_dict() copy that was never yielded; that dead code
                    # has been removed — the chunk object itself is yielded.
                    yield ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                        system_fingerprint=None
                    )
                except Exception as e:
                    # Skip a malformed chunk rather than killing the stream.
                    print(f"Error processing chunk: {e}")
                    continue

            # Terminal chunk with finish_reason="stop", OpenAI-style.
            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )

        except Exception as e:
            print(f"Error during SciraChat stream request: {e}")
            raise IOError(f"SciraChat request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Collect the whole streamed reply and return one ``ChatCompletion``."""
        try:
            response = self._post(payload, stream=False)

            # Reassemble the full reply from the streamed wire format.
            parts: List[str] = []
            for line in response.iter_lines():
                if not line:
                    continue
                content = self._extract_content(line)
                if content is not None:
                    parts.append(content)

            full_response = self._client.format_text("".join(parts))

            # Token counts are whitespace-split estimates, not real tokens.
            prompt_tokens = self._estimate_prompt_tokens(payload)
            completion_tokens = len(full_response.split())
            total_tokens = prompt_tokens + completion_tokens

            message = ChatCompletionMessage(role="assistant", content=full_response)
            choice = Choice(index=0, message=message, finish_reason="stop")
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"Error during SciraChat non-stream request: {e}")
            raise IOError(f"SciraChat request failed: {e}") from e
309
+
310
class Chat(BaseChat):
    """Namespace object exposing the ``completions`` sub-interface, OpenAI-style."""

    def __init__(self, client: 'SciraChat'):
        # All completion calls are delegated to the shared client instance.
        self.completions = Completions(client)
313
+
314
class SciraChat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Scira Chat API.

    Usage:
        client = SciraChat()
        response = client.chat.completions.create(
            model="scira-default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Map of public model aliases to the underlying model names.
    AVAILABLE_MODELS = {
        "scira-default": "Grok3",
        "scira-grok-3-mini": "Grok3-mini",  # thinking model
        "scira-vision": "Grok2-Vision",     # vision model
        "scira-4.1-mini": "GPT4.1-mini",
        "scira-qwq": "QWQ-32B",
        "scira-o4-mini": "o4-mini",
        "scira-google": "gemini 2.5 flash",
    }

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the SciraChat client.

        Args:
            timeout: Request timeout in seconds (defaults to 30 when None)
            browser: Browser to emulate in user agent
        """
        self.timeout = timeout or 30  # Default to 30 seconds if None
        self.api_endpoint = "https://scira.ai/api/search"
        self.session = requests.Session()

        # Initialize LitAgent for user agent / fingerprint generation.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Build browser-like headers from the fingerprint.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://scira.ai",
            "Referer": "https://scira.ai/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        self.session.headers.update(self.headers)

        # Unique IDs for this chat session and pseudo-user.
        self.chat_id = str(uuid.uuid4())
        self.user_id = f"user_{str(uuid.uuid4())[:8].upper()}"

        # Initialize the chat interface.
        self.chat = Chat(self)

    def refresh_identity(self, browser: Optional[str] = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint;
                defaults to the browser of the current fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with the new fingerprint (keep the old Sec-CH-UA
        # if the new fingerprint does not provide one).
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Push the refreshed headers onto the live session.
        self.session.headers.update(self.headers)

        return self.fingerprint

    def format_text(self, text: str) -> str:
        """
        Decode escape sequences (``\\n``, ``\\"``, ``\\uXXXX``, ...) captured
        from the wire format into their literal characters.

        The captured text is the interior of a JSON string, so JSON decoding
        is attempted first; manual replacement of the common sequences is the
        fallback. (The previous version ran the manual replacements *before*
        the JSON pass, which inserted raw control characters that made the
        JSON pass fail — and could double-decode when it did succeed.)

        Args:
            text: Text to format

        Returns:
            Formatted text
        """
        try:
            # Wrap in quotes to form a valid JSON string literal; json.loads
            # then handles every escape sequence correctly in one pass.
            return json.loads(f'"{text}"')
        except json.JSONDecodeError:
            pass
        except Exception as e:
            print(f"Warning: Error formatting text: {e}")
            return text

        # Fallback: handle the common escape sequences by hand.
        try:
            text = text.replace('\\\\', '\\')  # double backslashes first
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")
            return text
        except Exception as e:
            print(f"Warning: Error formatting text: {e}")
            return text

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by SciraChat.

        Args:
            model: Model name to convert

        Returns:
            SciraChat model name (falls back to "scira-default" with a
            warning when the requested model is unknown)
        """
        if model in self.AVAILABLE_MODELS:
            return model

        print(f"Warning: Unknown model '{model}'. Using 'scira-default' instead.")
        return "scira-default"