webscout 8.0-py3-none-any.whl → 8.2-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.
Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
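
The headline addition in 8.2 is the new webscout/Provider/OPENAI package (items 28–54), which puts each provider behind an OpenAI-style chat.completions.create interface, alongside the new webscout/Local and inferno modules. As a rough illustration of the non-streaming path, a call against one of the new providers might look like the sketch below; the import path is inferred from the file list above, and the X0GPT class and response shape are taken from the x0gpt.py diff that follows, so treat this as a sketch rather than documented API:

    # Hypothetical usage sketch for the new OpenAI-compatible providers in 8.2.
    # Import path inferred from the file layout shown above.
    from webscout.Provider.OPENAI.x0gpt import X0GPT

    client = X0GPT(timeout=30)
    response = client.chat.completions.create(
        model="gpt-4",  # per the code below, X0GPT ignores the model name; it is kept for compatibility
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)
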
webscout/Provider/OPENAI/x0gpt.py
@@ -0,0 +1,389 @@
+import time
+import uuid
+import requests
+import re
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fall back if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    # Define a dummy LitAgent if webscout is not installed or accessible
+    class LitAgent:
+        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+            # Return minimal default headers if LitAgent is unavailable
+            print("Warning: LitAgent not found. Using default minimal headers.")
+            return {
+                "accept": "*/*",
+                "accept_language": "en-US,en;q=0.9",
+                "platform": "Windows",
+                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                "browser_type": browser,
+            }
+
+        def random(self) -> str:
+            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+
+# --- X0GPT Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'X0GPT'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Prepare the payload for the X0GPT API
+        payload = {
+            "messages": messages,
+            "chatId": uuid.uuid4().hex,
+            "namespace": None
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            # Estimate prompt tokens based on message length
+            for msg in payload.get("messages", []):
+                prompt_tokens += len(msg.get("content", "").split())
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+
+                    # X0GPT uses its own wire format, so extract the content
+                    match = re.search(r'0:"(.*?)"', decoded_line)
+                    if match:
+                        content = match.group(1)
+
+                        # Format the content (replace escaped newlines)
+                        content = self._client.format_text(content)
+
+                        # Update token counts
+                        completion_tokens += 1
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        # Create the delta object
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+
+                        # Create the choice object
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+
+                        # Create the chunk object
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+
+                        # Convert to dict for proper formatting
+                        chunk_dict = chunk.to_dict()
+
+                        # Add usage information to match the OpenAI format
+                        usage_dict = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+
+                        chunk_dict["usage"] = usage_dict
+
+                        # Yield the chunk object for internal processing
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            chunk_dict = chunk.to_dict()
+            chunk_dict["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during X0GPT stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            # For non-streaming, we still stream internally to collect the full response
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Estimate token counts
+            prompt_tokens = 0
+            for msg in payload.get("messages", []):
+                prompt_tokens += len(msg.get("content", "").split())
+
+            completion_tokens = len(full_text.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during X0GPT non-stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'X0GPT'):
+        self.completions = Completions(client)
+
+class X0GPT(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for the X0GPT API.
+
+    Usage:
+        client = X0GPT()
+        response = client.chat.completions.create(
+            model="gpt-4",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the X0GPT client.
+
+        Args:
+            timeout: Request timeout in seconds (None for no timeout)
+            browser: Browser to emulate in the user agent
+        """
+        self.timeout = timeout
+        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user-agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "authority": "x0-gpt.devwtf.in",
+            "method": "POST",
+            "path": "/api/stream/reply",
+            "scheme": "https",
+            "accept": self.fingerprint["accept"],
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://x0-gpt.devwtf.in",
+            "priority": "u=1, i",
+            "referer": "https://x0-gpt.devwtf.in/chat",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences via JSON decoding,
+            # as a fallback in case other escape sequences are present
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use the json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements made so far
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by X0GPT.
+
+        Args:
+            model: Model name to convert
+
+        Returns:
+            X0GPT model name
+        """
+        # X0GPT doesn't actually use model names, but we keep this for compatibility
+        return model
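
For streaming, create(..., stream=True) returns a generator of ChatCompletionChunk objects rather than a single ChatCompletion: each chunk carries an incremental delta, and the final chunk has content=None with finish_reason="stop". A minimal consumption sketch, assuming the X0GPT client defined above (the chunk attribute shapes mirror how the objects are constructed in _create_stream):

    # Streaming sketch against the X0GPT client shown in the diff above.
    client = X0GPT()
    for chunk in client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Write a haiku about the sea."}],
        stream=True,
    ):
        piece = chunk.choices[0].delta.content
        if piece:  # skip the final stop chunk, whose content is None
            print(piece, end="", flush=True)
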