webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Warning: this version of webscout has been flagged as potentially problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
--- /dev/null
+++ webscout/Provider/OPENAI/mcpcore.py
@@ -0,0 +1,376 @@
+import time
+import uuid
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Use curl_cffi for requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Import LitAgent for user agent generation
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    # Define a dummy LitAgent if webscout is not installed or accessible
+    class LitAgent:
+        def random(self) -> str:
+            # Return a default user agent if LitAgent is unavailable
+            print("Warning: LitAgent not found. Using default minimal headers.")
+            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'MCPCore'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation using the MCPCore API.
+        Mimics openai.chat.completions.create.
+        """
+        if model not in self._client.AVAILABLE_MODELS:
+            raise ValueError(f"Model '{model}' not supported by MCPCore. Available: {self._client.AVAILABLE_MODELS}")
+
+        # Construct the MCPCore-specific payload
+        payload = {
+            "stream": stream,
+            "model": model,
+            "messages": messages,
+            "params": kwargs.get("params", {}),
+            "tool_servers": kwargs.get("tool_servers", []),
+            "features": kwargs.get("features", {"web_search": False}),
+            "chat_id": kwargs.get("chat_id", str(uuid.uuid4())),
+            "id": str(uuid.uuid4()),  # Message ID
+            "stream_options": kwargs.get("stream_options", {"include_usage": True})
+        }
+
+        # Add optional OpenAI params to MCPCore's 'params' field if provided
+        if temperature is not None: payload["params"]["temperature"] = temperature
+        if top_p is not None: payload["params"]["top_p"] = top_p
+        if max_tokens is not None: payload["params"]["max_tokens"] = max_tokens
+
+        # Generate standard OpenAI-compatible IDs
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Handles the streaming response from MCPCore."""
+        final_usage_data = None  # To store usage if received
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout,
+                impersonate="chrome110"  # Impersonation often helps
+            )
+
+            if not response.ok:
+                try:
+                    error_text = response.text
+                except Exception:
+                    error_text = "<Failed to read error response>"
+                raise IOError(
+                    f"MCPCore API Error: {response.status_code} {response.reason} - {error_text}"
+                )
+
+            for line_bytes in response.iter_lines():
+                if line_bytes:
+                    try:
+                        line = line_bytes.decode('utf-8').strip()
+                        if line.startswith("data: "):
+                            json_str = line[6:]
+                            if json_str == "[DONE]":
+                                break  # End of stream signal
+
+                            json_data = json.loads(json_str)
+
+                            # Check for usage data in the chunk (often comes near the end)
+                            if 'usage' in json_data and json_data['usage']:
+                                final_usage_data = json_data['usage']
+                                # Don't yield a chunk just for usage, wait for content or final chunk
+
+                            if 'choices' in json_data and len(json_data['choices']) > 0:
+                                choice_data = json_data['choices'][0]
+                                delta_data = choice_data.get('delta', {})
+                                finish_reason = choice_data.get('finish_reason')
+                                content = delta_data.get('content')
+                                role = delta_data.get('role', 'assistant')  # Default role
+
+                                # Only yield chunks with content or a finish reason
+                                if content is not None or finish_reason:
+                                    delta = ChoiceDelta(content=content, role=role)
+                                    choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                    chunk = ChatCompletionChunk(
+                                        id=request_id,
+                                        choices=[choice],
+                                        created=created_time,
+                                        model=model,
+                                        system_fingerprint=json_data.get('system_fingerprint')
+                                    )
+                                    yield chunk
+
+                    except (json.JSONDecodeError, UnicodeDecodeError):
+                        print(f"{RED}Warning: Could not decode JSON line: {line}{RESET}")
+                        continue
+                    except Exception as e:
+                        print(f"{RED}Error processing stream line: {e} - Line: {line}{RESET}")
+                        continue
+
+            # Final chunk to ensure stream termination is signaled correctly
+            # (even if [DONE] was received, this confirms the generator end)
+            final_delta = ChoiceDelta()
+            # Include usage in the final chunk if available
+            usage_obj = None
+            if final_usage_data:
+                usage_obj = CompletionUsage(
+                    prompt_tokens=final_usage_data.get('prompt_tokens', 0),
+                    completion_tokens=final_usage_data.get('completion_tokens', 0),
+                    total_tokens=final_usage_data.get('total_tokens', 0),
+                )
+
+            final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[final_choice],
+                created=created_time,
+                model=model,
+                # system_fingerprint=...,  # Can be added if available in the final event
+            )
+            # Add usage to the final chunk's dictionary representation if available
+            final_chunk_dict = final_chunk.to_dict()
+            if usage_obj:
+                final_chunk_dict["usage"] = usage_obj.to_dict()
+
+            # Yield the final dictionary or object as needed by downstream consumers;
+            # yielding the object aligns better with the generator type hint
+            yield final_chunk
+
+
+        except CurlError as e:
+            print(f"{RED}CurlError during MCPCore stream request: {e}{RESET}")
+            raise IOError(f"MCPCore request failed due to network/curl issue: {e}") from e
+        except Exception as e:
+            print(f"{RED}Unexpected error during MCPCore stream: {e}{RESET}")
+            error_details = ""
+            if hasattr(e, 'response') and e.response is not None:
+                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+            raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e
+
+    def _create_non_stream_from_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        """Handles the non-streaming response by making a single POST request (like deepinfra)."""
+        try:
+            # Ensure stream is False for non-streaming
+            payload = dict(payload)
+            payload["stream"] = False
+
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                timeout=self._client.timeout,
+                impersonate="chrome110"
+            )
+            if not response.ok:
+                try:
+                    error_text = response.text
+                except Exception:
+                    error_text = "<Failed to read error response>"
+                raise IOError(
+                    f"MCPCore API Error: {response.status_code} {response.reason} - {error_text}"
+                )
+
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except CurlError as e:
+            print(f"{RED}CurlError during MCPCore non-stream request: {e}{RESET}")
+            raise IOError(f"MCPCore request failed due to network/curl issue: {e}") from e
+        except Exception as e:
+            print(f"{RED}Unexpected error during MCPCore non-stream: {e}{RESET}")
+            error_details = ""
+            if hasattr(e, 'response') and e.response is not None:
+                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+            raise IOError(f"MCPCore non-stream processing failed: {e}{error_details}") from e
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'MCPCore'):
+        self.completions = Completions(client)
+
+
+class MCPCore(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for the MCPCore API (chat.mcpcore.xyz).
+
+    Requires cookies to be stored in a JSON file (e.g., 'cookies.json').
+    The JSON file should contain a list of cookie objects, including one named 'token'.
+
+    Usage:
+        client = MCPCore(cookies_path="path/to/your/cookies.json")
+        response = client.chat.completions.create(
+            model="google/gemma-7b-it",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+    AVAILABLE_MODELS = [
+        "google/gemma-7b-it",
+        "deepseek-ai/deepseek-coder-33b-instruct",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+        "deepseek-ai/DeepSeek-v3-0324",
+        "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "meta-llama/Llama-4-Maverick-Instruct",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "qwen-max-latest",
+        "qwen-plus-latest",
+        "qwen2.5-coder-32b-instruct",
+        "qwen-turbo-latest",
+        "qwen2.5-14b-instruct-1m",
+        "GLM-4-32B",
+        "Z1-32B",
+        "Z1-Rumination",
+        "arena-model",
+        "qvq-72b-preview-0310",
+        "qwq-32b",
+        "qwen3-235b-a22b",
+        "qwen3-30b-a3b",
+        "qwen3-32b",
+        "deepseek-flash",
+        "@cf/meta/llama-4-scout-17b-16e-instruct",
+        "任务专用",
+    ]
+
+    def __init__(
+        self,
+        cookies_path: str,  # Make cookies path mandatory for authentication
+        timeout: int = 60,
+    ):
+        """
+        Initializes the MCPCore OpenAI-compatible client.
+
+        Args:
+            cookies_path: Path to the JSON file containing cookies (must include 'token').
+            timeout: Request timeout in seconds.
+            proxies: Optional proxy configuration (documented, but not accepted by this signature).
+            system_prompt: Default system prompt (documented, but not accepted by this signature).
+        """
+        self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
+        self.timeout = timeout
+        self.cookies_path = cookies_path
+
+        try:
+            self.token = self._load_token_from_cookies()
+            if not self.token:
+                raise ValueError("Could not find 'token' cookie in the provided file.")
+        except Exception as e:
+            raise ValueError(f"Failed to load authentication token from cookies file '{cookies_path}': {e}") from e
+
+        self.session = Session()  # Use curl_cffi Session
+
+        # Basic headers + Authorization
+        self.headers = {
+            'authority': 'chat.mcpcore.xyz',
+            'accept': '*/*',  # Accept anything, let the server decide
+            'accept-language': 'en-US,en;q=0.9',
+            'authorization': f'Bearer {self.token}',
+            'content-type': 'application/json',
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/',
+            'user-agent': LitAgent().random(),  # Use LitAgent for User-Agent
+        }
+        # Add more headers mimicking browser behavior if needed, e.g., sec-ch-ua, etc.
+        # Example:
+        # self.headers.update({
+        #     'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+        #     'sec-ch-ua-mobile': '?0',
+        #     'sec-ch-ua-platform': '"Windows"',
+        #     'sec-fetch-dest': 'empty',
+        #     'sec-fetch-mode': 'cors',
+        #     'sec-fetch-site': 'same-origin',
+        # })
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)  # Initialize chat interface
+
+    def _load_token_from_cookies(self) -> Optional[str]:
+        """Load the 'token' value from a JSON cookies file."""
+        try:
+            with open(self.cookies_path, "r") as f:
+                cookies = json.load(f)
+            # Find the cookie named 'token'
+            token_cookie = next((cookie for cookie in cookies if cookie.get("name") == "token"), None)
+            return token_cookie.get("value") if token_cookie else None
+        except FileNotFoundError:
+            print(f"{RED}Error: Cookies file not found at {self.cookies_path}!{RESET}")
+            return None
+        except json.JSONDecodeError:
+            print(f"{RED}Error: Invalid JSON format in cookies file: {self.cookies_path}!{RESET}")
+            return None
+        except Exception as e:
+            print(f"{RED}An unexpected error occurred loading cookies: {e}{RESET}")
+            return None
+
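To make the new provider concrete, here is a minimal usage sketch assembled from the class docstring and the payload logic above. It is an illustration, not code from the package: the cookie file is written inline only to mirror the list-of-cookie-objects layout that _load_token_from_cookies() parses, the token value is a placeholder you would replace with a real chat.mcpcore.xyz session token, and the model name is just one entry from AVAILABLE_MODELS.

    # Minimal usage sketch for the new MCPCore provider (not part of the package).
    import json

    from webscout.Provider.OPENAI.mcpcore import MCPCore

    # Hypothetical cookie export: a list of cookie objects including one named
    # 'token', which is what _load_token_from_cookies() looks for. The value
    # below is a placeholder.
    with open("cookies.json", "w") as f:
        json.dump([{"name": "token", "value": "<your-session-token>"}], f)

    client = MCPCore(cookies_path="cookies.json", timeout=60)

    # Non-streaming: create() returns a ChatCompletion object.
    response = client.chat.completions.create(
        model="qwen3-32b",  # any entry from MCPCore.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)

    # Streaming: create() returns a generator of ChatCompletionChunk objects,
    # terminated by a final chunk with finish_reason="stop".
    for chunk in client.chat.completions.create(
        model="qwen3-32b",
        messages=[{"role": "user", "content": "Hello again!"}],
        stream=True,
    ):
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)

Both paths hit the same https://chat.mcpcore.xyz/api/chat/completions endpoint through curl_cffi with Chrome impersonation, so network and HTTP failures surface as IOError rather than as OpenAI-style API exceptions.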